Merge tag 'drm-next-2020-04-01' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 5.7-rc1.

  Highlights:

   - i915 enables Tigerlake by default

   - i915 and amdgpu have initial OLED backlight support

     [ Jani Nikula pipes up and points out that we've had a bunch of
       "initial support" code for a long time already, but only now
       Lyude made it actually work on real world machines ]

   - vmwgfx adds support to enable OpenGL 4 userspace

   - zero-length arrays are mostly removed in favour of flexible array
     members (sketch below)
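
     [ For illustration only -- the conversions replace the old GNU
       zero-length-array idiom with a C99 flexible array member; a
       minimal sketch with a hypothetical struct name:

          struct example {
                  int len;
                  u8 data[];   /* was: u8 data[0]; */
          };
     ]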

  Detailed summary:

  new driver:
   - tidss: TI Keystone platform display subsystem

  core:
   - new drm device warn macros
   - mode config validation for memory-constrained devices
   - bridge bus format negotiation
   - consolidated fake vblank event handling
   - dma_alloc related cleanups
   - drop get_crtc callback
   - dp: DP1.4 EDID corruption test
   - EDID CEA detailed timings improvements
   - relicense some code to dual GPL2/MIT
   - convert core vblank support to per-crtc support
   - rework drm_global_mutex
   - bridge rework to allow omap_dss custom driver removal
   - remove drm_fb_helper connector interfaces
   - zero-length array removal

  scheduler:
   - support for modifying the sched list
   - revert job distribution optimization
   - helper to pick the least-loaded scheduler (see the sketch below)
   - race condition fix
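
   [ Conceptually -- a hedged sketch with hypothetical names, not the
     exact in-tree API:

        struct sched { int num_jobs; };

        static struct sched *pick_least_loaded(struct sched **s, int n)
        {
                struct sched *best = NULL;

                for (int i = 0; i < n; i++)
                        if (!best || s[i]->num_jobs < best->num_jobs)
                                best = s[i];
                return best;
        }
   ]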

  mst:
   - various fixes
   - remove register_connector callback

  i915:
   - uapi to allow userspace to specify CS ring buffer sizes
   - Tigerlake enablement patches + Tigerlake enabled by default
   - new sysfs entries for engine properties
   - display/logging refactors
   - eDP/DP fixes for DPCD
   - Gen7 back to aliasing-ppgtt
   - Gen8+ irq refactor
   - Avoid globals
   - GEM locking fixes and simplifications
   - Ice Lake and Elkhart Lake fixes and workarounds
   - Baytrail/Haswell instability fix
   - GVT: better VFIO EDID support

  amdgpu:
   - Rework VM update handling in preparation for HMM support
   - drm load/unload removal fixups
   - USB-C PD firmware updates
   - HDCP SRM support
   - Navi/Renoir PM watermark fixes
   - OLED panel support
   - Optimize VRAM access for debugging
   - Use BACO for runtime pm
   - DC clock programming optimizations and fixes
   - PSP fw loading sequence updates
   - Drop DRIVER_USE_AGP
   - Remove legacy drm load and unload callbacks
   - ACP Kconfig fix
   - Lots of fixes across the driver

  amdkfd:
   - runtime pm support
   - more gfx config details in amdgpu

  radeon:
   - drop DRIVER_USE_AGP

  vmwgfx:
   - Disable DMA when SEV encryption is in use
   - Shader Model 5 support - needed for GL4 support

  msm:
   - DPU resource manager refactor
   - DPU now uses atomic global state

  mediatek:
   - MT8183 DPI support

  etnaviv:
   - out-of-bounds read fix
   - expose feature flags for GC400 STM32MP1 SoC
   - runtime suspend entry fix
   - dma32 zone fix

  hisilicon:
   - mode selection fixes

  meson:
   - YUV420 support

  lima:
   - add support for heap buffers

  tinydrm:
   - removal of owner field
   - explicit DT dependency removal
   - YAML schema conversion

  tegra:
   - misc cleanups

  tidss:
   - new driver

  virtio:
   - better batching of notifications to host
   - memory handling reworked
   - shmem + gpu context fixes

  hibmc:
   - add gamma_set support
   - improve DPMS support

  pl111:
   - Integrator IM-PD1 support

  sun4i:
   - LVDS support for A20 + A33
   - DSI panel handling improvements"

* tag 'drm-next-2020-04-01' of git://anongit.freedesktop.org/drm/drm: (1537 commits)
  drm/i915/display: Fix mode private_flags comparison at atomic_check
  drm/i915/gt: Stage the transfer of the virtual breadcrumb
  drm/i915/gt: Select the deepest available parking mode for rc6
  drm/i915: Avoid live-lock with i915_vma_parked()
  drm/i915/gt: Treat idling as a RPS downclock event
  drm/i915/gt: Cancel a hung context if already closed
  drm/i915: Use explicit flag to mark unreachable intel_context
  drm/amdgpu: don't try to reserve training bo for sriov (v2)
  drm/amdgpu/smu11: add support for SMU AC/DC interrupts
  drm/amdgpu/swSMU: handle manual AC/DC notifications
  drm/amdgpu/swSMU: handle DC controlled by GPIO for navi1x
  drm/amdgpu/swSMU: set AC/DC mode based on the current system state (v2)
  drm/amdgpu/swSMU: correct the bootup power source for Navi1X (v2)
  drm/amdgpu/swSMU: use the smu11 power source helper for navi1x
  drm/amdgpu/smu11: add a helper to set the power source
  drm/amd/swSMU: add callback to set AC/DC power source (v2)
  drm/scheduler: fix rare NULL ptr race
  drm/amdgpu: fix the coverage issue to clear ArcVPGRs
  drm/amd/display: Fix pageflip event race condition for DCN.
  drm/[radeon|amdgpu]: Remove HAINAN board from max_sclk override check
  ...
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 5ff9cf2..e5344c4 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -36,6 +36,12 @@
 
       - items:
         - enum:
+          - allwinner,sun7i-a20-tcon0
+          - allwinner,sun7i-a20-tcon1
+        - const: allwinner,sun7i-a20-tcon
+
+      - items:
+        - enum:
             - allwinner,sun50i-a64-tcon-lcd
         - const: allwinner,sun8i-a83t-tcon-lcd
 
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 2c88753..e8ddec5 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -1,10 +1,10 @@
-Analog Device ADV7511(W)/13/33 HDMI Encoders
+Analog Device ADV7511(W)/13/33/35 HDMI Encoders
 -----------------------------------------
 
-The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
-compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
-the others support RGB interface.
+The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
+transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
+conversion, S/PDIF, CEC and HDCP. ADV7533/5 supports the DSI interface for input
+pixels, while the others support an RGB interface.
 
 Required properties:
 
@@ -13,6 +13,7 @@
 		"adi,adv7511w"
 		"adi,adv7513"
 		"adi,adv7533"
+		"adi,adv7535"
 
 - reg: I2C slave addresses
   The ADV7511 internal registers are split into four pages exposed through
@@ -52,14 +53,14 @@
 - bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
   needed only for ADV7511.
 
-The following properties are required for ADV7533:
+The following properties are required for ADV7533 and ADV7535:
 
 - adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
   be one of 1, 2, 3 or 4.
 - a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip.
 - v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip.
 - v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be
-  either 1.2V or 1.8V.
+  either 1.2V or 1.8V for ADV7533 but only 1.8V for ADV7535.
 
 Optional properties:
 
@@ -71,9 +72,9 @@
 - adi,embedded-sync: The input uses synchronization signals embedded in the
   data stream (similar to BT.656). Defaults to separate H/V synchronization
   signals.
-- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
-  generator. The chip will rely on the sync signals in the DSI data lanes,
-  rather than generate its own timings for HDMI output.
+- adi,disable-timing-generator: Only for ADV7533 and ADV7535. Disables the
+  internal timing generator. The chip will rely on the sync signals in the
+  DSI data lanes, rather than generate its own timings for HDMI output.
 - clocks: from common clock binding: reference to the CEC clock.
 - clock-names: from common clock binding: must be "cec".
 - reg-names : Names of maps with programmable addresses.
@@ -85,7 +86,7 @@
 The ADV7511 has two video ports. Their connections are modelled using the OF
 graph bindings specified in Documentation/devicetree/bindings/graph.txt.
 
-- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533/5, the
   remote endpoint phandle should be a reference to a valid mipi_dsi_host device
   node.
 - Video port 1 for the HDMI output
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
new file mode 100644
index 0000000..5dff936
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ps8640.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MIPI DSI to eDP Video Format Converter Device Tree Bindings
+
+maintainers:
+  - Nicolas Boichat <drinkcat@chromium.org>
+  - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+
+description: |
+  The PS8640 is a low power MIPI-to-eDP video format converter supporting
+  mobile devices with embedded panel resolutions up to 2048 x 1536. The
+  device accepts a single channel of MIPI DSI v1.1, with up to four lanes
+  plus clock, at a transmission rate up to 1.5Gbit/sec per lane. The
+  device outputs eDP v1.4, one or two lanes, at a link rate of up to
+  3.24Gbit/sec per lane.
+
+properties:
+  compatible:
+    const: parade,ps8640
+
+  reg:
+    maxItems: 1
+    description: Base I2C address of the device.
+
+  powerdown-gpios:
+    maxItems: 1
+    description: GPIO connected to active low powerdown.
+
+  reset-gpios:
+    maxItems: 1
+    description: GPIO connected to active low reset.
+
+  vdd12-supply:
+    maxItems: 1
+    description: Regulator for 1.2V digital core power.
+
+  vdd33-supply:
+    maxItems: 1
+    description: Regulator for 3.3V digital core power.
+
+  ports:
+    type: object
+    description:
+      A node containing DSI input & output port nodes with endpoint
+      definitions as documented in
+      Documentation/devicetree/bindings/media/video-interfaces.txt
+      Documentation/devicetree/bindings/graph.txt
+    properties:
+      port@0:
+        type: object
+        description: |
+          Video port for DSI input
+
+      port@1:
+        type: object
+        description: |
+          Video port for eDP output (panel or connector).
+
+    required:
+      - port@0
+
+required:
+  - compatible
+  - reg
+  - powerdown-gpios
+  - reset-gpios
+  - vdd12-supply
+  - vdd33-supply
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ps8640: edp-bridge@18 {
+            compatible = "parade,ps8640";
+            reg = <0x18>;
+            powerdown-gpios = <&pio 116 GPIO_ACTIVE_LOW>;
+            reset-gpios = <&pio 115 GPIO_ACTIVE_LOW>;
+            vdd12-supply = <&ps8640_fixed_1v2>;
+            vdd33-supply = <&mt6397_vgp2_reg>;
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                port@0 {
+                    reg = <0>;
+                    ps8640_in: endpoint {
+                        remote-endpoint = <&dsi0_out>;
+                    };
+                };
+
+                port@1 {
+                    reg = <1>;
+                    ps8640_out: endpoint {
+                        remote-endpoint = <&panel_in>;
+                    };
+                };
+            };
+        };
+    };
+
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
new file mode 100644
index 0000000..c036a75
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/toshiba,tc358768.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba TC358768/TC358778 Parallel RGB to MIPI DSI bridge
+
+maintainers:
+  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+description: |
+  The TC358768/TC358778 is a bridge device that converts RGB to DSI.
+
+properties:
+  compatible:
+    enum:
+      - toshiba,tc358768
+      - toshiba,tc358778
+
+  reg:
+    maxItems: 1
+    description: base I2C address of the device
+
+  reset-gpios:
+    maxItems: 1
+    description: GPIO connected to active low RESX pin
+
+  vddc-supply:
+    description: Regulator for 1.2V internal core power.
+
+  vddmipi-supply:
+    description: Regulator for 1.2V for the MIPI.
+
+  vddio-supply:
+    description: Regulator for 1.8V - 3.3V IO power.
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: refclk
+
+  ports:
+    type: object
+
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        additionalProperties: false
+
+        description: |
+          Video port for RGB input
+
+        properties:
+          reg:
+            const: 0
+
+        patternProperties:
+          endpoint:
+            type: object
+            additionalProperties: false
+
+            properties:
+              data-lines:
+                enum: [ 16, 18, 24 ]
+
+              remote-endpoint: true
+
+        required:
+          - reg
+
+      port@1:
+        type: object
+        additionalProperties: false
+
+        description: |
+          Video port for DSI output (panel or connector).
+
+        properties:
+          reg:
+            const: 1
+
+        patternProperties:
+          endpoint:
+            type: object
+            additionalProperties: false
+
+            properties:
+              remote-endpoint: true
+
+        required:
+          - reg
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+      - port@0
+      - port@1
+
+required:
+  - compatible
+  - reg
+  - vddc-supply
+  - vddmipi-supply
+  - vddio-supply
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    i2c1 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      dsi_bridge: dsi-bridge@e {
+        compatible = "toshiba,tc358768";
+        reg = <0xe>;
+
+        clocks = <&tc358768_refclk>;
+        clock-names = "refclk";
+
+        reset-gpios = <&pcf_display_board 0 GPIO_ACTIVE_LOW>;
+
+        vddc-supply = <&v1_2d>;
+        vddmipi-supply = <&v1_2d>;
+        vddio-supply = <&v3_3d>;
+
+        dsi_bridge_ports: ports {
+          #address-cells = <1>;
+          #size-cells = <0>;
+
+          port@0 {
+            reg = <0>;
+            rgb_in: endpoint {
+              remote-endpoint = <&dpi_out>;
+              data-lines = <24>;
+            };
+          };
+
+          port@1 {
+            reg = <1>;
+            dsi_out: endpoint {
+              remote-endpoint = <&lcd_in>;
+            };
+          };
+        };
+      };
+    };
+
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
new file mode 100644
index 0000000..66e93e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ilitek,ili9486.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9486 display panels device tree bindings
+
+maintainers:
+  - Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+
+description:
+  This binding is for display panels using an Ilitek ILI9486 controller in SPI
+  mode.
+
+allOf:
+  - $ref: panel/panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          # Waveshare 3.5" 320x480 Color TFT LCD
+        - waveshare,rpi-lcd-35
+          # Ozzmaker 3.5" 320x480 Color TFT LCD
+        - ozzmaker,piscreen
+      - const: ilitek,ili9486
+
+  spi-max-frequency:
+    maximum: 32000000
+
+  dc-gpios:
+    maxItems: 1
+    description: Display data/command selection (D/CX)
+
+  backlight: true
+  reg: true
+  reset-gpios: true
+  rotation: true
+
+required:
+  - compatible
+  - reg
+  - dc-gpios
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    backlight: backlight {
+            compatible = "gpio-backlight";
+            gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+    };
+    spi {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+
+            display@0{
+                    compatible = "waveshare,rpi-lcd-35", "ilitek,ili9486";
+                    reg = <0>;
+                    spi-max-frequency = <32000000>;
+                    dc-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
+                    reset-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+                    rotation = <180>;
+                    backlight = <&backlight>;
+            };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
index b6a7e73..58914cf 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
@@ -7,6 +7,7 @@
 
 Required properties:
 - compatible: "mediatek,<chip>-dpi"
+  the supported chips are mt2701, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt
deleted file mode 100644
index bf9c7a2..0000000
--- a/Documentation/devicetree/bindings/display/msm/gmu.txt
+++ /dev/null
@@ -1,116 +0,0 @@
-Qualcomm adreno/snapdragon GMU (Graphics management unit)
-
-The GMU is a programmable power controller for the GPU. the CPU controls the
-GMU which in turn handles power controls for the GPU.
-
-Required properties:
-- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu"
-    for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu"
-  Note that you need to list the less specific "qcom,adreno-gmu"
-  for generic matches and the more specific identifier to identify
-  the specific device.
-- reg: Physical base address and length of the GMU registers.
-- reg-names: Matching names for the register regions
-  * "gmu"
-  * "gmu_pdc"
-  * "gmu_pdc_seg"
-- interrupts: The interrupt signals from the GMU.
-- interrupt-names: Matching names for the interrupts
-  * "hfi"
-  * "gmu"
-- clocks: phandles to the device clocks
-- clock-names: Matching names for the clocks
-   * "gmu"
-   * "cxo"
-   * "axi"
-   * "mnoc"
-- power-domains: should be:
-	<&clock_gpucc GPU_CX_GDSC>
-	<&clock_gpucc GPU_GX_GDSC>
-- power-domain-names: Matching names for the power domains
-- iommus: phandle to the adreno iommu
-- operating-points-v2: phandle to the OPP operating points
-
-Optional properties:
-- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon
-        SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
-
-Example:
-
-/ {
-	...
-
-	gmu: gmu@506a000 {
-		compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
-
-		reg = <0x506a000 0x30000>,
-			<0xb280000 0x10000>,
-			<0xb480000 0x10000>;
-		reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
-
-		interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
-		     <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-names = "hfi", "gmu";
-
-		clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
-			<&gpucc GPU_CC_CXO_CLK>,
-			<&gcc GCC_DDRSS_GPU_AXI_CLK>,
-			<&gcc GCC_GPU_MEMNOC_GFX_CLK>;
-		clock-names = "gmu", "cxo", "axi", "memnoc";
-
-		power-domains = <&gpucc GPU_CX_GDSC>,
-				<&gpucc GPU_GX_GDSC>;
-		power-domain-names = "cx", "gx";
-
-		iommus = <&adreno_smmu 5>;
-
-		operating-points-v2 = <&gmu_opp_table>;
-	};
-};
-
-a3xx example with OCMEM support:
-
-/ {
-	...
-
-	gpu: adreno@fdb00000 {
-		compatible = "qcom,adreno-330.2",
-		             "qcom,adreno";
-		reg = <0xfdb00000 0x10000>;
-		reg-names = "kgsl_3d0_reg_memory";
-		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-names = "kgsl_3d0_irq";
-		clock-names = "core",
-		              "iface",
-		              "mem_iface";
-		clocks = <&mmcc OXILI_GFX3D_CLK>,
-		         <&mmcc OXILICX_AHB_CLK>,
-		         <&mmcc OXILICX_AXI_CLK>;
-		sram = <&gmu_sram>;
-		power-domains = <&mmcc OXILICX_GDSC>;
-		operating-points-v2 = <&gpu_opp_table>;
-		iommus = <&gpu_iommu 0>;
-	};
-
-	ocmem@fdd00000 {
-		compatible = "qcom,msm8974-ocmem";
-
-		reg = <0xfdd00000 0x2000>,
-		      <0xfec00000 0x180000>;
-		reg-names = "ctrl",
-		             "mem";
-
-		clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
-		         <&mmcc OCMEMCX_OCMEMNOC_CLK>;
-		clock-names = "core",
-		              "iface";
-
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		gmu_sram: gmu-sram@0 {
-			reg = <0x0 0x100000>;
-			ranges = <0 0 0xfec00000 0x100000>;
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml
new file mode 100644
index 0000000..0b8736a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright 2019-2020, The Linux Foundation, All Rights Reserved
+%YAML 1.2
+---
+
+$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Devicetree bindings for the GMU attached to certain Adreno GPUs
+
+maintainers:
+  - Rob Clark <robdclark@gmail.com>
+
+description: |
+  These bindings describe the Graphics Management Unit (GMU) that is attached
+  to members of the Adreno A6xx GPU family. The GMU provides on-device power
+  management and support to improve power efficiency and reduce the load on
+  the CPU.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - qcom,adreno-gmu-630.2
+      - const: qcom,adreno-gmu
+
+  reg:
+    items:
+      - description: Core GMU registers
+      - description: GMU PDC registers
+      - description: GMU PDC sequence registers
+
+  reg-names:
+    items:
+      - const: gmu
+      - const: gmu_pdc
+      - const: gmu_pdc_seq
+
+  clocks:
+    items:
+     - description: GMU clock
+     - description: GPU CX clock
+     - description: GPU AXI clock
+     - description: GPU MEMNOC clock
+
+  clock-names:
+    items:
+      - const: gmu
+      - const: cxo
+      - const: axi
+      - const: memnoc
+
+  interrupts:
+    items:
+     - description: GMU HFI interrupt
+     - description: GMU interrupt
+
+
+  interrupt-names:
+    items:
+      - const: hfi
+      - const: gmu
+
+  power-domains:
+     items:
+       - description: CX power domain
+       - description: GX power domain
+
+  power-domain-names:
+     items:
+       - const: cx
+       - const: gx
+
+  iommus:
+    maxItems: 1
+
+  operating-points-v2: true
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - clock-names
+  - interrupts
+  - interrupt-names
+  - power-domains
+  - power-domain-names
+  - iommus
+  - operating-points-v2
+
+examples:
+ - |
+   #include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+   #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+   #include <dt-bindings/interrupt-controller/irq.h>
+   #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+   gmu: gmu@506a000 {
+        compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu";
+
+        reg = <0x506a000 0x30000>,
+              <0xb280000 0x10000>,
+              <0xb480000 0x10000>;
+        reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
+
+        clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+                 <&gpucc GPU_CC_CXO_CLK>,
+                 <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+                 <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
+        clock-names = "gmu", "cxo", "axi", "memnoc";
+
+        interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "hfi", "gmu";
+
+        power-domains = <&gpucc GPU_CX_GDSC>,
+                        <&gpucc GPU_GX_GDSC>;
+        power-domain-names = "cx", "gx";
+
+        iommus = <&adreno_smmu 5>;
+        operating-points-v2 = <&gmu_opp_table>;
+   };
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 7edc298..fd779cd 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -35,25 +35,54 @@
   bring the GPU out of secure mode.
 - firmware-name: optional property of the 'zap-shader' node, listing the
   relative path of the device specific zap firmware.
+- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and
+        a4xx Snapdragon SoCs. See
+        Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.
 
-Example 3xx/4xx/a5xx:
+Example 3xx/4xx:
 
 / {
 	...
 
-	gpu: qcom,kgsl-3d0@4300000 {
-		compatible = "qcom,adreno-320.2", "qcom,adreno";
-		reg = <0x04300000 0x20000>;
+	gpu: adreno@fdb00000 {
+		compatible = "qcom,adreno-330.2",
+		             "qcom,adreno";
+		reg = <0xfdb00000 0x10000>;
 		reg-names = "kgsl_3d0_reg_memory";
-		interrupts = <GIC_SPI 80 0>;
-		clock-names =
-		    "core",
-		    "iface",
-		    "mem_iface";
-		clocks =
-		    <&mmcc GFX3D_CLK>,
-		    <&mmcc GFX3D_AHB_CLK>,
-		    <&mmcc MMSS_IMEM_AHB_CLK>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "kgsl_3d0_irq";
+		clock-names = "core",
+		              "iface",
+		              "mem_iface";
+		clocks = <&mmcc OXILI_GFX3D_CLK>,
+		         <&mmcc OXILICX_AHB_CLK>,
+		         <&mmcc OXILICX_AXI_CLK>;
+		sram = <&gpu_sram>;
+		power-domains = <&mmcc OXILICX_GDSC>;
+		operating-points-v2 = <&gpu_opp_table>;
+		iommus = <&gpu_iommu 0>;
+	};
+
+	ocmem@fdd00000 {
+		compatible = "qcom,msm8974-ocmem";
+
+		reg = <0xfdd00000 0x2000>,
+		      <0xfec00000 0x180000>;
+		reg-names = "ctrl",
+		            "mem";
+
+		clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>,
+		         <&mmcc OCMEMCX_OCMEMNOC_CLK>;
+		clock-names = "core",
+		              "iface";
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		gpu_sram: gpu-sram@0 {
+			reg = <0x0 0x100000>;
+			ranges = <0 0 0xfec00000 0x100000>;
+		};
 	};
 };
 
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
new file mode 100644
index 0000000..93878c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-1110wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-1110WR 10.1" WSVGA LVDS Display Panel
+
+maintainers:
+  - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+  - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+  - $ref: lvds.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: advantech,idk-1110wr
+      - {} # panel-lvds, but not listed here to avoid false select
+
+  data-mapping:
+    const: jeida-24
+
+  width-mm:
+    const: 223
+
+  height-mm:
+    const: 125
+
+  panel-timing: true
+  port: true
+
+additionalProperties: false
+
+required:
+  - compatible
+
+examples:
+  - |+
+    panel {
+      compatible = "advantech,idk-1110wr", "panel-lvds";
+
+      width-mm = <223>;
+      height-mm = <125>;
+
+      data-mapping = "jeida-24";
+
+      panel-timing {
+        /* 1024x600 @60Hz */
+        clock-frequency = <51200000>;
+        hactive = <1024>;
+        vactive = <600>;
+        hsync-len = <240>;
+        hfront-porch = <40>;
+        hback-porch = <40>;
+        vsync-len = <10>;
+        vfront-porch = <15>;
+        vback-porch = <10>;
+      };
+
+      port {
+        panel_in: endpoint {
+          remote-endpoint = <&lvds_encoder>;
+        };
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
new file mode 100644
index 0000000..6b7fddc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-2121wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-2121WR 21.5" Full-HD dual-LVDS panel
+
+maintainers:
+  - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+  - Thierry Reding <thierry.reding@gmail.com>
+
+description: |
+  The IDK-2121WR from Advantech is a Full-HD dual-LVDS panel.
+  A dual-LVDS interface is a dual-link connection with even pixels traveling
+  on one link, and with odd pixels traveling on the other link.
+
+  The panel expects odd pixels on the first port, and even pixels on the
+  second port, therefore the ports must be marked accordingly (with either
+  dual-lvds-odd-pixels or dual-lvds-even-pixels).
+
+properties:
+  compatible:
+    items:
+      - const: advantech,idk-2121wr
+      - {} # panel-lvds, but not listed here to avoid false select
+
+  width-mm:
+    const: 476
+
+  height-mm:
+    const: 268
+
+  data-mapping:
+    const: vesa-24
+
+  panel-timing: true
+
+  ports:
+    type: object
+    properties:
+      port@0:
+        type: object
+        description: The sink for odd pixels.
+        properties:
+          reg:
+            const: 0
+
+          dual-lvds-odd-pixels: true
+
+        required:
+          - reg
+          - dual-lvds-odd-pixels
+
+      port@1:
+        type: object
+        description: The sink for even pixels.
+        properties:
+          reg:
+            const: 1
+
+          dual-lvds-even-pixels: true
+
+        required:
+          - reg
+          - dual-lvds-even-pixels
+
+additionalProperties: false
+
+required:
+  - compatible
+  - width-mm
+  - height-mm
+  - data-mapping
+  - panel-timing
+  - ports
+
+examples:
+  - |+
+    panel-lvds {
+      compatible = "advantech,idk-2121wr", "panel-lvds";
+
+      width-mm = <476>;
+      height-mm = <268>;
+
+      data-mapping = "vesa-24";
+
+      panel-timing {
+        clock-frequency = <148500000>;
+        hactive = <1920>;
+        vactive = <1080>;
+        hsync-len = <44>;
+        hfront-porch = <88>;
+        hback-porch = <148>;
+        vfront-porch = <4>;
+        vback-porch = <36>;
+        vsync-len = <5>;
+      };
+
+      ports {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port@0 {
+          reg = <0>;
+          dual-lvds-odd-pixels;
+          panel_in0: endpoint {
+            remote-endpoint = <&lvds0_out>;
+          };
+        };
+
+        port@1 {
+          reg = <1>;
+          dual-lvds-even-pixels;
+          panel_in1: endpoint {
+            remote-endpoint = <&lvds1_out>;
+          };
+        };
+      };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
deleted file mode 100644
index bae0e2b..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
deleted file mode 100644
index 72e088a..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101aw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
deleted file mode 100644
index 3590b07..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
deleted file mode 100644
index 889d511..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
deleted file mode 100644
index 690d0a5..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b116xw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
deleted file mode 100644
index 302226b..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b133htn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
deleted file mode 100644
index 7443b7c..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b133xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
deleted file mode 100644
index 49e4105..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g070vvn01"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
-	lcd_panel: lcd-panel {
-		compatible = "auo,g070vvn01";
-		backlight = <&backlight_lcd>;
-		power-supply = <&reg_display>;
-
-		port {
-			lcd_panel_in: endpoint {
-				remote-endpoint = <&lcd_display_out>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt b/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
deleted file mode 100644
index bc6a0c8..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g101evn010"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt b/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
deleted file mode 100644
index 85626ed..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g104sn02"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
deleted file mode 100644
index 3afc767..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g133han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
deleted file mode 100644
index ed657c2..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g185han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt b/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
deleted file mode 100644
index 59bb6cd..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,p320hvn03"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
deleted file mode 100644
index cbd9da3..0000000
--- a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,t215hvn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt b/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
deleted file mode 100644
index b6f2f3e..0000000
--- a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "avic,tm070ddh03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
deleted file mode 100644
index 55183d3..0000000
--- a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,hv070wsa-100"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-- enable-gpios: GPIO pin to enable and disable panel (active high)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [1]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
-	panel: panel {
-		compatible = "boe,hv070wsa-100";
-		power-supply = <&vcc_3v3_reg>;
-		enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
-		port {
-			panel_ep: endpoint {
-				remote-endpoint = <&bridge_out_ep>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt b/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
deleted file mode 100644
index b258d6a..0000000
--- a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,nv101wxmn51"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
deleted file mode 100644
index 50be5e2..0000000
--- a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Boe Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,tv080wum-nl0"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
new file mode 100644
index 0000000..7402134
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-nl6.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-NL6 DSI Display Panel
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    enum:
+        # BOE TV101WUM-NL6 10.1" WUXGA TFT LCD panel
+      - boe,tv101wum-nl6
+        # AUO KD101N80-45NA 10.1" WUXGA TFT LCD panel
+      - auo,kd101n80-45na
+        # BOE TV101WUM-N53 10.1" WUXGA TFT LCD panel
+      - boe,tv101wum-n53
+        # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel
+      - auo,b101uan08.3
+
+  reg:
+    description: the virtual channel number of a DSI peripheral
+
+  enable-gpios:
+    description: a GPIO spec for the enable pin
+
+  pp1800-supply:
+    description: core voltage supply
+
+  avdd-supply:
+    description: phandle of the regulator that provides positive voltage
+
+  avee-supply:
+    description: phandle of the regulator that provides negative voltage
+
+  backlight:
+    description: phandle of the backlight device attached to the panel
+
+  port: true
+
+required:
+ - compatible
+ - reg
+ - enable-gpios
+ - pp1800-supply
+ - avdd-supply
+ - avee-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "boe,tv101wum-nl6";
+            reg = <0>;
+            enable-gpios = <&pio 45 0>;
+            avdd-supply = <&ppvarn_lcd>;
+            avee-supply = <&ppvarp_lcd>;
+            pp1800-supply = <&pp1800_lcd>;
+            backlight = <&backlight_lcd0>;
+            status = "okay";
+            port {
+                panel_in: endpoint {
+                    remote-endpoint = <&dsi_out>;
+                };
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
deleted file mode 100644
index 057f7f3..0000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s043wq26h-ct7"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
deleted file mode 100644
index 505615d..0000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s070wv95-ct16"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
deleted file mode 100644
index dd22685..0000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa070wp03xg"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
deleted file mode 100644
index f24614e..0000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wa01a"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
deleted file mode 100644
index 0ab2c05..0000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wb03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
deleted file mode 100644
index 897085e..0000000
--- a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
-
-Required properties:
-- compatible: should be "dataimage,scf0700c48ggu18"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index 78222ce..7f55ad4 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -1,123 +1 @@
-display-timing bindings
-=======================
-
-display-timings node
---------------------
-
-required properties:
- - none
-
-optional properties:
- - native-mode: The native mode for the display, in case multiple modes are
-		provided. When omitted, assume the first node is the native.
-
-timing subnode
---------------
-
-required properties:
- - hactive, vactive: display resolution
- - hfront-porch, hback-porch, hsync-len: horizontal display timing parameters
-   in pixels
-   vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
-   lines
- - clock-frequency: display clock in Hz
-
-optional properties:
- - hsync-active: hsync pulse is active low/high/ignored
- - vsync-active: vsync pulse is active low/high/ignored
- - de-active: data-enable pulse is active low/high/ignored
- - pixelclk-active: with
-			- active high = drive pixel data on rising edge/
-					sample data on falling edge
-			- active low  = drive pixel data on falling edge/
-					sample data on rising edge
-			- ignored     = ignored
- - syncclk-active: with
-			- active high = drive sync on rising edge/
-					sample sync on falling edge of pixel
-					clock
-			- active low  = drive sync on falling edge/
-					sample sync on rising edge of pixel
-					clock
-			- omitted     = same configuration as pixelclk-active
- - interlaced (bool): boolean to enable interlaced mode
- - doublescan (bool): boolean to enable doublescan mode
- - doubleclk (bool): boolean to enable doubleclock mode
-
-All the optional properties that are not bool follow the following logic:
-    <1>: high active
-    <0>: low active
-    omitted: not used on hardware
-
-There are different ways of describing the capabilities of a display. The
-devicetree representation corresponds to the one commonly found in datasheets
-for displays. If a display supports multiple signal timings, the native-mode
-can be specified.
-
-The parameters are defined as:
-
-  +----------+-------------------------------------+----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vback_porch                 |          |       |
-  |          |        v                            |          |       |
-  +----------#######################################----------+-------+
-  |          #        ^                            #          |       |
-  |          #        |                            #          |       |
-  |  hback   #        |                            #  hfront  | hsync |
-  |   porch  #        |       hactive              #  porch   |  len  |
-  |<-------->#<-------+--------------------------->#<-------->|<----->|
-  |          #        |                            #          |       |
-  |          #        |vactive                     #          |       |
-  |          #        |                            #          |       |
-  |          #        v                            #          |       |
-  +----------#######################################----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vfront_porch                |          |       |
-  |          |        v                            |          |       |
-  +----------+-------------------------------------+----------+-------+
-  |          |        ^                            |          |       |
-  |          |        |vsync_len                   |          |       |
-  |          |        v                            |          |       |
-  +----------+-------------------------------------+----------+-------+
-
-Note: In addition to being used as subnode(s) of display-timings, the timing
-      subnode may also be used on its own. This is appropriate if only one mode
-      need be conveyed. In this case, the node should be named 'panel-timing'.
-
-
-Example:
-
-	display-timings {
-		native-mode = <&timing0>;
-		timing0: 1080p24 {
-			/* 1920x1080p24 */
-			clock-frequency = <52000000>;
-			hactive = <1920>;
-			vactive = <1080>;
-			hfront-porch = <25>;
-			hback-porch = <25>;
-			hsync-len = <25>;
-			vback-porch = <2>;
-			vfront-porch = <2>;
-			vsync-len = <2>;
-			hsync-active = <1>;
-		};
-	};
-
-Every required property also supports the use of ranges, so the commonly used
-datasheet description with minimum, typical and maximum values can be used.
-
-Example:
-
-	timing1: timing {
-		/* 1920x1080p24 */
-		clock-frequency = <148500000>;
-		hactive = <1920>;
-		vactive = <1080>;
-		hsync-len = <0 44 60>;
-		hfront-porch = <80 88 95>;
-		hback-porch = <100 148 160>;
-		vfront-porch = <0 4 6>;
-		vback-porch = <0 36 50>;
-		vsync-len = <0 5 6>;
-	};
+See display-timings.yaml in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timings.yaml b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
new file mode 100644
index 0000000..c8c0c9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/display-timings.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: display timing bindings
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  A display panel may be able to handle several display timings,
+  with different resolutions.
+  The display-timings node makes it possible to specify the timing
+  and to specify the timing that is native for the display.
+
+properties:
+  $nodename:
+    const: display-timings
+
+  native-mode:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      The default display timing is the one specified as native-mode.
+      If no native-mode is specified then the first node is assumed the
+      native mode.
+
+patternProperties:
+  "^timing":
+    type: object
+    allOf:
+      - $ref: panel-timing.yaml#
+
+additionalProperties: false
+
+examples:
+  - |+
+
+    /*
+     * Example that specifies panel timing using minimum, typical,
+     * maximum values as commonly used in datasheet description.
+     * timing1 is the native-mode.
+     */
+    display-timings {
+        native-mode = <&timing1>;
+        timing0 {
+            /* 1920x1080p24 */
+            clock-frequency = <148500000>;
+            hactive = <1920>;
+            vactive = <1080>;
+            hsync-len = <0 44 60>;
+            hfront-porch = <80 88 95>;
+            hback-porch = <100 148 160>;
+            vfront-porch = <0 4 6>;
+            vback-porch = <0 36 50>;
+            vsync-len = <0 5 6>;
+        };
+        timing1 {
+            /* 1920x1080p24 */
+            clock-frequency = <52000000>;
+            hactive = <1920>;
+            vactive = <1080>;
+            hfront-porch = <25>;
+            hback-porch = <25>;
+            hsync-len = <0 25 25>;
+            vback-porch = <2>;
+            vfront-porch = <2>;
+            vsync-len = <2>;
+            hsync-active = <1>;
+            pixelclk-active = <1>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
deleted file mode 100644
index fbf5dcd..0000000
--- a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "dlc,dlc1010gig"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- enable-gpios: See simple-panel.txt
-- backlight: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
deleted file mode 100644
index b7ac1c7..0000000
--- a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Emerging Display Technology Corp. Displays
-==========================================
-
-
-Display bindings for EDT Display Technology Corp. Displays which are
-compatible with the simple-panel binding, which is specified in
-simple-panel.txt
-
-3,5" QVGA TFT Panels
---------------------
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatbile          | description                         |
-+=================+=====================+=====================================+
-| ET035012DM6     | edt,et035012dm6     | 3.5" QVGA TFT LCD panel             |
-+-----------------+---------------------+-------------------------------------+
-
-4,3" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatbile          | description                         |
-+=================+=====================+=====================================+
-| ETM0430G0DH6    | edt,etm0430g0dh6    | 480x272 TFT Display                 |
-+-----------------+---------------------+-------------------------------------+
-
-5,7" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatbile          | description                         |
-+=================+=====================+=====================================+
-| ET057090DHU     | edt,et057090dhu     | 5.7" VGA TFT LCD panel              |
-+-----------------+---------------------+-------------------------------------+
-
-
-7,0" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier      | compatbile          | description                         |
-+=================+=====================+=====================================+
-| ETM0700G0DH6    | edt,etm070080dh6    | WVGA TFT Display with capacitive    |
-|                 | edt,etm0700g0dh6    | Touchscreen                         |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0BDH6   | edt,etm070080bdh6   | Same as ETM0700G0DH6 but with       |
-|                 |                     | inverted pixel clock.               |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0EDH6   | edt,etm070080edh6   | Same display as the ETM0700G0BDH6,  |
-|                 |                     | but with changed Hardware for the   |
-|                 |                     | backlight and the touch interface   |
-+-----------------+---------------------+-------------------------------------+
-| ET070080DH6     | edt,etm070080dh6    | Same timings as the ETM0700G0DH6,   |
-|                 |                     | but with resistive touch.           |
-+-----------------+---------------------+-------------------------------------+
-
diff --git a/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
new file mode 100644
index 0000000..aa761f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/elida,kd35t133.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Elida KD35T133 3.5in 320x480 DSI panel
+
+maintainers:
+  - Heiko Stuebner <heiko.stuebner@theobroma-systems.com>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: elida,kd35t133
+  reg: true
+  backlight: true
+  reset-gpios: true
+  iovcc-supply:
+     description: regulator that supplies the iovcc voltage
+  vdd-supply:
+     description: regulator that supplies the vdd voltage
+
+required:
+  - compatible
+  - reg
+  - backlight
+  - iovcc-supply
+  - vdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "elida,kd35t133";
+            reg = <0>;
+            backlight = <&backlight>;
+            iovcc-supply = <&vcc_1v8>;
+            vdd-supply = <&vcc3v3_lcd>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt b/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
deleted file mode 100644
index 82d22e1..0000000
--- a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "evervision,vgg804821"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- backlight: See simple-panel.txt
-- enable-gpios: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
new file mode 100644
index 0000000..927f1ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/feixin,k101-im2ba02.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Feixin K101 IM2BA02 10.1" MIPI-DSI LCD panel
+
+maintainers:
+  - Icenowy Zheng <icenowy@aosc.io>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: feixin,k101-im2ba02
+  reg: true
+  backlight: true
+  reset-gpios: true
+  avdd-supply:
+     description: regulator that supplies the AVDD voltage
+  dvdd-supply:
+     description: regulator that supplies the DVDD voltage
+  cvdd-supply:
+     description: regulator that supplies the CVDD voltage
+
+required:
+  - compatible
+  - reg
+  - avdd-supply
+  - dvdd-supply
+  - cvdd-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "feixin,k101-im2ba02";
+            reg = <0>;
+            avdd-supply = <&reg_dc1sw>;
+            dvdd-supply = <&reg_dc1sw>;
+            cvdd-supply = <&reg_ldo_io1>;
+            reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>;
+            backlight = <&backlight>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt b/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
deleted file mode 100644
index b47f9d8..0000000
--- a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Foxlink Group 5" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "foxlink,fl500wvr00-a0t"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt b/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
deleted file mode 100644
index 6c9156f..0000000
--- a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-FriendlyELEC HD702E 800x1280 LCD panel
-
-HD702E lcd is FriendlyELEC developed eDP LCD panel with 800x1280
-resolution. It has built in Goodix, GT9271 captive touchscreen
-with backlight adjustable via PWM.
-
-Required properties:
-- compatible: should be "friendlyarm,hd702e"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
-
-	panel {
-		compatible ="friendlyarm,hd702e", "simple-panel";
-		backlight = <&backlight>;
-		power-supply = <&vcc3v3_sys>;
-
-		port {
-			panel_in_edp: endpoint {
-				remote-endpoint = <&edp_out_panel>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt b/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
deleted file mode 100644
index 24b0b62..0000000
--- a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "giantplus,gpg48273qs5"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
deleted file mode 100644
index 7da1d5c..0000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "hannstar,hsd070pww1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
deleted file mode 100644
index 8270319..0000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
-
-Required properties:
-- compatible: should be "hannstar,hsd100pxn1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
deleted file mode 100644
index 04caaae..0000000
--- a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "hit,tx23d38vm0caa"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt b/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
deleted file mode 100644
index 4104226..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT043TN24 4.3" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at043tn24"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt b/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
deleted file mode 100644
index 3e10cd7..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT070TN92 7.0" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at070tn92"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
deleted file mode 100644
index 7c234cf..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g070y2-l01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
deleted file mode 100644
index 9e75904..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
-
-Required properties:
-- compatible: should be "innolux,g101ice-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
deleted file mode 100644
index 2743b07..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121i1-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
deleted file mode 100644
index 6497446..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121x1-l03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt b/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
deleted file mode 100644
index 081bb93..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n116bge"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt b/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
deleted file mode 100644
index 7825844..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-InnoLux 15.6" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n156bge-l21"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt b/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
deleted file mode 100644
index 824f87f..0000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,zj070na-01p"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
deleted file mode 100644
index be7ac66..0000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
-
-Required properties:
-- compatible: should be "koe,tx14d24vm1bpa"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX53 based board
-
-	lcd_panel: lcd-panel {
-		compatible = "koe,tx14d24vm1bpa";
-		backlight = <&backlight_lcd>;
-		power-supply = <&reg_3v3>;
-
-		port {
-			lcd_panel_in: endpoint {
-				remote-endpoint = <&lcd_display_out>;
-			};
-		};
-	};
-
-Then one needs to extend the dispX node:
-
-	lcd_display: disp1 {
-
-		port@1 {
-			reg = <1>;
-
-			lcd_display_out: endpoint {
-				remote-endpoint = <&lcd_panel_in>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
deleted file mode 100644
index 6a036ed..0000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "koe,tx31d200vm0baa"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LVDS panel input.
-
-Example:
-	panel {
-		compatible = "koe,tx31d200vm0baa";
-		backlight = <&backlight_lvds>;
-
-		port {
-			panel_in: endpoint {
-				remote-endpoint = <&lvds0_out>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
deleted file mode 100644
index a8e940f..0000000
--- a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "kyo,tcg121xglp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index a614644..fd931b29 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -34,7 +34,6 @@
 
 examples:
   - |
-    dsi@ff450000 {
+    dsi {
         #address-cells = <1>;
         #size-cells = <0>;
-        reg = <0xff450000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
deleted file mode 100644
index 74ee7ea..0000000
--- a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "lemaker,bl035-rgb-002"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt b/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
deleted file mode 100644
index a7588e5..0000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 7" (800x480 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lb070wv8"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
deleted file mode 100644
index b9877ac..0000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp079qx1-sp0v"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
deleted file mode 100644
index 4214151..0000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 9.7" (2048x1536 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp097qx1-spa1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
deleted file mode 100644
index 8c5de69..0000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.0" (1920x1280 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp120up1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt b/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
deleted file mode 100644
index 9f262e0..0000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.9" (2560x1700 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp129qe"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
deleted file mode 100644
index 7d8f6ee..0000000
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "mitsubishi,aa070mc01-ca1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt b/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
deleted file mode 100644
index 71cbc49..0000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl12880bc20-05"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
deleted file mode 100644
index 8e1914d..0000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl4827hc19-05b"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt b/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
deleted file mode 100644
index c6d06b5..0000000
--- a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Netron-DY E231732 7.0" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "netron-dy,e231732"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
deleted file mode 100644
index e78292b..0000000
--- a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Newhaven Display International 480 x 272 TFT LCD panel
-
-Required properties:
-- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt b/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
deleted file mode 100644
index 1a639fd..0000000
--- a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nlt,nl192108ac18-02d"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
new file mode 100644
index 0000000..73d2ff3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt35510.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT35510-based display panels
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - const: hydis,hva40wv1
+      - const: novatek,nt35510
+    description: This indicates the specific panel (manufacturer and
+      model) that is in turn using the NT35510 panel driver. The
+      compatible string determines how the NT35510 panel driver shall be
+      configured to work with the indicated panel. The novatek,nt35510
+      compatible shall always be provided as a fallback.
+  reg: true
+  reset-gpios: true
+  vdd-supply:
+     description: regulator that supplies the vdd voltage
+  vddi-supply:
+     description: regulator that supplies the vddi voltage
+  backlight: true
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        panel@0 {
+            compatible = "hydis,hva40wv1", "novatek,nt35510";
+            reg = <0>;
+            vdd-supply = <&ab8500_ldo_aux4_reg>;
+            vddi-supply = <&ab8500_ldo_aux6_reg>;
+            reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+            backlight = <&gpio_bl>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt b/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
deleted file mode 100644
index 17bcd01..0000000
--- a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
-
-Required properties:
-- compatible: should be "nvd,9128"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
deleted file mode 100644
index ddf8e21..0000000
--- a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
-
-Required properties:
-- compatible: should be "okaya,rs800480t-7x0gp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
deleted file mode 100644
index 74540a0..0000000
--- a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Olimex 4.3" TFT LCD panel
-
-Required properties:
-- compatible: should be "olimex,lcd-olinuxino-43-ts"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt b/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
deleted file mode 100644
index 3d8a5e0..0000000
--- a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-On Tat Industrial Company 7" DPI TFT panel.
-
-Required properties:
-- compatible: should be "ontat,yx700wv03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
deleted file mode 100644
index 203b03e..0000000
--- a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
-
-The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
-a MIPI-DSI video interface. Its backlight is managed through the DSI link.
-
-Required properties:
-  - compatible: "orisetech,otm8009a"
-  - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
-  - reset-gpios: a GPIO spec for the reset pin (active low).
-  - power-supply: phandle of the regulator that provides the supply voltage.
-
-Example:
-&dsi {
-	...
-	panel@0 {
-		compatible = "orisetech,otm8009a";
-		reg = <0>;
-		reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
-		power-supply = <&v1v8>;
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
new file mode 100644
index 0000000..4b6dda6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/orisetech,otm8009a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
+
+maintainers:
+  - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+  The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
+  a MIPI-DSI video interface. Its backlight is managed through the DSI link.
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    const: orisetech,otm8009a
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+  reset-gpios:
+    maxItems: 1
+
+additionalProperties: false
+
+required:
+  - compatible
+  - reg
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "orisetech,otm8009a";
+        reg = <0>;
+        reset-gpios = <&gpiof 15 0>;
+        power-supply = <&v1v8>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
deleted file mode 100644
index c16907c..0000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m05dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
deleted file mode 100644
index 06a73c3..0000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m99dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
deleted file mode 100644
index de19e93..0000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com43h4m85ulc"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
deleted file mode 100644
index e57883c..0000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
-
-Required properties:
-- compatible: shall be "osddisplays,osd070t1718-19ts"
-- power-supply: see simple-panel.txt
-
-Optional properties:
-- backlight: see simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory. No other simple-panel properties than
-the ones specified herein are valid.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
deleted file mode 100644
index 85c0b2c..0000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
-
-Required properties:
-- compatible: should be "osddisplays,osd101t2045-53ts"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
deleted file mode 100644
index d328b03..0000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Panasonic Corporation 10.1" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f004b00"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
deleted file mode 100644
index 37dedf6..0000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Panasonic 10" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f034n00"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
-	mdss_dsi@fd922800 {
-		panel@0 {
-			compatible = "panasonic,vvx10f034n00";
-			reg = <0>;
-			power-supply = <&vreg_vsp>;
-			backlight = <&lp8566_wled>;
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index ef8d8cd..ed051ba 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -54,13 +54,20 @@
 
   # Display Timings
   panel-timing:
-    type: object
     description:
       Most display panels are restricted to a single resolution and
       require specific display timings. The panel-timing subnode expresses those
-      timings as specified in the timing subnode section of the display timing
-      bindings defined in
-      Documentation/devicetree/bindings/display/panel/display-timing.txt.
+      timings.
+    allOf:
+      - $ref: panel-timing.yaml#
+
+  display-timings:
+    description:
+      Some display panels support several resolutions with different timings.
+      The display-timings binding supports specifying several timings and
+      optionally specifying which timing is the native mode.
+    allOf:
+      - $ref: display-timings.yaml#
 
   # Connectivity
   port:
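
As an illustration of the distinction drawn above, a minimal sketch of the
two subnodes in a panel node (the compatible and timing values are borrowed
from the panel-dpi example later in this patch; the regulator phandle is
hypothetical):

	/* One fixed mode: a single panel-timing subnode */
	panel {
		compatible = "panel-dpi";
		power-supply = <&vcc_supply>;

		panel-timing {
			clock-frequency = <9200000>;
			hactive = <800>;
			vactive = <480>;
			/* plus porch and sync-length properties */
		};
	};

	/* Several modes: a display-timings subnode, native-mode picks one */
	panel {
		display-timings {
			native-mode = <&timing1>;
			timing0: timing0 { /* ... */ };
			timing1: timing1 { /* ... */ };
		};
	};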
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
deleted file mode 100644
index 6b203bc..0000000
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Generic MIPI DPI Panel
-======================
-
-Required properties:
-- compatible: "panel-dpi"
-
-Optional properties:
-- label: a symbolic name for the panel
-- enable-gpios: panel enable gpio
-- reset-gpios: GPIO to control the RESET pin
-- vcc-supply: phandle of regulator that will be used to enable power to the display
-- backlight: phandle of the backlight device
-
-Required nodes:
-- "panel-timing" containing video timings
-  (Documentation/devicetree/bindings/display/panel/display-timing.txt)
-- Video port for DPI input
-
-Example
--------
-
-lcd0: display@0 {
-        compatible = "samsung,lte430wq-f0c", "panel-dpi";
-        label = "lcd";
-
-        backlight = <&backlight>;
-
-        port {
-            lcd_in: endpoint {
-                    remote-endpoint = <&dpi_out>;
-            };
-        };
-
-        panel-timing {
-                clock-frequency = <9200000>;
-                hactive = <480>;
-                vactive = <272>;
-                hfront-porch = <8>;
-                hback-porch = <4>;
-                hsync-len = <41>;
-                vback-porch = <2>;
-                vfront-porch = <4>;
-                vsync-len = <10>;
-
-                hsync-active = <0>;
-                vsync-active = <0>;
-                de-active = <1>;
-                pixelclk-active = <1>;
-        };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
new file mode 100644
index 0000000..f638703
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-dpi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic MIPI DPI Panel
+
+maintainers:
+  - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    description:
+      Shall contain a panel specific compatible and "panel-dpi"
+      in that order.
+    items:
+      - {}
+      - const: panel-dpi
+
+  data-mapping:
+    enum:
+      - rgb24
+      - rgb565
+      - bgr666
+    description: |
+      Describes the media format, how the display panel is connected
+      to the display interface.
+
+  backlight: true
+  enable-gpios: true
+  height-mm: true
+  label: true
+  panel-timing: true
+  port: true
+  power-supply: true
+  reset-gpios: true
+  width-mm: true
+
+required:
+  - panel-timing
+  - power-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    panel {
+        compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+        label = "osddisplay";
+        power-supply = <&vcc_supply>;
+        data-mapping = "rgb565";
+        backlight = <&backlight>;
+
+        port {
+            lcd_in: endpoint {
+                remote-endpoint = <&dpi_out>;
+            };
+        };
+        panel-timing {
+            clock-frequency = <9200000>;
+            hactive = <800>;
+            vactive = <480>;
+            hfront-porch = <8>;
+            hback-porch = <4>;
+            hsync-len = <41>;
+            vback-porch = <2>;
+            vfront-porch = <4>;
+            vsync-len = <10>;
+
+            hsync-active = <0>;
+            vsync-active = <0>;
+            de-active = <1>;
+            pixelclk-active = <1>;
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
new file mode 100644
index 0000000..b2e8742
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-simple-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple DSI panels with a single power-supply
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  This binding file is a collection of the DSI panels that
+  require only a single power-supply.
+  There may optionally be a backlight and an enable GPIO.
+  The panel may use an OF graph binding for the association to the display,
+  or it may be a direct child node of the display.
+
+  If the panel is more advanced, a dedicated binding file is required.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    enum:
+      # Compatibles must be listed in alphabetical order.
+      # The description in the comment is mandatory for each compatible.
+
+        # Panasonic 10" WUXGA TFT LCD panel
+      - panasonic,vvx10f034n00
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  backlight: true
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+additionalProperties: false
+
+required:
+  - compatible
+  - power-supply
+  - reg
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "panasonic,vvx10f034n00";
+        reg = <0>;
+        power-supply = <&vcc_lcd_reg>;
+
+        port {
+          panel: endpoint {
+            remote-endpoint = <&ltdc_out>;
+          };
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 8fe60ee2..393ffc6 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -33,16 +33,224 @@
       - ampire,am-480272h3tmqw-t01h
         # Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
       - ampire,am800480r3tmqwa1h
+        # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+      - auo,b080uan01
+        # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+      - auo,b101aw03
+        # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+      - auo,b101ean01
+        # AU Optronics Corporation 10.1" WXGA TFT LCD panel
+      - auo,b101xtn01
         # AUO B116XAK01 eDP TFT LCD panel
       - auo,b116xa01
+        # AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+      - auo,b116xw03
+        # AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
+      - auo,b133htn01
+        # AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
+      - auo,b133xtn01
+        # AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
+      - auo,g070vvn01
+        # AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+      - auo,g101evn010
+        # AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
+      - auo,g104sn02
+        # AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
+      - auo,g133han01
+        # AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
+      - auo,g185han01
+        # AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
+      - auo,p320hvn03
+        # AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
+      - auo,t215hvn01
+        # Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
+      - avic,tm070ddh03
+        # BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+      - boe,hv070wsa-100
+        # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
+      - boe,nv101wxmn51
         # BOE NV140FHM-N49 14.0" FHD a-Si FT panel
       - boe,nv140fhmn49
+        # Boe Corporation 8.0" WUXGA TFT LCD panel
+      - boe,tv080wum-nl0
+        # CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+      - cdtech,s043wq26h-ct7
+        # CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+      - cdtech,s070wv95-ct16
+        # Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
+      - chunghwa,claa070wp03xg
+        # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+      - chunghwa,claa101wa01a
+        # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+      - chunghwa,claa101wb03
+        # DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+      - dataimage,scf0700c48ggu18
+        # DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+      - dlc,dlc1010gig
+        # Emerging Display Technology Corp. 3.5" QVGA TFT LCD panel
+      - edt,et035012dm6
+        # Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch
+      - edt,etm043080dh6gp
+        # Emerging Display Technology Corp. 480x272 TFT Display
+      - edt,etm0430g0dh6
+        # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+      - edt,et057090dhu
+        # Emerging Display Technology Corp. WVGA TFT Display with capacitive
+        # touch (ETM0700G0DH6). The same compatible also covers the ET070080DH6,
+        # which has the same timings but resistive touch.
+      - edt,etm070080dh6
+        # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+      - edt,etm0700g0dh6
+        # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+        # Same as ETM0700G0DH6 but with inverted pixel clock.
+      - edt,etm070080bdh6
+        # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+        # Same display as the ETM0700G0BDH6, but with changed hardware for the
+        # backlight and the touch interface.
+      - edt,etm070080edh6
+        # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
+      - evervision,vgg804821
+        # Foxlink Group 5" WVGA TFT LCD panel
+      - foxlink,fl500wvr00-a0t
+        # Frida FRD350H54004 3.5" QVGA TFT LCD panel
+      - frida,frd350h54004
+        # FriendlyELEC HD702E 800x1280 LCD panel
+      - friendlyarm,hd702e
+        # GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
+      - giantplus,gpg48273qs5
         # GiantPlus GPM940B0 3.0" QVGA TFT LCD panel
       - giantplus,gpm940b0
+        # HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+      - hannstar,hsd070pww1
+        # HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
+      - hannstar,hsd100pxn1
+        # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+      - hit,tx23d38vm0caa
+        # Innolux AT043TN24 4.3" WQVGA TFT LCD panel
+      - innolux,at043tn24
+        # Innolux AT070TN92 7.0" WQVGA TFT LCD panel
+      - innolux,at070tn92
+        # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+      - innolux,g070y2-l01
+        # Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
+      - innolux,g101ice-l01
+        # Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+      - innolux,g121i1-l01
+        # Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
+      - innolux,g121x1-l03
+        # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
+      - innolux,n116bge
+        # InnoLux 15.6" WXGA TFT LCD panel
+      - innolux,n156bge-l21
+        # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+      - innolux,zj070na-01p
+        # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
+      - koe,tx14d24vm1bpa
+        # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
+      - koe,tx31d200vm0baa
+        # Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
+      - kyo,tcg121xglp
+        # LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
+      - lemaker,bl035-rgb-002
+        # LG 7" (800x480 pixels) TFT LCD panel
+      - lg,lb070wv8
+        # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+      - lg,lp079qx1-sp0v
+        # LG 9.7" (2048x1536 pixels) TFT LCD panel
+      - lg,lp097qx1-spa1
+        # LG 12.0" (1920x1280 pixels) TFT LCD panel
+      - lg,lp120up1
+        # LG 12.9" (2560x1700 pixels) TFT LCD panel
+      - lg,lp129qe
+        # Logic Technologies LT161010-2NHC 7" WVGA TFT Cap Touch Module
+      - logictechno,lt161010-2nhc
+        # Logic Technologies LT161010-2NHR 7" WVGA TFT Resistive Touch Module
+      - logictechno,lt161010-2nhr
+        # Logic Technologies LT170410-2WHC 10.1" 1280x800 IPS TFT Cap Touch Mod.
+      - logictechno,lt170410-2whc
+        # Mitsubishi AA070MC01 7.0" WVGA TFT LCD panel
+      - mitsubishi,aa070mc01-ca1
+        # NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
+      - nec,nl12880bc20-05
+        # NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
+      - nec,nl4827hc19-05b
+        # Netron-DY E231732 7.0" WSVGA TFT LCD panel
+      - netron-dy,e231732
+        # NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
+      - neweast,wjfh116008a
+        # Newhaven Display International 480 x 272 TFT LCD panel
+      - newhaven,nhd-4.3-480272ef-atxl
+        # NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
+      - nlt,nl192108ac18-02d
+        # New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
+      - nvd,9128
+        # OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+      - okaya,rs800480t-7x0gp
+        # Olimex 4.3" TFT LCD panel
+      - olimex,lcd-olinuxino-43-ts
+        # On Tat Industrial Company 7" DPI TFT panel.
+      - ontat,yx700wv03
+        # OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
+      - ortustech,com37h3m05dtc
+        # OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
+      - ortustech,com37h3m99dtc
+        # OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
+      - ortustech,com43h4m85ulc
+        # OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
+      - osddisplays,osd070t1718-19ts
+        # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
+      - osddisplays,osd101t2045-53ts
+        # QiaoDian XianShi Corporation 4.3" TFT LCD panel
+      - qiaodian,qd43003c0-40
+        # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+      - rocktech,rk070er9427
+        # Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800
+      - rocktech,rk101ii01d-ct
+        # Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+      - samsung,lsn122dl01-c01
+        # Samsung Electronics 10.1" WSVGA TFT LCD panel
+      - samsung,ltn101nt05
+        # Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
+      - samsung,ltn140at29-301
         # Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
       - satoz,sat050at40h12r2
+        # Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+      - sharp,lq035q7db03
+        # Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
+      - sharp,lq070y3dg3b
+        # Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+      - sharp,lq101k1ly04
+        # Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+      - sharp,lq123p1jx31
         # Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
       - sharp,ls020b1dd01d
+        # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
+      - shelly,sca07010-bfn-lnn
+        # Starry 12.2" (1920x1200 pixels) TFT LCD panel
+      - starry,kr122ea0sra
+        # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
+      - tianma,tm070jdhg30
+        # Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
+      - tianma,tm070rvhg71
+        # Toshiba 8.9" WXGA (1280x768) TFT LCD panel
+      - toshiba,lt089ac29000
+        # TPK U.S.A. LLC Fusion 7" 800 x 480 (WVGA) LCD panel with capacitive touch
+      - tpk,f07a-0102
+        # TPK U.S.A. LLC Fusion 10.1" 1024 x 600 (WSVGA) LCD panel with capacitive touch
+      - tpk,f10a-0102
+        # United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
+        # Supported are LVDS versions (-11T, -19T) and parallel ones
+        # (-T, -1T, -7T, -20T).
+      - urt,umsh-8596md-t
+      - urt,umsh-8596md-1t
+      - urt,umsh-8596md-7t
+      - urt,umsh-8596md-11t
+      - urt,umsh-8596md-19t
+      - urt,umsh-8596md-20t
+        # VXT 800x480 color TFT LCD panel
+      - vxt,vl050-8048nt-c01
+        # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+      - winstar,wf35ltiacd
 
   backlight: true
   enable-gpios: true
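
As an illustration of the consolidated binding, a minimal sketch of a node
for one of the panels migrated above (the regulator and GPIO phandles are
hypothetical; the required and optional properties follow the deleted
innolux,g070y2-l01.txt):

	#include <dt-bindings/gpio/gpio.h>

	panel {
		compatible = "innolux,g070y2-l01";
		power-supply = <&display_vcc>;
		enable-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
		backlight = <&backlight>;
	};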
diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
new file mode 100644
index 0000000..bd558ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
@@ -0,0 +1,234 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-timing.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: panel timing bindings
+
+maintainers:
+  - Thierry Reding <thierry.reding@gmail.com>
+  - Sam Ravnborg <sam@ravnborg.org>
+
+description: |
+  There are different ways of describing the timing data of a panel. The
+  devicetree representation corresponds to the one commonly found in datasheets
+  for panels.
+
+  The parameters are defined as seen in the following illustration.
+
+  +----------+-------------------------------------+----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vback_porch                 |          |       |
+  |          |        v                            |          |       |
+  +----------#######################################----------+-------+
+  |          #        ^                            #          |       |
+  |          #        |                            #          |       |
+  |  hback   #        |                            #  hfront  | hsync |
+  |   porch  #        |       hactive              #  porch   |  len  |
+  |<-------->#<-------+--------------------------->#<-------->|<----->|
+  |          #        |                            #          |       |
+  |          #        |vactive                     #          |       |
+  |          #        |                            #          |       |
+  |          #        v                            #          |       |
+  +----------#######################################----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vfront_porch                |          |       |
+  |          |        v                            |          |       |
+  +----------+-------------------------------------+----------+-------+
+  |          |        ^                            |          |       |
+  |          |        |vsync_len                   |          |       |
+  |          |        v                            |          |       |
+  +----------+-------------------------------------+----------+-------+
+
+
+  The following shows the panel timings with time on the x-axis.
+  This matches the timing diagrams often found in data sheets.
+
+              Active                 Front           Sync           Back
+              Region                 Porch                          Porch
+  <-----------------------><----------------><-------------><-------------->
+    //////////////////////|
+   ////////////////////// |
+  //////////////////////  |..................               ................
+                                             _______________
+
+  Timing can be specified either as a typical value or as a tuple
+  of min, typ, max values.
+
+properties:
+
+  clock-frequency:
+    description: Panel clock in Hz
+
+  hactive:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Horizontal panel resolution in pixels
+
+  vactive:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Vertical panel resolution in pixels
+
+  hfront-porch:
+    description: Horizontal front porch panel timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of pixels
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of pixels
+
+  hback-porch:
+    description: Horizontal back porch timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of pixels
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of pixels
+
+  hsync-len:
+    description: Horizontal sync length panel timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of pixels
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of pixels
+
+  vfront-porch:
+    description: Vertical front porch panel timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of lines
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of lines
+
+  vback-porch:
+    description: Vertical back porch panel timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of lines
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of lines
+
+  vsync-len:
+    description: Vertical sync length panel timing
+    oneOf:
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+        - maxItems: 1
+          items:
+            description: typical number of lines
+      - allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32-array
+        - minItems: 3
+          maxItems: 3
+          items:
+            description: min, typ, max number of lines
+
+  hsync-active:
+    description: |
+      Horizontal sync pulse.
+      0 selects active low, 1 selects active high.
+      If omitted, it is not used by the hardware.
+    enum: [0, 1]
+
+  vsync-active:
+    description: |
+      Vertical sync pulse.
+      0 selects active low, 1 selects active high.
+      If omitted, it is not used by the hardware.
+    enum: [0, 1]
+
+  de-active:
+    description: |
+      Data enable.
+      0 selects active low, 1 selects active high.
+      If omitted, it is not used by the hardware.
+    enum: [0, 1]
+
+  pixelclk-active:
+    description: |
+      Data driving on rising or falling edge.
+      Use 0 to drive pixel data on falling edge and
+      sample data on rising edge.
+      Use 1 to drive pixel data on rising edge and
+      sample data on falling edge
+    enum: [0, 1]
+
+  syncclk-active:
+    description: |
+      Drive sync on rising or sample sync on falling edge.
+      If not specified then the setup is as specified by pixelclk-active.
+      Use 0 to drive sync on falling edge and
+      sample sync on rising edge of pixel clock.
+      Use 1 to drive sync on rising edge and
+      sample sync on falling edge of pixel clock
+    enum: [0, 1]
+
+  interlaced:
+    type: boolean
+    description: Enable interlaced mode
+
+  doublescan:
+    type: boolean
+    description: Enable double scan mode
+
+  doubleclk:
+    type: boolean
+    description: Enable double clock mode
+
+required:
+  - clock-frequency
+  - hactive
+  - vactive
+  - hfront-porch
+  - hback-porch
+  - hsync-len
+  - vfront-porch
+  - vback-porch
+  - vsync-len
+
+additionalProperties: false
+
+...
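
To make the schema above concrete, here is a sketch of a panel-timing node
(the values are illustrative, not taken from a real panel datasheet). Most
properties use the single "typical" form; hfront-porch shows the
three-element min/typ/max form:

	panel-timing {
		clock-frequency = <33500000>;
		hactive = <800>;
		vactive = <480>;
		hfront-porch = <40 210 354>;	/* min, typ, max pixels */
		hback-porch = <46>;
		hsync-len = <1>;
		vfront-porch = <10>;
		vback-porch = <23>;
		vsync-len = <1>;
		de-active = <1>;
	};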
diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
deleted file mode 100644
index 0fbdab8..0000000
--- a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-QiaoDian XianShi Corporation 4"3 TFT LCD panel
-
-Required properties:
-- compatible: should be "qiaodian,qd43003c0-40"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
deleted file mode 100644
index cbb79ef..0000000
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
-
-The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
-panel connected using a MIPI-DSI video interface.
-
-Required properties:
-  - compatible: "raydium,rm68200"
-  - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
-  - reset-gpios: a GPIO spec for the reset pin (active low).
-  - power-supply: phandle of the regulator that provides the supply voltage.
-  - backlight: phandle of the backlight device attached to the panel.
-
-Example:
-&dsi {
-	...
-	panel@0 {
-		compatible = "raydium,rm68200";
-		reg = <0>;
-		reset-gpios = <&gpiof 15 GPIO_ACTIVE_LOW>;
-		power-supply = <&v1v8>;
-		backlight = <&pwm_backlight>;
-	};
-};
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
new file mode 100644
index 0000000..a35ba16
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm68200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
+
+maintainers:
+  - Philippe CORNU <philippe.cornu@st.com>
+
+description: |
+  The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
+  panel connected using a MIPI-DSI video interface.
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+
+  compatible:
+    const: raydium,rm68200
+
+  reg:
+    maxItems: 1
+    description: DSI virtual channel
+
+  backlight: true
+  enable-gpios: true
+  port: true
+  power-supply: true
+
+  reset-gpios:
+    maxItems: 1
+
+additionalProperties: false
+
+required:
+  - compatible
+  - power-supply
+  - reg
+
+examples:
+  - |
+    dsi {
+      #address-cells = <1>;
+      #size-cells = <0>;
+      panel@0 {
+        compatible = "raydium,rm68200";
+        reg = <0>;
+        reset-gpios = <&gpiof 15 0>;
+        power-supply = <&v1v8>;
+        backlight = <&pwm_backlight>;
+      };
+    };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
deleted file mode 100644
index eb1fb9f..0000000
--- a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "rocktech,rk070er9427"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-Example:
-	panel {
-		compatible = "rocktech,rk070er9427";
-		backlight = <&backlight_lcd>;
-
-		port {
-			lcd_panel_in: endpoint {
-				remote-endpoint = <&lcd_display_out>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
deleted file mode 100644
index dba298b..0000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung 12.2" (2560x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,lsn122dl01-c01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
deleted file mode 100644
index ef522c6..0000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn101nt05"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
deleted file mode 100644
index e7f969d..0000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn140at29-301"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
new file mode 100644
index 0000000..7a685d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6e88a0-ams452ef01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMS452EF01 AMOLED panel with S6E88A0 video mode DSI controller
+
+maintainers:
+  - Michael Srba <Michael.Srba@seznam.cz>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    const: samsung,s6e88a0-ams452ef01
+  reg: true
+  reset-gpios: true
+  vdd3-supply:
+    description: core voltage supply
+  vci-supply:
+    description: voltage supply for analog circuits
+
+required:
+  - compatible
+  - reg
+  - vdd3-supply
+  - vci-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    dsi {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            panel@0 {
+                    reg = <0>;
+
+                    compatible = "samsung,s6e88a0-ams452ef01";
+
+                    vdd3-supply = <&pm8916_l17>;
+                    vci-supply = <&reg_vlcd_vci>;
+                    reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+            };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
deleted file mode 100644
index 0753f69..0000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq035q7db03"
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
deleted file mode 100644
index 95534b5..0000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq070y3dg3b"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
deleted file mode 100644
index 4aff25b..0000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq101k1ly04"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
deleted file mode 100644
index bcb0e8a..0000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp 12.3" (2400x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq123p1jx31"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt b/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
deleted file mode 100644
index fc1ea9e..0000000
--- a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "shelly,sca07010-bfn-lnn"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
deleted file mode 100644
index 1e87fe6..0000000
--- a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Starry 12.2" (1920x1200 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "starry,kr122ea0sra"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
deleted file mode 100644
index eb9501a..0000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070jdhg30"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
deleted file mode 100644
index b25261e..0000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070rvhg71"
-- power-supply: single regulator to provide the supply voltage
-- backlight: phandle of the backlight device attached to the panel
-
-Required nodes:
-- port: LVDS port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
-	panel: panel-lvds0 {
-		compatible = "tianma,tm070rvhg71";
-		backlight = <&backlight_lvds>;
-		power-supply = <&reg_lvds>;
-
-		port {
-			panel_in_lvds0: endpoint {
-				remote-endpoint = <&lvds0_out>;
-			};
-		};
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
deleted file mode 100644
index 8982611..0000000
--- a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Toshiba 8.9" WXGA (1280x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "toshiba,lt089ac29000"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
deleted file mode 100644
index a2613b9..0000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 7" integrated projected capacitive touch display with,
-800 x 480 (WVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f07a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
deleted file mode 100644
index b9d0511..0000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 10.1" integrated projected capacitive touch display with,
-1024 x 600 (WSVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f10a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt b/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
deleted file mode 100644
index 088a6ce..0000000
--- a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
-
-Supported are LVDS versions (-11T, -19T) and parallel ones
-(-T, -1T, -7T, -20T).
-
-Required properties:
-- compatible: should be one of:
-  "urt,umsh-8596md-t",
-  "urt,umsh-8596md-1t",
-  "urt,umsh-8596md-7t",
-  "urt,umsh-8596md-11t",
-  "urt,umsh-8596md-19t",
-  "urt,umsh-8596md-20t".
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt b/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
deleted file mode 100644
index b42bf06..0000000
--- a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-VXT 800x480 color TFT LCD panel
-
-Required properties:
-- compatible: should be "vxt,vl050-8048nt-c01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
deleted file mode 100644
index 2a7e6e3..0000000
--- a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
-
-Required properties:
-- compatible: should be "winstar,wf35ltiacd"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
-	backlight: backlight {
-		compatible = "pwm-backlight";
-		pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
-		brightness-levels = <0 31 63 95 127 159 191 223 255>;
-		default-brightness-level = <191>;
-		power-supply = <&bl_reg>;
-	};
-
-	bl_reg: backlight_regulator {
-		compatible = "regulator-fixed";
-		regulator-name = "backlight-power-supply";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-	};
-
-	panel: panel {
-		compatible = "winstar,wf35ltiacd", "simple-panel";
-		backlight = <&backlight>;
-		power-supply = <&panel_reg>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			panel_input: endpoint {
-				remote-endpoint = <&hlcdc_panel_output>;
-			};
-		};
-	};
-
-	panel_reg: panel_regulator {
-		compatible = "regulator-fixed";
-		regulator-name = "panel-power-supply";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-	};
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index 22c91be..d9fdb58 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -34,7 +34,7 @@
 
 examples:
   - |
-    dsi@ff450000 {
+    dsi {
         #address-cells = <1>;
         #size-cells = <0>;
         reg = <0xff450000 0x1000>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
deleted file mode 100644
index 5707af8..0000000
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Rockchip DRM master device
-================================
-
-The Rockchip DRM master device is a virtual device needed to list all
-vop devices or other display interface nodes that comprise the
-graphics subsystem.
-
-Required properties:
-- compatible: Should be "rockchip,display-subsystem"
-- ports: Should contain a list of phandles pointing to display interface port
-  of vop devices. vop definitions as defined in
-  Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
-
-example:
-
-display-subsystem {
-	compatible = "rockchip,display-subsystem";
-	ports = <&vopl_out>, <&vopb_out>;
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
new file mode 100644
index 0000000..ec8ae74
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip-drm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip DRM master device
+
+maintainers:
+  - Sandy Huang <hjc@rock-chips.com>
+  - Heiko Stuebner <heiko@sntech.de>
+
+description: |
+  The Rockchip DRM master device is a virtual device needed to list all
+  vop devices or other display interface nodes that comprise the
+  graphics subsystem.
+
+properties:
+  compatible:
+    const: rockchip,display-subsystem
+
+  ports:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description: |
+      Should contain a list of phandles pointing to the display interface
+      ports of VOP devices, with the VOP definitions as described in
+      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+
+required:
+  - compatible
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    display-subsystem {
+        compatible = "rockchip,display-subsystem";
+        ports = <&vopl_out>, <&vopb_out>;
+    };
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
deleted file mode 100644
index cd5c718..0000000
--- a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Sitronix ST7735R display panels
-
-This binding is for display panels using a Sitronix ST7735R controller in SPI
-mode.
-
-Required properties:
-- compatible:	"jianda,jd-t18003-t01", "sitronix,st7735r"
-- dc-gpios:	Display data/command selection (D/CX)
-- reset-gpios:	Reset signal (RSTX)
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in ../spi/spi-bus.txt must be specified.
-
-Optional properties:
-- rotation:	panel rotation in degrees counter clockwise (0,90,180,270)
-- backlight:	phandle of the backlight device attached to the panel
-
-Example:
-
-	backlight: backlight {
-		compatible = "gpio-backlight";
-		gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
-	};
-
-	...
-
-	display@0{
-		compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
-		reg = <0>;
-		spi-max-frequency = <32000000>;
-		dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
-		reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
-		rotation = <270>;
-		backlight = &backlight;
-	};
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
new file mode 100644
index 0000000..0cebaae
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sitronix,st7735r.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7735R Display Panels Device Tree Bindings
+
+maintainers:
+  - David Lechner <david@lechnology.com>
+
+description:
+  This binding is for display panels using a Sitronix ST7715R or ST7735R
+  controller in SPI mode.
+
+allOf:
+  - $ref: panel/panel-common.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - description:
+          Adafruit 1.8" 160x128 Color TFT LCD (Product ID 358 or 618)
+        items:
+          - enum:
+              - jianda,jd-t18003-t01
+          - const: sitronix,st7735r
+      - description:
+          Okaya 1.44" 128x128 Color TFT LCD (e.g. Renesas YRSK-LCD-PMOD)
+        items:
+          - enum:
+              - okaya,rh128128t
+          - const: sitronix,st7715r
+
+  spi-max-frequency:
+    maximum: 32000000
+
+  dc-gpios:
+    maxItems: 1
+    description: Display data/command selection (D/CX)
+
+  backlight: true
+  reg: true
+  reset-gpios: true
+  rotation: true
+
+required:
+  - compatible
+  - reg
+  - dc-gpios
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+
+    backlight: backlight {
+            compatible = "gpio-backlight";
+            gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
+    };
+
+    spi {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            display@0 {
+                    compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
+                    reg = <0>;
+                    spi-max-frequency = <32000000>;
+                    dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
+                    reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
+                    rotation = <270>;
+            };
+    };
+
+...
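
The schema above also admits the Okaya RH128128T on the ST7715R controller;
a sketch under the same required properties (the SPI bus, GPIO numbers and
the &gpio phandle are illustrative):

	spi {
		#address-cells = <1>;
		#size-cells = <0>;

		display@0 {
			compatible = "okaya,rh128128t", "sitronix,st7715r";
			reg = <0>;
			spi-max-frequency = <32000000>;
			dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
			reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
		};
	};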
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
new file mode 100644
index 0000000..cac61a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,am65x-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments AM65x Display Subsystem
+
+maintainers:
+  - Jyri Sarha <jsarha@ti.com>
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+  The AM65x TI Keystone Display SubSystem has two output ports and
+  two video planes. The first video port supports OLDI and the second
+  supports DPI format. The first plane is a full video plane with all
+  features and the second is a "lite plane" without scaling support.
+
+properties:
+  compatible:
+    const: ti,am65x-dss
+
+  reg:
+    description:
+      Addresses of each DSS memory region described in the SoC's TRM.
+    items:
+      - description: common DSS register area
+      - description: VIDL1 light video plane
+      - description: VID video plane
+      - description: OVR1 overlay manager for vp1
+      - description: OVR2 overlay manager for vp2
+      - description: VP1 video port 1
+      - description: VP2 video port 2
+
+  reg-names:
+    items:
+      - const: common
+      - const: vidl1
+      - const: vid
+      - const: ovr1
+      - const: ovr2
+      - const: vp1
+      - const: vp2
+
+  clocks:
+    items:
+      - description: fck DSS functional clock
+      - description: vp1 Video Port 1 pixel clock
+      - description: vp2 Video Port 2 pixel clock
+
+  clock-names:
+    items:
+      - const: fck
+      - const: vp1
+      - const: vp2
+
+  interrupts:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+    description: phandle to the associated power domain
+
+  ports:
+    type: object
+    description:
+      Ports as described in Documentation/devicetree/bindings/graph.txt
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description:
+          The DSS OLDI output port node from video port 1
+
+      port@1:
+        type: object
+        description:
+          The DSS DPI output port node from video port 2
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+
+  ti,am65x-oldi-io-ctrl:
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+      - maxItems: 1
+    description:
+      phandle to syscon device node mapping OLDI IO_CTRL registers.
+      The mapped range should start at OLDI_DAT0_IO_CTRL and also cover
+      the following OLDI_DAT1_IO_CTRL, OLDI_DAT2_IO_CTRL,
+      OLDI_DAT3_IO_CTRL, and OLDI_CLK_IO_CTRL registers. This property
+      is needed for the OLDI interface to work.
+
+  max-memory-bandwidth:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Input memory (from main memory to dispc) bandwidth limit in
+      bytes per second
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - clock-names
+  - interrupts
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    dss: dss@04a00000 {
+            compatible = "ti,am65x-dss";
+            reg =   <0x0 0x04a00000 0x0 0x1000>, /* common */
+                    <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */
+                    <0x0 0x04a06000 0x0 0x1000>, /* vid */
+                    <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
+                    <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
+                    <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
+                    <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+            reg-names = "common", "vidl1", "vid",
+                    "ovr1", "ovr2", "vp1", "vp2";
+            ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+            power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
+            clocks =        <&k3_clks 67 1>,
+                            <&k3_clks 216 1>,
+                            <&k3_clks 67 2>;
+            clock-names = "fck", "vp1", "vp2";
+            interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+            ports {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+                    port@0 {
+                            reg = <0>;
+                            oldi_out0: endpoint {
+                                    remote-endpoint = <&lcd_in0>;
+                            };
+                    };
+            };
+    };
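
The &dss_oldi_io_ctrl phandle in the example above refers to a syscon node
mapping the OLDI IO_CTRL registers named in the ti,am65x-oldi-io-ctrl
description. A minimal sketch, assuming an offset and a size that cover the
five OLDI_*_IO_CTRL registers (both values are illustrative, not taken from
a TRM):

	dss_oldi_io_ctrl: syscon@41e0 {
		compatible = "syscon";
		reg = <0x41e0 0x14>;
	};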
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
new file mode 100644
index 0000000..ade9b2f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,j721e-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments J721E Display Subsystem
+
+maintainers:
+  - Jyri Sarha <jsarha@ti.com>
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+  The J721E TI Keystone Display SubSystem has four output ports and
+  four video planes. There are two full video planes and two "lite
+  planes" without scaling support. The video ports can be connected to
+  the SoC's DPI pins or to integrated display bridges on the SoC.
+
+properties:
+  compatible:
+    const: ti,j721e-dss
+
+  reg:
+    items:
+      - description: common_m DSS Master common
+      - description: common_s0 DSS Shared common 0
+      - description: common_s1 DSS Shared common 1
+      - description: common_s2 DSS Shared common 2
+      - description: VIDL1 light video plane 1
+      - description: VIDL2 light video plane 2
+      - description: VID1 video plane 1
+      - description: VID2 video plane 2
+      - description: OVR1 overlay manager for vp1
+      - description: OVR2 overlay manager for vp2
+      - description: OVR3 overlay manager for vp3
+      - description: OVR4 overlay manager for vp4
+      - description: VP1 video port 1
+      - description: VP2 video port 2
+      - description: VP3 video port 3
+      - description: VP4 video port 4
+      - description: WB Write Back
+
+  reg-names:
+    items:
+      - const: common_m
+      - const: common_s0
+      - const: common_s1
+      - const: common_s2
+      - const: vidl1
+      - const: vidl2
+      - const: vid1
+      - const: vid2
+      - const: ovr1
+      - const: ovr2
+      - const: ovr3
+      - const: ovr4
+      - const: vp1
+      - const: vp2
+      - const: vp3
+      - const: vp4
+      - const: wb
+
+  clocks:
+    items:
+      - description: fck DSS functional clock
+      - description: vp1 Video Port 1 pixel clock
+      - description: vp2 Video Port 2 pixel clock
+      - description: vp3 Video Port 3 pixel clock
+      - description: vp4 Video Port 4 pixel clock
+
+  clock-names:
+    items:
+      - const: fck
+      - const: vp1
+      - const: vp2
+      - const: vp3
+      - const: vp4
+
+  interrupts:
+    items:
+      - description: common_m DSS Master common
+      - description: common_s0 DSS Shared common 0
+      - description: common_s1 DSS Shared common 1
+      - description: common_s2 DSS Shared common 2
+
+  interrupt-names:
+    items:
+      - const: common_m
+      - const: common_s0
+      - const: common_s1
+      - const: common_s2
+
+  power-domains:
+    maxItems: 1
+    description: phandle to the associated power domain
+
+  ports:
+    type: object
+    description:
+      Ports as described in Documentation/devicetree/bindings/graph.txt
+    properties:
+      "#address-cells":
+        const: 1
+
+      "#size-cells":
+        const: 0
+
+      port@0:
+        type: object
+        description:
+          The output port node from video port 1
+
+      port@1:
+        type: object
+        description:
+          The output port node from video port 2
+
+      port@2:
+        type: object
+        description:
+          The output port node from video port 3
+
+      port@3:
+        type: object
+        description:
+          The output port node from video port 4
+
+    required:
+      - "#address-cells"
+      - "#size-cells"
+
+  max-memory-bandwidth:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Input memory (from main memory to dispc) bandwidth limit in
+      bytes per second
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - clock-names
+  - interrupts
+  - interrupt-names
+  - ports
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    dss: dss@04a00000 {
+            compatible = "ti,j721e-dss";
+            reg =   <0x00 0x04a00000 0x00 0x10000>, /* common_m */
+                    <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
+                    <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
+                    <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
+                    <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
+                    <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
+                    <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
+                    <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
+                    <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
+                    <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
+                    <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
+                    <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
+                    <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
+                    <0x00 0x04aa0000 0x00 0x10000>, /* vp2 */
+                    <0x00 0x04ac0000 0x00 0x10000>, /* vp3 */
+                    <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
+                    <0x00 0x04af0000 0x00 0x10000>; /* wb */
+            reg-names = "common_m", "common_s0",
+                    "common_s1", "common_s2",
+                    "vidl1", "vidl2","vid1","vid2",
+                    "ovr1", "ovr2", "ovr3", "ovr4",
+                    "vp1", "vp2", "vp3", "vp4",
+                    "wb";
+            clocks =        <&k3_clks 152 0>,
+                            <&k3_clks 152 1>,
+                            <&k3_clks 152 4>,
+                            <&k3_clks 152 9>,
+                            <&k3_clks 152 13>;
+            clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
+            power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+            interrupts =    <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+            interrupt-names =       "common_m",
+                                    "common_s0",
+                                    "common_s1",
+                                    "common_s2";
+            ports {
+                    #address-cells = <1>;
+                    #size-cells = <0>;
+                    port@0 {
+                            reg = <0>;
+
+                            dpi_out_0: endpoint {
+                                    remote-endpoint = <&dp_bridge_input>;
+                            };
+                    };
+            };
+    };
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
new file mode 100644
index 0000000..385bd06
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,k2g-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments K2G Display Subsystem
+
+maintainers:
+  - Jyri Sarha <jsarha@ti.com>
+  - Tomi Valkeinen <tomi.valkeinen@ti.com>
+
+description: |
+  The K2G DSS is an ultra-light version of TI Keystone Display
+  SubSystem. It has only one output port and video plane. The
+  output is DPI.
+
+properties:
+  compatible:
+    const: ti,k2g-dss
+
+  reg:
+    items:
+      - description: cfg DSS top level
+      - description: common DISPC common
+      - description: VID1 video plane 1
+      - description: OVR1 overlay manager for vp1
+      - description: VP1 video port 1
+
+  reg-names:
+    items:
+      - const: cfg
+      - const: common
+      - const: vid1
+      - const: ovr1
+      - const: vp1
+
+  clocks:
+    items:
+      - description: fck DSS functional clock
+      - description: vp1 Video Port 1 pixel clock
+
+  clock-names:
+    items:
+      - const: fck
+      - const: vp1
+
+  interrupts:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+    description: phandle to the associated power domain
+
+  port:
+    type: object
+    description:
+      Port as described in Documentation/devicetree/bindings/graph.txt.
+      The DSS DPI output port node
+
+  max-memory-bandwidth:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Input memory (from main memory to dispc) bandwidth limit in
+      bytes per second
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - clock-names
+  - interrupts
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    dss: dss@02540000 {
+            compatible = "ti,k2g-dss";
+            reg =   <0x02540000 0x400>,
+                    <0x02550000 0x1000>,
+                    <0x02557000 0x1000>,
+                    <0x0255a800 0x100>,
+                    <0x0255ac00 0x100>;
+            reg-names = "cfg", "common", "vid1", "ovr1", "vp1";
+            clocks =        <&k2g_clks 0x2 0>,
+                            <&k2g_clks 0x2 1>;
+            clock-names = "fck", "vp1";
+            interrupts = <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>;
+
+            power-domains = <&k2g_pds 0x2>;
+
+            max-memory-bandwidth = <230000000>;
+
+            port {
+                    dpi_out: endpoint {
+                            remote-endpoint = <&sii9022_in>;
+                    };
+            };
+    };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index fba343f..49467b5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -341,12 +341,16 @@
     description: Fastrax Oy
   "^fcs,.*":
     description: Fairchild Semiconductor
+  "^feixin,.*":
+    description: Shenzhen Feixin Photoelectric Co., Ltd
   "^feiyang,.*":
     description: Shenzhen Fly Young Technology Co.,LTD.
   "^firefly,.*":
     description: Firefly
   "^focaltech,.*":
     description: FocalTech Systems Co.,Ltd
+  "^frida,.*":
+    description: Shenzhen Frida LCD Co., Ltd.
   "^friendlyarm,.*":
     description: Guangzhou FriendlyARM Computer Tech Co., Ltd
   "^fsl,.*":
@@ -425,6 +429,8 @@
     description: Shenzhen Hugsun Technology Co. Ltd.
   "^hwacom,.*":
     description: HwaCom Systems Inc.
+  "^hydis,.*":
+    description: Hydis Technologies
   "^hyundai,.*":
     description: Hyundai Technology
   "^i2se,.*":
@@ -557,6 +563,8 @@
     description: Linear Technology Corporation
   "^logicpd,.*":
     description: Logic PD, Inc.
+  "^logictechno,.*":
+    description: Logic Technologies Limited
   "^longcheer,.*":
     description: Longcheer Technology (Shanghai) Co., Ltd.
   "^loongson,.*":
@@ -663,6 +671,8 @@
     description: Netron DY
   "^netxeon,.*":
     description: Shenzhen Netxeon Technology CO., LTD
+  "^neweast,.*":
+    description: Guangdong Neweast Optoelectronics CO., LTD
   "^nexbox,.*":
     description: Nexbox
   "^nextthing,.*":
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 9668a7f..ee73045 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -139,11 +139,17 @@
 .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    :doc: overview
 
-Default bridge callback sequence
---------------------------------
+Bridge Operations
+-----------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
-   :doc: bridge callbacks
+   :doc: bridge operations
+
+Bridge Connector Helper
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+   :doc: overview
 
 
 Bridge Helper Reference
@@ -155,6 +161,12 @@
 .. kernel-doc:: drivers/gpu/drm/drm_bridge.c
    :export:
 
+Bridge Connector Helper Reference
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+   :export:
+
 Panel-Bridge Helper Reference
 -----------------------------
 
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index cc74e24c..f6d363b 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -332,7 +332,7 @@
 GEM BO Management Implementation Details
 ----------------------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
    :doc: Virtual Memory Address
 
 Buffer Object Eviction
@@ -382,7 +382,7 @@
 Global GTT views
 ----------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
    :doc: Global GTT views
 
 .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index bc869b2..439656f 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -72,6 +72,28 @@
 
 Level: Advanced
 
+Improve plane atomic_check helpers
+----------------------------------
+
+Aside from the clipped coordinates right above, there are a few suboptimal
+things with the current helpers:
+
+- drm_plane_helper_funcs->atomic_check gets called for enabled or disabled
+  planes. At best this seems to confuse drivers; at worst it means they blow up
+  when the plane is disabled without the CRTC. The only special handling is
+  resetting values in the plane state structures, which instead should be moved
+  into the drm_plane_funcs->atomic_duplicate_state functions.
+
+- Once that's done, helpers could stop calling ->atomic_check for disabled
+  planes.
+
+- Then we could go through all the drivers and remove the more-or-less confused
+  checks for plane_state->fb and plane_state->crtc.
+
+Contact: Daniel Vetter
+
+Level: Advanced
+
 Convert early atomic drivers to async commit helpers
 ----------------------------------------------------
 
@@ -337,23 +359,6 @@
 
 Level: Starter
 
-drm_fb_helper tasks
--------------------
-
-- drm_fb_helper_restore_fbdev_mode_unlocked() should call restore_fbdev_mode()
-  not the _force variant so it can bail out if there is a master. But first
-  these igt tests need to be fixed: kms_fbcon_fbt@psr and
-  kms_fbcon_fbt@psr-suspend.
-
-- The max connector argument for drm_fb_helper_init() isn't used anymore and
-  can be removed.
-
-- The helper doesn't keep an array of connectors anymore so these can be
-  removed: drm_fb_helper_single_add_all_connectors(),
-  drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
-
-Level: Intermediate
-
 connector register/unregister fixes
 -----------------------------------
 
@@ -385,6 +390,20 @@
 
 Level: Intermediate
 
+Replace drm_detect_hdmi_monitor() with drm_display_info.is_hdmi
+---------------------------------------------------------------
+
+Once EDID is parsed, the monitor HDMI support information is available through
+drm_display_info.is_hdmi. Many drivers still call drm_detect_hdmi_monitor() to
+retrieve the same information, which is less efficient.
+
+Audit each individual driver calling drm_detect_hdmi_monitor() and switch to
+drm_display_info.is_hdmi if applicable.
+
+Contact: Laurent Pinchart, respective driver maintainers
+
+Level: Intermediate
+
 Core refactorings
 =================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 242a096..39c90a2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5058,7 +5058,7 @@
 L:	linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
 F:	drivers/dma-buf/
 F:	include/linux/dma-buf*
-F:	include/linux/reservation.h
+F:	include/linux/dma-resv.h
 F:	include/linux/*fence.h
 F:	Documentation/driver-api/dma-buf.rst
 K:	dma_(buf|fence|resv)
@@ -5299,6 +5299,12 @@
 S:	Maintained
 F:	drivers/gpu/drm/tve200/
 
+DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
+M:	Icenowy Zheng <icenowy@aosc.io>
+S:	Maintained
+F:	drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+F:	Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
+
 DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
 M:	Jagan Teki <jagan@amarulasolutions.com>
 S:	Maintained
@@ -5318,6 +5324,13 @@
 F:	drivers/gpu/drm/tiny/ili9225.c
 F:	Documentation/devicetree/bindings/display/ilitek,ili9225.txt
 
+DRM DRIVER FOR ILITEK ILI9486 PANELS
+M:	Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+S:	Maintained
+F:	drivers/gpu/drm/tiny/ili9486.c
+F:	Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
+
 DRM DRIVER FOR HX8357D PANELS
 M:	Eric Anholt <eric@anholt.net>
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -5359,6 +5372,13 @@
 F:	include/uapi/drm/msm_drm.h
 F:	Documentation/devicetree/bindings/display/msm/
 
+DRM DRIVER FOR NOVATEK NT35510 PANELS
+M:	Linus Walleij <linus.walleij@linaro.org>
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+S:	Maintained
+F:	drivers/gpu/drm/panel/panel-novatek-nt35510.c
+F:	Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
+
 DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
 M:	Ben Skeggs <bskeggs@redhat.com>
 L:	dri-devel@lists.freedesktop.org
@@ -5446,7 +5466,7 @@
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/tiny/st7735r.c
-F:	Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+F:	Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
 
 DRM DRIVER FOR SONY ACX424AKP PANELS
 M:	Linus Walleij <linus.walleij@linaro.org>
@@ -5526,6 +5546,7 @@
 DRM DRIVERS AND MISC GPU PATCHES
 M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
 M:	Maxime Ripard <mripard@kernel.org>
+M:	Thomas Zimmermann <tzimmermann@suse.de>
 W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:	Maintained
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -5605,7 +5626,6 @@
 F:	drivers/gpu/drm/fsl-dcu/
 F:	Documentation/devicetree/bindings/display/fsl,dcu.txt
 F:	Documentation/devicetree/bindings/display/fsl,tcon.txt
-F:	Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR FREESCALE IMX
@@ -5624,12 +5644,13 @@
 F:	drivers/gpu/drm/gma500/
 
 DRM DRIVERS FOR HISILICON
-M:	Xinliang Liu <z.liuxinliang@hisilicon.com>
+M:	Xinliang Liu <xinliang.liu@linaro.org>
 M:	Rongrong Zou <zourongrong@gmail.com>
+R:	John Stultz <john.stultz@linaro.org>
 R:	Xinwei Kong <kong.kongxinwei@hisilicon.com>
 R:	Chen Feng <puck.chen@hisilicon.com>
 L:	dri-devel@lists.freedesktop.org
-T:	git git://github.com/xin3liang/linux.git
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/hisilicon/
 F:	Documentation/devicetree/bindings/display/hisilicon/
@@ -5644,7 +5665,7 @@
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR MEDIATEK
-M:	CK Hu <ck.hu@mediatek.com>
+M:	Chun-Kuang Hu <chunkuang.hu@kernel.org>
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
@@ -5721,6 +5742,17 @@
 F:	drivers/gpu/drm/omapdrm/
 F:	Documentation/devicetree/bindings/display/ti/
 
+DRM DRIVERS FOR TI KEYSTONE
+M:	Jyri Sarha <jsarha@ti.com>
+M:	Tomi Valkeinen <tomi.valkeinen@ti.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	drivers/gpu/drm/tidss/
+F:	Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+F:	Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+F:	Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+T:	git git://anongit.freedesktop.org/drm/drm-misc
+
 DRM DRIVERS FOR V3D
 M:	Eric Anholt <eric@anholt.net>
 S:	Supported
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index b5ba8d7..e849367 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -158,7 +158,7 @@
 CONFIG_VIDEO_ADV7343=m
 CONFIG_DRM=m
 CONFIG_DRM_TILCDC=m
-CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
 CONFIG_DRM_TINYDRM=m
 CONFIG_TINYDRM_ST7586=m
 CONFIG_FB=y
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 2f0a762..a9755c5 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -55,7 +55,7 @@
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_DRM=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_PL111=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_MATROX=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 017d65f..0b02086 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -670,11 +670,11 @@
 CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_DUMB_VGA_DAC=m
 CONFIG_DRM_NXP_PTN3460=m
 CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_SII902X=m
 CONFIG_DRM_SII9234=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
 CONFIG_DRM_TOSHIBA_TC358764=m
 CONFIG_DRM_I2C_ADV7511=m
 CONFIG_DRM_I2C_ADV7511_AUDIO=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 847f987..54f1a21 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -350,14 +350,13 @@
 CONFIG_OMAP5_DSS_HDMI=y
 CONFIG_OMAP2_DSS_SDI=y
 CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_ENCODER_OPA362=m
-CONFIG_DRM_OMAP_ENCODER_TPD12S015=m
-CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
-CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
 CONFIG_DRM_OMAP_PANEL_DSI_CM=m
 CONFIG_DRM_TILCDC=m
 CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
 CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_TI_TPD12S015=m
 CONFIG_DRM_PANEL_LG_LB035Q02=m
 CONFIG_DRM_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 64fa849..838307a 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -125,9 +125,9 @@
 CONFIG_DRM=y
 CONFIG_DRM_RCAR_DU=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
 CONFIG_DRM_LVDS_CODEC=y
 CONFIG_DRM_SII902X=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_I2C_ADV7511=y
 CONFIG_DRM_I2C_ADV7511_AUDIO=y
 CONFIG_FB_SH_MOBILE_LCDC=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index e9fb573..61b8be1 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -101,7 +101,7 @@
 CONFIG_IR_SUNXI=y
 CONFIG_DRM=y
 CONFIG_DRM_SUN4I=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_FB_SIMPLE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index fe4d4b5..76793533 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -59,7 +59,7 @@
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ARM_VERSATILE=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
 CONFIG_DRM_PL111=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 0613bb7..ef73b67 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -6,7 +6,7 @@
 	default n
 	select DMA_SHARED_BUFFER
 	---help---
-	  The Sync File Framework adds explicit syncronization via
+	  The Sync File Framework adds explicit synchronization via
 	  userspace. It enables send/receive 'struct dma_fence' objects to/from
 	  userspace via Sync File fds for synchronization between drivers via
 	  userspace components. It has been ported from Android.
@@ -39,6 +39,16 @@
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
 
+config DMABUF_MOVE_NOTIFY
+	bool "Move notify between drivers (EXPERIMENTAL)"
+	default n
+	help
+	  Don't pin buffers if the dynamic DMA-buf interface is available on
+	  both the exporter and the importer. This fixes a security problem where
+	  userspace is able to pin unrestricted amounts of memory through DMA-buf.
+	  But this is marked experimental because we don't yet have a consistent
+	  execution context and memory management between drivers.
+
 config DMABUF_SELFTESTS
 	tristate "Selftests for the dma-buf interfaces"
 	default n
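
For context, a minimal sketch of the importer side that the new DMABUF_MOVE_NOTIFY
option is about. The dma_buf_attach_ops structure and the dma_buf_dynamic_attach()
signature are taken from the dma-buf.c hunk that follows; the callback body and all
my_* names are purely illustrative:

	/* Illustrative only: an importer opts into move notifications by
	 * supplying a move_notify callback. Without importer_ops the core
	 * falls back to pinning the buffer instead.
	 */
	static void my_move_notify(struct dma_buf_attachment *attach)
	{
		struct my_importer *imp = attach->importer_priv;

		/* Mappings are stale now; tear them down and re-create
		 * them on next use. Called with the reservation lock held.
		 */
		my_invalidate_mappings(imp);
	}

	static const struct dma_buf_attach_ops my_attach_ops = {
		.move_notify = my_move_notify,
	};

	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
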
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index c343c7c..ccc9eda 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -525,7 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	}
 
 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
-		    exp_info->ops->dynamic_mapping))
+		    (exp_info->ops->pin || exp_info->ops->unpin)))
+		return ERR_PTR(-EINVAL);
+
+	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 		return ERR_PTR(-EINVAL);
 
 	if (!try_module_get(exp_info->owner))
@@ -652,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:		[in]	buffer to attach device to.
  * @dev:		[in]	device to be attached.
- * @dynamic_mapping:	[in]	calling convention for map/unmap
+ * @importer_ops:	[in]	importer operations for the attachment
+ * @importer_priv:	[in]	importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
@@ -668,7 +672,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  */
 struct dma_buf_attachment *
 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
-		       bool dynamic_mapping)
+		       const struct dma_buf_attach_ops *importer_ops,
+		       void *importer_priv)
 {
 	struct dma_buf_attachment *attach;
 	int ret;
@@ -676,13 +681,17 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
+	if (WARN_ON(importer_ops && !importer_ops->move_notify))
+		return ERR_PTR(-EINVAL);
+
 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 	if (!attach)
 		return ERR_PTR(-ENOMEM);
 
 	attach->dev = dev;
 	attach->dmabuf = dmabuf;
-	attach->dynamic_mapping = dynamic_mapping;
+	attach->importer_ops = importer_ops;
+	attach->importer_priv = importer_priv;
 
 	if (dmabuf->ops->attach) {
 		ret = dmabuf->ops->attach(dmabuf, attach);
@@ -701,15 +710,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	    dma_buf_is_dynamic(dmabuf)) {
 		struct sg_table *sgt;
 
-		if (dma_buf_is_dynamic(attach->dmabuf))
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
 			dma_resv_lock(attach->dmabuf->resv, NULL);
+			ret = dma_buf_pin(attach);
+			if (ret)
+				goto err_unlock;
+		}
 
 		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
 		if (!sgt)
 			sgt = ERR_PTR(-ENOMEM);
 		if (IS_ERR(sgt)) {
 			ret = PTR_ERR(sgt);
-			goto err_unlock;
+			goto err_unpin;
 		}
 		if (dma_buf_is_dynamic(attach->dmabuf))
 			dma_resv_unlock(attach->dmabuf->resv);
@@ -723,6 +736,10 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 	kfree(attach);
 	return ERR_PTR(ret);
 
+err_unpin:
+	if (dma_buf_is_dynamic(attach->dmabuf))
+		dma_buf_unpin(attach);
+
 err_unlock:
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_unlock(attach->dmabuf->resv);
@@ -743,7 +760,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 					  struct device *dev)
 {
-	return dma_buf_dynamic_attach(dmabuf, dev, false);
+	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
 
@@ -766,8 +783,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 
 		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
 
-		if (dma_buf_is_dynamic(attach->dmabuf))
+		if (dma_buf_is_dynamic(attach->dmabuf)) {
+			dma_buf_unpin(attach);
 			dma_resv_unlock(attach->dmabuf->resv);
+		}
 	}
 
 	dma_resv_lock(dmabuf->resv, NULL);
@@ -781,6 +800,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 EXPORT_SYMBOL_GPL(dma_buf_detach);
 
 /**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach:	[in]	attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+	int ret = 0;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->pin)
+		ret = dmabuf->ops->pin(attach);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach:	[in]	attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+	struct dma_buf *dmabuf = attach->dmabuf;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	if (dmabuf->ops->unpin)
+		dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);
+
+/**
  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
  * dma_buf_ops.
@@ -799,6 +856,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 					enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
+	int r;
 
 	might_sleep();
 
@@ -820,13 +878,23 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		return attach->sgt;
 	}
 
-	if (dma_buf_is_dynamic(attach->dmabuf))
+	if (dma_buf_is_dynamic(attach->dmabuf)) {
 		dma_resv_assert_held(attach->dmabuf->resv);
+		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+			r = dma_buf_pin(attach);
+			if (r)
+				return ERR_PTR(r);
+		}
+	}
 
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
+	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
+
 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 		attach->sgt = sg_table;
 		attach->dir = direction;
@@ -865,10 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 		dma_resv_assert_held(attach->dmabuf->resv);
 
 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+	if (dma_buf_is_dynamic(attach->dmabuf) &&
+	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		dma_buf_unpin(attach);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 
 /**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf:	[in]	buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+	struct dma_buf_attachment *attach;
+
+	dma_resv_assert_held(dmabuf->resv);
+
+	list_for_each_entry(attach, &dmabuf->attachments, node)
+		if (attach->importer_ops)
+			attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+
+/**
  * DOC: cpu access
  *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
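
To complement the importer-side sketch earlier, the exporter side in rough outline:
an exporter supporting dynamic attachments calls dma_buf_move_notify() with the
reservation lock held whenever it relocates the backing storage. Only
dma_buf_move_notify(), dma_resv_lock() and dma_resv_unlock() below are real;
everything prefixed my_ is illustrative:

	/* Illustrative eviction path in an exporter (not part of this patch). */
	static void my_exporter_evict(struct my_bo *bo)
	{
		dma_resv_lock(bo->resv, NULL);

		/* Tell every dynamic importer to drop its mappings before the
		 * pages actually move; dma_buf_move_notify() asserts that the
		 * reservation lock is held.
		 */
		dma_buf_move_notify(bo->dmabuf);
		my_do_move(bo);

		dma_resv_unlock(bo->resv);
	}
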
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d0aa6cf..4359497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,9 +54,6 @@
 
 	  If in doubt, say "N".
 
-config DRM_EXPORT_FOR_TESTS
-	bool
-
 config DRM_DEBUG_SELFTEST
 	tristate "kselftests for DRM"
 	depends on DRM
@@ -389,6 +386,8 @@
 
 source "drivers/gpu/drm/mcde/Kconfig"
 
+source "drivers/gpu/drm/tidss/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
@@ -468,6 +467,9 @@
 
 endif # DRM_LEGACY
 
+config DRM_EXPORT_FOR_TESTS
+	bool
+
 # Separate option because drm_panel_orientation_quirks.c is shared with fbdev
 config DRM_PANEL_ORIENTATION_QUIRKS
 	tristate
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6493088..7f72ef5 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -39,7 +39,8 @@
 drm_ttm_helper-y := drm_gem_ttm_helper.o
 obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
 
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+		drm_dsc.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
 		drm_simple_kms_helper.o drm_modeset_helper.o \
@@ -122,3 +123,4 @@
 obj-$(CONFIG_DRM_PANFROST) += panfrost/
 obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
 obj-$(CONFIG_DRM_MCDE) += mcde/
+obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f3..216d932 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"
+	depends on DRM_AMDGPU
 
 config DRM_AMD_ACP
 	bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff..2992a49 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
 	/* invalidate hdp read cache */
 	void (*invalidate_hdp)(struct amdgpu_device *adev,
 			       struct amdgpu_ring *ring);
+	void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
 	/* check if the asic needs a full reset or if soft reset will work */
 	bool (*need_full_reset)(struct amdgpu_device *adev);
 	/* initialize doorbell layout for specific asic */
@@ -969,6 +970,7 @@ struct amdgpu_device {
 	int				pstate;
 	/* enable runtime pm on the device */
 	bool                            runpm;
+	bool                            in_runpm;
 
 	bool                            pm_sysfs_en;
 	bool                            ucode_sysfs_en;
@@ -992,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			uint32_t acc_flags);
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
@@ -1174,9 +1178,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287..abfbe89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
 
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 		/* this is going to have a few of the MSBs set that we need to
 		 * clear
 		 */
-		bitmap_complement(gpu_resources.queue_bitmap,
+		bitmap_complement(gpu_resources.cp_queue_bitmap,
 				  adev->gfx.mec.queue_bitmap,
 				  KGD_MAX_QUEUES);
 
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
-			clear_bit(i, gpu_resources.queue_bitmap);
+			clear_bit(i, gpu_resources.cp_queue_bitmap);
 
 		amdgpu_doorbell_get_kfd_info(adev,
 				&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 {
 	if (adev->kfd.dev)
-		kgd2kfd_suspend(adev->kfd.dev);
+		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd_resume(adev->kfd.dev);
+		r = kgd2kfd_resume(adev->kfd.dev, run_pm);
 
 	return r;
 }
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
-				void **cpu_ptr, bool mqd_gfx9)
+				void **cpu_ptr, bool cp_mqd_gfx9)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	bp.type = ttm_bo_type_kernel;
 	bp.resv = NULL;
 
-	if (mqd_gfx9)
-		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+	if (cp_mqd_gfx9)
+		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
 
 	r = amdgpu_bo_create(adev, &bp, &bo);
 	if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
 
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
-	else if (adev->powerplay.pp_funcs) {
+	else if (adev->pm.dpm_enabled) {
 		if (amdgpu_emu_mode == 1)
 			mem_info->mem_clk_max = 0;
 		else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	/* the sclk is in quantas of 10kHz */
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
-	else if (adev->powerplay.pp_funcs)
+	else if (adev->pm.dpm_enabled)
 		return amdgpu_dpm_get_sclk(adev, false) / 100;
 	else
 		return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 					   metadata_size, &metadata_flags);
 	if (flags) {
 		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-			ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;
 
 		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
-			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
+			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
 	}
 
 out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
 
 	return adev->gmc.xgmi.hive_id;
 }
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	return adev->unique_id;
+}
+
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
 {
 	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-	uint32_t flush_type = 0;
+	const uint32_t flush_type = 0;
 	bool all_hub = false;
 
-	if (adev->gmc.xgmi.num_physical_nodes &&
-		adev->asic_type == CHIP_VEGA20)
-		flush_type = 2;
-
 	if (adev->family == AMDGPU_FAMILY_AI)
 		all_hub = true;
 
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
 }
 
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 					struct amdgpu_vm *vm)
 {
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
 {
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f29..13feb31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
 						       struct mm_struct *mm);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
 
 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 				  uint32_t *flags);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+				struct tile_config *config);
+
 /* KGD2KFD callbacks */
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175..6529cac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 		dev_warn(adev->dev,
 			 "Invalid sdma engine id (%d), using engine id 0\n",
 			 engine_id);
-		/* fall through */
+		fallthrough;
 	case 0:
 		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
 				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8..4ec6d0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
 	.address_watch_get_offset = kgd_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 			get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = amdgpu_amdkfd_get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
+	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e9..0b7e787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
 	float f32All;
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFRANKS);
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.address_watch_get_offset = kgd_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db..ccd635b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFBANK);
-	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
-				MC_ARB_RAMCFG, NOOFRANKS);
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.get_atc_vmid_pasid_mapping_info =
 			get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
-	.get_tile_config = get_tile_config,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe..df841c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };
 
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
-	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
-	config->num_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
-	config->macro_tile_config_ptr =
-			adev->gfx.config.macrotile_mode_array;
-	config->num_macro_tile_configs =
-			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
-	return 0;
-}
-
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
 {
 	return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
 	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
 	.get_atc_vmid_pasid_mapping_info =
 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
-	.get_tile_config = kgd_gfx_v9_get_tile_config,
 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
+	.get_unique_id = amdgpu_amdkfd_get_unique_id,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e66..aedf67d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 					uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
-		struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d..9dff792 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
 #include "amdgpu_vm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
 
 /* BO flag to indicate a KFD userptr BO */
 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	return 0;
 }
 
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	struct amdgpu_bo *root = bo;
+	struct amdgpu_vm_bo_base *vm_bo;
+	struct amdgpu_vm *vm;
+	struct amdkfd_process_info *info;
+	struct amdgpu_amdkfd_fence *ef;
+	int ret;
+
+	/* we can always get vm_bo from the root PD bo. */
+	while (root->parent)
+		root = root->parent;
+
+	vm_bo = root->vm_bo;
+	if (!vm_bo)
+		return 0;
+
+	vm = vm_bo->vm;
+	if (!vm)
+		return 0;
+
+	info = vm->process_info;
+	if (!info || !info->eviction_fence)
+		return 0;
+
+	ef = container_of(dma_fence_get(&info->eviction_fence->base),
+			struct amdgpu_amdkfd_fence, base);
+
+	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+	dma_resv_unlock(bo->tbo.base.resv);
+
+	dma_fence_put(&ef->base);
+	return ret;
+}
+
 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
 				     bool wait)
 {
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
 {
 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
-	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
 	uint32_t mapping_flags;
 
 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
-	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
 
 	switch (adev->asic_type) {
 	case CHIP_ARCTURUS:
-		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 			if (bo_adev == adev)
 				mapping_flags |= coherent ?
 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 			    vm_list_node) {
 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
 
-		ret = amdgpu_sync_resv(NULL,
-					sync, pd->tbo.base.resv,
-					AMDGPU_FENCE_OWNER_KFD, false);
+		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+				       AMDGPU_SYNC_NE_OWNER,
+				       AMDGPU_FENCE_OWNER_KFD);
 		if (ret)
 			return ret;
 	}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 	list_del(&vm->vm_list_node);
 	mutex_unlock(&process_info->lock);
 
+	vm->process_info = NULL;
+
 	/* Release per-process resources when last compute VM is destroyed */
 	if (!process_info->n_vms) {
 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	/*
 	 * Check on which domain to allocate BO
 	 */
-	if (flags & ALLOC_MEM_FLAGS_VRAM) {
+	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
-		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
-	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
+	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
-	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
 		domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
 		alloc_flags = 0;
 		if (!offset || !*offset)
 			return -EINVAL;
 		user_addr = untagged_addr(*offset);
-	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
-			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
 		domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
 		bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	}
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
-	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
 
 	/* Workaround for AQL queue wraparound bug. Map the same
 	 * memory twice. That means we only actually allocate half
@@ -1642,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 
 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
 	mutex_init(&(*mem)->lock);
+
 	(*mem)->alloc_flags =
 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
-		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
-		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
 	(*mem)->bo = amdgpu_bo_ref(bo);
 	(*mem)->va = va;
@@ -2204,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
 	kfree(mem);
 	return 0;
 }
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+				struct tile_config *config)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+	config->gb_addr_config = adev->gfx.config.gb_addr_config;
+	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+	config->num_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+	config->macro_tile_config_ptr =
+			adev->gfx.config.macrotile_mode_array;
+	config->num_macro_tile_configs =
+			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+	/* Those values are not set from GFX9 onwards */
+	config->num_banks = adev->gfx.config.num_banks;
+	config->num_ranks = adev->gfx.config.num_ranks;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8..f355d9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
 	return MODE_OK;
 }
 
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+	int r = 0;
+
+	if (amdgpu_connector->ddc_bus->has_aux) {
+		amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+		r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+	}
+
+	return r;
+}
+
 static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
 	.get_modes = amdgpu_connector_dp_get_modes,
 	.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
 	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
+	.late_register = amdgpu_connector_late_register,
 };
 
 static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
 	.early_unregister = amdgpu_connector_unregister,
 	.destroy = amdgpu_connector_destroy,
 	.force = amdgpu_connector_dvi_force,
+	.late_register = amdgpu_connector_late_register,
 };
 
 void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	connector->display_info.subpixel_order = subpixel_order;
-	drm_connector_register(connector);
 
 	if (has_aux)
 		amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084..af91627b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
 #include <linux/file.h>
 #include <linux/pagemap.h>
 #include <linux/sync_file.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	/* Don't move this buffer if we have depleted our allowance
 	 * to move it. Don't move anything if the threshold is zero.
 	 */
-	if (p->bytes_moved < p->bytes_moved_threshold) {
+	if (p->bytes_moved < p->bytes_moved_threshold &&
+	    (!bo->tbo.base.dma_buf ||
+	    list_empty(&bo->tbo.base.dma_buf->attachments))) {
 		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_bo_list_entry *e;
 	int r;
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 		struct dma_resv *resv = bo->tbo.base.resv;
+		enum amdgpu_sync_mode sync_mode;
 
-		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
-				     amdgpu_bo_explicit_sync(bo));
-
+		sync_mode = amdgpu_bo_explicit_sync(bo) ?
+			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+				     &fpriv->vm);
 		if (r)
 			return r;
 	}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct drm_sched_entity *entity = p->entity;
 	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
 	uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
 	if (r)
 		goto error_unlock;
 
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
-
 	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
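
For quick reference, the amdgpu_sync_resv() calling-convention change applied in
the hunk above, with both forms taken from the visible -/+ lines (only the
variable declarations around them are assumed):

	/* Old: the filter was an implicit bool, keyed on the drm_file. */
	r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
			     amdgpu_bo_explicit_sync(bo));

	/* New: the filter is a named sync mode, and the VM is the owner key. */
	sync_mode = amdgpu_bo_explicit_sync(bo) ?
		AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
	r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
			     &fpriv->vm);
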
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42..6ed36a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+	switch (prio) {
+	case DRM_SCHED_PRIORITY_HIGH_HW:
+	case DRM_SCHED_PRIORITY_KERNEL:
+		return AMDGPU_GFX_PIPE_PRIO_HIGH;
+	default:
+		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	}
+}
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	enum gfx_pipe_priority hw_prio;
 	enum drm_sched_priority priority;
 	int r;
 
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
 	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 				ctx->init_priority : ctx->override_priority;
 	switch (hw_ip) {
-		case AMDGPU_HW_IP_GFX:
-			sched = &adev->gfx.gfx_ring[0].sched;
-			scheds = &sched;
-			num_scheds = 1;
-			break;
-		case AMDGPU_HW_IP_COMPUTE:
-			scheds = adev->gfx.compute_sched;
-			num_scheds = adev->gfx.num_compute_sched;
-			break;
-		case AMDGPU_HW_IP_DMA:
-			scheds = adev->sdma.sdma_sched;
-			num_scheds = adev->sdma.num_sdma_sched;
-			break;
-		case AMDGPU_HW_IP_UVD:
-			sched = &adev->uvd.inst[0].ring.sched;
-			scheds = &sched;
-			num_scheds = 1;
-			break;
-		case AMDGPU_HW_IP_VCE:
-			sched = &adev->vce.ring[0].sched;
-			scheds = &sched;
-			num_scheds = 1;
-			break;
-		case AMDGPU_HW_IP_UVD_ENC:
-			sched = &adev->uvd.inst[0].ring_enc[0].sched;
-			scheds = &sched;
-			num_scheds = 1;
-			break;
-		case AMDGPU_HW_IP_VCN_DEC:
-			scheds = adev->vcn.vcn_dec_sched;
-			num_scheds =  adev->vcn.num_vcn_dec_sched;
-			break;
-		case AMDGPU_HW_IP_VCN_ENC:
-			scheds = adev->vcn.vcn_enc_sched;
-			num_scheds =  adev->vcn.num_vcn_enc_sched;
-			break;
-		case AMDGPU_HW_IP_VCN_JPEG:
-			scheds = adev->jpeg.jpeg_sched;
-			num_scheds =  adev->jpeg.num_jpeg_sched;
-			break;
+	case AMDGPU_HW_IP_GFX:
+		sched = &adev->gfx.gfx_ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		break;
+	case AMDGPU_HW_IP_DMA:
+		scheds = adev->sdma.sdma_sched;
+		num_scheds = adev->sdma.num_sdma_sched;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		sched = &adev->uvd.inst[0].ring.sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		sched = &adev->vce.ring[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		sched = &adev->uvd.inst[0].ring_enc[0].sched;
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+					    adev->vcn.num_vcn_dec_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+					    adev->vcn.num_vcn_enc_sched);
+		scheds = &sched;
+		num_scheds = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		scheds = adev->jpeg.jpeg_sched;
+		num_scheds =  adev->jpeg.num_jpeg_sched;
+		break;
 	}
 
 	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+					    struct amdgpu_ctx_entity *aentity,
+					    int hw_ip,
+					    enum drm_sched_priority priority)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	enum gfx_pipe_priority hw_prio;
+	struct drm_gpu_scheduler **scheds = NULL;
+	unsigned num_scheds;
+
+	/* set sw priority */
+	drm_sched_entity_set_priority(&aentity->entity, priority);
+
+	/* set hw priority */
+	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+		scheds = adev->gfx.compute_prio_sched[hw_prio];
+		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		drm_sched_entity_modify_sched(&aentity->entity, scheds,
+					      num_scheds);
+	}
+}
+
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 				  enum drm_sched_priority priority)
 {
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 			ctx->init_priority : ctx->override_priority;
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
-			struct drm_sched_entity *entity;
-
 			if (!ctx->entities[i][j])
 				continue;
 
-			entity = &ctx->entities[i][j]->entity;
-			drm_sched_entity_set_priority(entity, ctx_prio);
+			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+						       i, ctx_prio);
 		}
 	}
 }
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	mutex_destroy(&mgr->lock);
 }
 
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+	int num_compute_sched_normal = 0;
+	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+	int i;
+
+	/* use one drm sched array, gfx.compute_sched, to store both high- and
+	 * normal-priority drm compute schedulers */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		if (!adev->gfx.compute_ring[i].has_high_prio)
+			adev->gfx.compute_sched[num_compute_sched_normal++] =
+				&adev->gfx.compute_ring[i].sched;
+		else
+			adev->gfx.compute_sched[num_compute_sched_high--] =
+				&adev->gfx.compute_ring[i].sched;
+	}
+
+	/* compute rings only have two priorities for now */
+	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* When compute has no high priority rings, use the
+		 * normal priority sched array */
+		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+	} else {
+		adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high - 1];
+		adev->gfx.num_compute_sched[i] =
+			adev->gfx.num_compute_rings - num_compute_sched_normal;
+	}
+}
+
 void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
 {
 	int i, j;
 
+	amdgpu_ctx_init_compute_sched(adev);
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
 		adev->gfx.num_gfx_sched++;
 	}
 
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
-		adev->gfx.num_compute_sched++;
-	}
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
 		adev->sdma.num_sdma_sched++;
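
To make the two-ended fill in amdgpu_ctx_init_compute_sched() above concrete, a
worked layout, assuming AMDGPU_MAX_COMPUTE_RINGS == 8, eight compute rings, and
has_high_prio set on rings 6 and 7 (which rings are high priority is decided
elsewhere, at MQD init):

	/*
	 * compute_sched index:   0   1   2   3   4   5   6   7
	 * filled with ring:      0   1   2   3   4   5   7   6
	 *
	 * Normal rings fill the array from the front (num_compute_sched_normal
	 * ends at 6), high-priority rings fill it from the back
	 * (num_compute_sched_high ends at 5), so the normal-priority list is
	 * compute_sched[0..5] and the high-priority list is the tail of the
	 * same array.
	 */
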
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 337d7cd..c0f9a65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
 #include <drm/drm_debugfs.h>
 
 #include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
 
 /**
  * amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
 		} else {
 			r = get_user(value, (uint32_t *)buf);
 			if (!r)
-				WREG32(*pos >> 2, value);
+				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
 		}
 		if (r) {
 			result = r;
@@ -840,6 +843,55 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
 	return result;
 }
 
+/**
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos:  Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero to enable
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = file_inode(f)->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	r = pm_runtime_get_sync(adev->ddev->dev);
+	if (r < 0)
+		return r;
+
+	while (size) {
+		uint32_t value;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r) {
+			pm_runtime_mark_last_busy(adev->ddev->dev);
+			pm_runtime_put_autosuspend(adev->ddev->dev);
+			return r;
+		}
+
+		amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+	.owner = THIS_MODULE,
+	.write = amdgpu_debugfs_gfxoff_write,
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_sensors_fops,
 	&amdgpu_debugfs_wave_fops,
 	&amdgpu_debugfs_gpr_fops,
+	&amdgpu_debugfs_gfxoff_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_sensors",
 	"amdgpu_wave",
 	"amdgpu_gpr",
+	"amdgpu_gfxoff",
 };
 
 /**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 	return 0;
 }
 
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
-	unsigned i;
-
-	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
-		if (adev->debugfs_regs[i]) {
-			debugfs_remove(adev->debugfs_regs[i]);
-			adev->debugfs_regs[i] = NULL;
-		}
-	}
-}
-
 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 	return 0;
 }
 
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+	int ret = 0;
+	uint32_t max_freq, min_freq;
+	struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return -EINVAL;
+
+	ret = pm_runtime_get_sync(adev->ddev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		if (ret || val > max_freq || val < min_freq)
+			return -EINVAL;
+		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+	} else {
+		return 0;
+	}
+
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
 DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
 			amdgpu_debugfs_ib_preempt, "%llu\n");
 
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+			amdgpu_debugfs_sclk_set, "%llu\n");
+
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
+	int r, i;
+
 	adev->debugfs_preempt =
 		debugfs_create_file("amdgpu_preempt_ib", 0600,
 				    adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		return -EIO;
 	}
 
+	adev->smu.debugfs_sclk =
+		debugfs_create_file("amdgpu_force_sclk", 0200,
+				    adev->ddev->primary->debugfs_root, adev,
+				    &fops_sclk_set);
+	if (!(adev->smu.debugfs_sclk)) {
+		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
+		return -EIO;
+	}
+
+	/* Register debugfs entries for amdgpu_ttm */
+	r = amdgpu_ttm_debugfs_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+
+	r = amdgpu_debugfs_pm_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to register debugfs file for dpm!\n");
+		return r;
+	}
+
+	if (amdgpu_debugfs_sa_init(adev))
+		dev_err(adev->dev, "failed to register debugfs file for SA\n");
+
+	if (amdgpu_debugfs_fence_init(adev))
+		dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+	r = amdgpu_debugfs_gem_init(adev);
+	if (r)
+		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+	r = amdgpu_debugfs_regs_init(adev);
+	if (r)
+		DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+	r = amdgpu_debugfs_firmware_init(adev);
+	if (r)
+		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+	if (amdgpu_device_has_dc_support(adev)) {
+		if (dtn_debugfs_init(adev))
+			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
+	}
+#endif
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+
+		if (!ring)
+			continue;
+
+		if (amdgpu_debugfs_ring_init(adev, ring))
+			DRM_ERROR("Failed to register debugfs file for rings!\n");
+	}
+
+	amdgpu_ras_debugfs_create_all(adev);
+
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
 					ARRAY_SIZE(amdgpu_debugfs_list));
 }
 
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
-	debugfs_remove(adev->debugfs_preempt);
-}
-
 #else
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
 #endif
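
For reference, a hedged userspace sketch of the two debugfs knobs added above.
The file names and write formats come from the hunks (amdgpu_gfxoff takes raw
32-bit writes, amdgpu_force_sclk a decimal string via DEFINE_SIMPLE_ATTRIBUTE);
the /sys/kernel/debug/dri/0 path and the MHz unit are assumptions:

	/* Illustrative only; error handling omitted. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t off = 0;	/* 0 disables GFXOFF, non-zero enables it */
		int fd;

		fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff", O_WRONLY);
		write(fd, &off, sizeof(off));	/* size must be a multiple of 4 */
		close(fd);

		fd = open("/sys/kernel/debug/dri/0/amdgpu_force_sclk", O_WRONLY);
		write(fd, "1000\n", 5);		/* pin SCLK to 1000 (MHz assumed) */
		close(fd);
		return 0;
	}
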
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28..de12d11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
 };
 
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
 			     const struct drm_info_list *files,
 			     unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b897585..faa3e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 			       uint32_t *buf, size_t size, bool write)
 {
-	uint64_t last;
 	unsigned long flags;
+	uint32_t hi = ~0;
+	uint64_t last;
 
-	last = size - 4;
-	for (last += pos; pos <= last; pos += 4) {
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+
+#ifdef CONFIG_64BIT
+	last = min(pos + size, adev->gmc.visible_vram_size);
+	if (last > pos) {
+		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+		size_t count = last - pos;
+
+		if (write) {
+			memcpy_toio(addr, buf, count);
+			mb();
+			amdgpu_asic_flush_hdp(adev, NULL);
+		} else {
+			amdgpu_asic_invalidate_hdp(adev, NULL);
+			mb();
+			memcpy_fromio(buf, addr, count);
+		}
+
+		if (count == size)
+			return;
+
+		pos += count;
+		buf += count / 4;
+		size -= count;
+	}
+#endif
+
+	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+	for (last = pos + size; pos < last; pos += 4) {
+		uint32_t tmp = pos >> 31;
+
 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+		if (tmp != hi) {
+			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+			hi = tmp;
+		}
 		if (write)
 			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 		else
 			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 }
 
 /*
@@ -275,28 +306,10 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 		BUG();
 }
 
-/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- * @v: 32 bit value to write to the register
- * @acc_flags: access flags which require special behavior
- *
- * Writes the value specified to the offset specified.
- */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-		    uint32_t acc_flags)
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
+				       uint32_t v, uint32_t acc_flags)
 {
 	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
 
-	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
-		adev->last_mm_index = v;
-	}
-
-	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
-		return amdgpu_kiq_wreg(adev, reg, v);
-
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -314,6 +327,48 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 }
 
 /**
+ * amdgpu_mm_wreg - write to a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+		adev->last_mm_index = v;
+	}
+
+	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+		return amdgpu_kiq_wreg(adev, reg, v);
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
+
+/*
+ * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if in range
+ *
+ * This function is invoked only for debugfs register access.
+ */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (amdgpu_sriov_fullaccess(adev) &&
+	    adev->gfx.rlc.funcs &&
+	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
+		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+	}
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
+
+/**
  * amdgpu_io_rreg - read an IO register
  *
  * @adev: amdgpu_device pointer
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
 	* locking inversion with the driver load path. And the access here is
 	* completely racy anyway. So don't bother with locking for now.
 	*/
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -2344,15 +2399,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		}
 		adev->ip_blocks[i].status.hw = false;
 		/* handle putting the SMC in the appropriate state */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
-			if (r) {
-				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
-					  adev->mp1_state, r);
-				return r;
+		if (!amdgpu_sriov_vf(adev)) {
+			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+				if (r) {
+					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+							adev->mp1_state, r);
+					return r;
+				}
 			}
 		}
-
 		adev->ip_blocks[i].status.hw = false;
 	}
 
@@ -2686,6 +2742,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 
 		if (adev->asic_reset_res)
 			goto fail;
+
+		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+			adev->mmhub.funcs->reset_ras_error_count(adev);
 	} else {
 
 		task_barrier_full(&hive->tb);
@@ -2800,7 +2859,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	if (amdgpu_emu_mode == 1)
-		adev->usec_timeout *= 2;
+		adev->usec_timeout *= 10;
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
@@ -3088,22 +3147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	} else
 		adev->ucode_sysfs_en = true;
 
-	r = amdgpu_debugfs_gem_init(adev);
-	if (r)
-		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_regs_init(adev);
-	if (r)
-		DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_firmware_init(adev);
-	if (r)
-		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_init(adev);
-	if (r)
-		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
 	if ((amdgpu_testing & 1)) {
 		if (adev->accel_working)
 			amdgpu_test_moves(adev);
@@ -3177,6 +3220,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	flush_delayed_work(&adev->delayed_init_work);
 	adev->shutdown = true;
 
+	/* make sure the IB test has finished before entering exclusive mode
+	 * to avoid preemption on the IB test
+	 */
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3268,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->rmmio = NULL;
 	amdgpu_device_doorbell_fini(adev);
 
-	amdgpu_debugfs_regs_cleanup(adev);
 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
 	if (adev->ucode_sysfs_en)
 		amdgpu_ucode_sysfs_fini(adev);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
-	amdgpu_debugfs_preempt_cleanup(adev);
 	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
 		amdgpu_discovery_fini(adev);
 }
@@ -3309,7 +3356,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		}
 	}
 
-	amdgpu_amdkfd_suspend(adev);
+	amdgpu_amdkfd_suspend(adev, !fbcon);
 
 	amdgpu_ras_suspend(adev);
 
@@ -3393,7 +3440,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev);
+	r = amdgpu_amdkfd_resume(adev, !fbcon);
 	if (r)
 		return r;
 
@@ -3866,8 +3913,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 		}
 	}
 
-	if (!r && amdgpu_ras_intr_triggered())
+	if (!r && amdgpu_ras_intr_triggered()) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			if (tmp_adev->mmhub.funcs &&
+			    tmp_adev->mmhub.funcs->reset_ras_error_count)
+				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+		}
+
 		amdgpu_ras_intr_cleared();
+	}
 
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index f950927..27d8ae1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 
 			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
 				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
-					DRM_INFO("set register base offset for %s\n",
+					DRM_DEBUG("set register base offset for %s\n",
 							hw_id_names[le16_to_cpu(ip->hw_id)]);
 					adev->reg_offset[hw_ip][ip->number_instance] =
 						ip->base_address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6d520a3..84cee27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
 	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 	    (int)(work->target_vblank -
-		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
 		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
 		return;
 	}
@@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	if (!adev->enable_virtual_display)
 		work->base = amdgpu_bo_gpu_offset(new_abo);
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
-		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+		amdgpu_get_vblank_counter_kms(crtc);
 
 	/* we borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
 		return AMDGPU_CRTC_IRQ_NONE;
 	}
 }
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+			bool in_vblank_irq, int *vpos,
+			int *hpos, ktime_t *stime, ktime_t *etime,
+			const struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+
+	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+						  stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47..ffeb20f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 }
 
 /**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	/* pin buffer into GTT */
+	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	amdgpu_bo_unpin(bo);
+}
+
+/**
  * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
  * @attach: DMA-buf attachment
  * @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	struct sg_table *sgt;
 	long r;
 
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-	if (r)
-		return ERR_PTR(r);
+	if (!bo->pin_count) {
+		/* move buffer into GTT */
+		struct ttm_operation_ctx ctx = { false, false };
+
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (r)
+			return ERR_PTR(r);
+
+	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+		     AMDGPU_GEM_DOMAIN_GTT)) {
+		return ERR_PTR(-EBUSY);
+	}
 
 	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
 	if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
 				 struct sg_table *sgt,
 				 enum dma_data_direction dir)
 {
-	struct drm_gem_object *obj = attach->dmabuf->priv;
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
 	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 	sg_free_table(sgt);
 	kfree(sgt);
-	amdgpu_bo_unpin(bo);
 }
 
 /**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 }
 
 const struct dma_buf_ops amdgpu_dmabuf_ops = {
-	.dynamic_mapping = true,
 	.attach = amdgpu_dma_buf_attach,
 	.detach = amdgpu_dma_buf_detach,
+	.pin = amdgpu_dma_buf_pin,
+	.unpin = amdgpu_dma_buf_unpin,
 	.map_dma_buf = amdgpu_dma_buf_map,
 	.unmap_dma_buf = amdgpu_dma_buf_unmap,
 	.release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
 }
 
 /**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->importer_priv;
+	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
+	struct ttm_placement placement = {};
+	struct amdgpu_vm_bo_base *bo_base;
+	int r;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+		return;
+
+	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+	if (r) {
+		DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+		return;
+	}
+
+	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+		struct amdgpu_vm *vm = bo_base->vm;
+		struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+		if (ticket) {
+			/* When we get an error here it means that somebody
+			 * else is holding the VM lock and updating the page
+			 * tables, so we can just continue here.
+			 */
+			r = dma_resv_lock(resv, ticket);
+			if (r)
+				continue;
+
+		} else {
+			/* TODO: This is more problematic and we actually need
+			 * to allow page table updates without holding the
+			 * lock.
+			 */
+			if (!dma_resv_trylock(resv))
+				continue;
+		}
+
+		r = amdgpu_vm_clear_freed(adev, vm, NULL);
+		if (!r)
+			r = amdgpu_vm_handle_moved(adev, vm);
+
+		if (r && r != -EBUSY)
+			DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+				  r);
+
+		dma_resv_unlock(resv);
+	}
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+	.move_notify = amdgpu_dma_buf_move_notify
+};
+
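With the pin/unpin hooks and the move_notify callback above, amdgpu now
speaks the dynamic DMA-buf protocol in both directions: as an exporter
it signals relocations through dma_buf_move_notify() (see the
amdgpu_bo_move_notify() hunk in amdgpu_object.c below), and as an
importer it passes attach_ops when attaching. A minimal importer-side
sketch, assuming only the API used in this series (my_move_notify,
my_import and the priv pointer are hypothetical names):

#include <linux/dma-buf.h>

static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* backing storage moved while the dma_resv lock was held:
	 * drop cached mappings here; the next map_dma_buf call
	 * rebuilds the sg_table at the new location */
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.move_notify = my_move_notify,
};

static struct dma_buf_attachment *my_import(struct dma_buf *dmabuf,
					    struct device *dev, void *priv)
{
	/* a non-NULL move_notify makes the attachment dynamic: the
	 * exporter may relocate the buffer instead of pinning it */
	return dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, priv);
}
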
+/**
  * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
  * @dev: DRM device
  * @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(obj))
 		return obj;
 
-	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+					&amdgpu_dma_buf_attach_ops, obj);
 	if (IS_ERR(attach)) {
 		drm_gem_object_put(obj);
 		return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index a2e8c3d..ba1bb95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
 
 	return ret;
 }
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+			     uint32_t cstate)
+{
+	int ret = 0;
+	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+	void *pp_handle = adev->powerplay.pp_handle;
+	struct smu_context *smu = &adev->smu;
+
+	if (is_support_sw_smu(adev))
+		ret = smu_set_df_cstate(smu, cstate);
+	else if (pp_funcs &&
+		 pp_funcs->set_df_cstate)
+		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 902ca6c..936d85a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -448,6 +448,8 @@ struct amdgpu_pm {
 	/* powerplay feature */
 	uint32_t pp_feature;
 
+	/* Used for I2C access to various EEPROMs on relevant ASICs */
+	struct i2c_adapter smu_i2c;
 };
 
 #define R600_SSTU_DFLT                               0
@@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
 
 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
 
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+			     uint32_t cstate);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42f4feb..8ea86ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1021,6 +1021,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
 	struct drm_device *dev;
+	struct amdgpu_device *adev;
 	unsigned long flags = ent->driver_data;
 	int ret, retry = 0;
 	bool supports_atomic = false;
@@ -1090,6 +1091,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 	pci_set_drvdata(pdev, dev);
 
+	amdgpu_driver_load_kms(dev, ent->driver_data);
+
 retry_init:
 	ret = drm_dev_register(dev, ent->driver_data);
 	if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1103,11 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	} else if (ret)
 		goto err_pci;
 
+	adev = dev->dev_private;
+	ret = amdgpu_debugfs_init(adev);
+	if (ret)
+		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
 	return 0;
 
 err_pci:
@@ -1119,9 +1127,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 #endif
 		DRM_ERROR("Hotplug removal is not supported\n");
 	drm_dev_unplug(dev);
-	drm_dev_put(dev);
+	amdgpu_driver_unload_kms(dev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
+	drm_dev_put(dev);
 }
 
 static void
@@ -1220,11 +1229,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 		}
 	}
 
+	adev->in_runpm = true;
 	if (amdgpu_device_supports_boco(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 	drm_kms_helper_poll_disable(drm_dev);
 
 	ret = amdgpu_device_suspend(drm_dev, false);
+	if (ret)
+		return ret;
+
 	if (amdgpu_device_supports_boco(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
 		 * PCI core handles it for _PR3.
@@ -1278,6 +1291,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 	drm_kms_helper_poll_enable(drm_dev);
 	if (amdgpu_device_supports_boco(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	adev->in_runpm = false;
 	return 0;
 }
 
@@ -1285,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_dev->dev_private;
-	struct drm_crtc *crtc;
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	int ret = 1;
 
 	if (!adev->runpm) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
 
-	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
-		if (crtc->enabled) {
-			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-			return -EBUSY;
+	if (amdgpu_device_has_dc_support(adev)) {
+		struct drm_crtc *crtc;
+
+		drm_modeset_lock_all(drm_dev);
+
+		drm_for_each_crtc(crtc, drm_dev) {
+			if (crtc->state->active) {
+				ret = -EBUSY;
+				break;
+			}
 		}
+
+		drm_modeset_unlock_all(drm_dev);
+
+	} else {
+		struct drm_connector *list_connector;
+		struct drm_connector_list_iter iter;
+
+		mutex_lock(&drm_dev->mode_config.mutex);
+		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+		drm_connector_list_iter_begin(drm_dev, &iter);
+		drm_for_each_connector_iter(list_connector, &iter) {
+			if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+				ret = -EBUSY;
+				break;
+			}
+		}
+
+		drm_connector_list_iter_end(&iter);
+
+		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+		mutex_unlock(&drm_dev->mode_config.mutex);
 	}
 
+	if (ret == -EBUSY)
+		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_autosuspend(dev);
-	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
-	return 1;
+	return ret;
 }
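
The rewritten idle callback leans on the runtime-PM convention that a
positive return value stops the core from suspending synchronously
while the autosuspend timer stays armed. A hedged sketch of that
convention (my_runtime_idle and its busy check are hypothetical; the
pm_runtime_* calls are the stock kernel API):

#include <linux/pm_runtime.h>

static bool my_device_is_busy(struct device *dev);	/* hypothetical */

static int my_runtime_idle(struct device *dev)
{
	if (my_device_is_busy(dev))
		return -EBUSY;		/* veto: stay powered up */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	return 1;			/* defer to autosuspend */
}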
 
 long amdgpu_drm_ioctl(struct file *filp,
@@ -1377,32 +1422,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
 	return 0;
 }
 
-static bool
-amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
-				 bool in_vblank_irq, int *vpos, int *hpos,
-				 ktime_t *stime, ktime_t *etime,
-				 const struct drm_display_mode *mode)
-{
-	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
-						  stime, etime, mode);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_ATOMIC |
 	    DRIVER_GEM |
 	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
 	    DRIVER_SYNCOBJ_TIMELINE,
-	.load = amdgpu_driver_load_kms,
 	.open = amdgpu_driver_open_kms,
 	.postclose = amdgpu_driver_postclose_kms,
 	.lastclose = amdgpu_driver_lastclose_kms,
-	.unload = amdgpu_driver_unload_kms,
-	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
-	.enable_vblank = amdgpu_enable_vblank_kms,
-	.disable_vblank = amdgpu_disable_vblank_kms,
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-	.get_scanout_position = amdgpu_get_crtc_scanout_position,
 	.irq_handler = amdgpu_irq_handler,
 	.ioctls = amdgpu_ioctls_kms,
 	.gem_free_object_unlocked = amdgpu_gem_object_free,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc6..9ae7b61 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -336,15 +336,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
 	drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
 			&amdgpu_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
-				 AMDGPUFB_CONN_LIMIT);
+	ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
 	if (ret) {
 		kfree(rfbdev);
 		return ret;
 	}
 
-	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	if (!amdgpu_device_has_dc_support(adev))
 		drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3c01252..7531527 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	if (amdgpu_debugfs_fence_init(adev))
-		dev_err(adev->dev, "fence debugfs file creation failed\n");
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 0f960b4..6b9c919 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 	return adev->gfx.mec.num_mec > 1;
 }
 
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+					       int queue)
+{
+	/* Policy: make queue 0 of each pipe a high priority compute queue */
+	return (queue == 0);
+}
+
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
 	int i, queue, pipe, mec;
@@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
 		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
 					   RESET_QUEUES, 0, 0);
 
-	return amdgpu_ring_test_ring(kiq_ring);
+	return amdgpu_ring_test_helper(kiq_ring);
 }
 
 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
 	int r;
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "gfx_err_count",
-		.debugfs_name = "gfx_err_inject",
 	};
 	struct ras_ih_if ih_info = {
 		.cb = amdgpu_gfx_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ca17ffb..5825692 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -41,6 +41,15 @@
 #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
+enum gfx_pipe_priority {
+	AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
+	AMDGPU_GFX_PIPE_PRIO_HIGH,
+	AMDGPU_GFX_PIPE_PRIO_MAX
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM  0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM  15
+
 struct amdgpu_mec {
 	struct amdgpu_bo	*hpd_eop_obj;
 	u64			hpd_eop_gpu_addr;
@@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
 	unsigned num_gpus;
 	unsigned multi_gpu_tile_size;
 	unsigned mc_arb_ramcfg;
+	unsigned num_banks;
+	unsigned num_ranks;
 	unsigned gb_addr_config;
 	unsigned num_rbs;
 	unsigned gs_vgt_table_depth;
@@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
 				 u32 queue, u32 vmid);
 	int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
 	int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
+	void (*reset_ras_error_count) (struct amdgpu_device *adev);
 };
 
 struct sq_work {
@@ -278,8 +290,9 @@ struct amdgpu_gfx {
 	uint32_t			num_gfx_sched;
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+	struct drm_gpu_scheduler        **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
 	struct drm_gpu_scheduler	*compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
-	uint32_t			num_compute_sched;
+	uint32_t                        num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
 	unsigned			num_compute_rings;
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
@@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 				 int *mec, int *pipe, int *queue);
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
 				     int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+					       int queue);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
 			       int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6065583..ccbd7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -48,7 +48,6 @@
  * produce command buffers which are sent to the kernel and
  * put in IBs for execution by the requested ring.
  */
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
 
 /**
  * amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 	}
 
 	adev->ib_pool_ready = true;
-	if (amdgpu_debugfs_sa_init(adev)) {
-		dev_err(adev->dev, "failed to register debugfs file for SA\n");
-	}
+
 	return 0;
 }
 
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
 
 #endif
 
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be88..4981e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
 	drm_sched_job_cleanup(s_job);
 
-	amdgpu_ring_priority_put(ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
@@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f)
 {
 	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
 	int r;
 
 	if (!f)
@@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60591db..fd1dc32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		goto done_free;
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_request_full_gpu(adev, false);
-
 	if (adev->runpm) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
@@ -170,10 +167,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 
 	if (amdgpu_device_supports_boco(dev) &&
-	    (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+	    (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
 		adev->runpm = true;
 	else if (amdgpu_device_supports_baco(dev) &&
-		 (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+		 (amdgpu_runtime_pm != 0) &&
+		 (adev->asic_type >= CHIP_TOPAZ) &&
+		 (adev->asic_type != CHIP_VEGA10) &&
+		 (adev->asic_type != CHIP_VEGA20) &&
+		 (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
+		adev->runpm = true;
+	else if (amdgpu_device_supports_baco(dev) &&
+		 (amdgpu_runtime_pm > 0))  /* enable runpm if runpm=1 on CI */
 		adev->runpm = true;
 
 	/* Call ACPI methods: require modeset init
@@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 /**
  * amdgpu_get_vblank_counter_kms - get frame count
  *
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
  *
  * Gets the frame count on the requested crtc (all asics).
  * Returns frame count on success, -EINVAL on failure.
  */
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct amdgpu_device *adev = dev->dev_private;
 	int vpos, hpos, stat;
 	u32 count;
@@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 /**
  * amdgpu_enable_vblank_kms - enable vblank interrupt
  *
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
  *
  * Enable the interrupt on the requested crtc (all asics).
  * Returns 0 on success, -EINVAL on failure.
  */
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct amdgpu_device *adev = dev->dev_private;
 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 
@@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 /**
  * amdgpu_disable_vblank_kms - disable vblank interrupt
  *
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
  *
  * Disable the interrupt on the requested crtc (all asics).
  */
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct amdgpu_device *adev = dev->dev_private;
 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index 676c48c..ead3dc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
 	};
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "mmhub_err_count",
-		.debugfs_name = "mmhub_err_inject",
 	};
 
 	if (!adev->mmhub.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1cd7894..e89fb35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
 	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*query_ras_error_count)(struct amdgpu_device *adev,
 					void *ras_error_status);
+	void (*reset_ras_error_count)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index eb9975f..37ba07e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode);
 int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
 
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+			bool in_vblank_irq, int *vpos,
+			int *hpos, ktime_t *stime, ktime_t *etime,
+			const struct drm_display_mode *mode);
+
 /* fbdev layer */
 int amdgpu_fbdev_init(struct amdgpu_device *adev);
 void amdgpu_fbdev_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 7d5c3a9..6201a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
 	};
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "pcie_bif_err_count",
-		.debugfs_name = "pcie_bif_err_inject",
 	};
 
 	if (!adev->nbio.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b4..c687f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/dma-buf.h>
 
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		return 0;
 	}
 
+	if (bo->tbo.base.import_attach)
+		dma_buf_pin(bo->tbo.base.import_attach);
+
 	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 
 	amdgpu_bo_subtract_pin_size(bo);
 
+	if (bo->tbo.base.import_attach)
+		dma_buf_unpin(bo->tbo.base.import_attach);
+
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 
 	amdgpu_bo_kunmap(abo);
 
+	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+	    bo->mem.mem_type != TTM_PL_SYSTEM)
+		dma_buf_move_notify(abo->tbo.base.dma_buf);
+
 	/* remember the eviction */
 	if (evict)
 		atomic64_inc(&adev->num_evictions);
@@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (abo->kfd_bo)
 		amdgpu_amdkfd_unreserve_memory_limit(abo);
 
+	/* We only remove the fence if the resv has individualized. */
+	WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+			&& bo->base.resv != &bo->base._resv);
+	if (bo->base.resv == &bo->base._resv)
+		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
 	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
@@ -1403,27 +1420,49 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 }
 
 /**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
  *
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
  * @owner: fence owner
  * @intr: Whether the wait is interruptible
  *
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr)
+{
+	struct amdgpu_sync sync;
+	int r;
+
+	amdgpu_sync_create(&sync);
+	amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
+	r = amdgpu_sync_wait(&sync, intr);
+	amdgpu_sync_free(&sync);
+	return r;
+}
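
The new helper packages the create/resv/wait/free dance so callers can
wait on a bare reservation object without holding a BO. A hedged usage
sketch (wait_for_foreign_fences is a hypothetical caller;
AMDGPU_SYNC_NE_OWNER is the mode the wrapper below uses):

/* interruptibly wait for every fence on @resv not emitted by @owner */
static int wait_for_foreign_fences(struct amdgpu_device *adev,
				   struct dma_resv *resv, void *owner)
{
	return amdgpu_bo_sync_wait_resv(adev, resv,
					AMDGPU_SYNC_NE_OWNER, owner, true);
}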
+
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
  * Returns:
  * 0 on success, errno otherwise.
  */
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct amdgpu_sync sync;
-	int r;
 
-	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
-	r = amdgpu_sync_wait(&sync, intr);
-	amdgpu_sync_free(&sync);
-
-	return r;
+	return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+					AMDGPU_SYNC_NE_OWNER, owner, intr);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 36dec51..5e39ecd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+			     enum amdgpu_sync_mode sync_mode, void *owner,
+			     bool intr);
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);
 #endif
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
 
 bool amdgpu_bo_support_uswc(u64 bo_flags);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b03b1eb..f197f1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -41,8 +41,6 @@
 #include "hwmgr.h"
 #define WIDTH_4K 3840
 
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
 static const struct cg_flag_name clocks[] = {
 	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
 	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -94,6 +92,9 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 		if (adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
 		mutex_unlock(&adev->pm.mutex);
+
+		if (is_support_sw_smu(adev))
+			smu_set_ac_dc(&adev->smu);
 	}
 }
 
@@ -3398,11 +3399,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		DRM_ERROR("failed to create device file unique_id\n");
 		return ret;
 	}
-	ret = amdgpu_debugfs_pm_init(adev);
-	if (ret) {
-		DRM_ERROR("Failed to register debugfs file for dpm!\n");
-		return ret;
-	}
 
 	if ((adev->asic_type >= CHIP_VEGA10) &&
 	    !(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3665,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
 };
 #endif
 
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 3da1da2..5db0ef8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
 
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 146f966..be50867 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/firmware.h>
+#include <linux/dma-mapping.h>
 
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
@@ -38,6 +39,42 @@
 
 static void psp_set_funcs(struct amdgpu_device *adev);
 
+static int psp_sysfs_init(struct amdgpu_device *adev);
+static void psp_sysfs_fini(struct amdgpu_device *adev);
+
+/*
+ * Due to DF Cstate management centralized to PMFW, the firmware
+ * loading sequence will be updated as below:
+ *   - Load KDB
+ *   - Load SYS_DRV
+ *   - Load tOS
+ *   - Load PMFW
+ *   - Setup TMR
+ *   - Load other non-psp fw
+ *   - Load ASD
+ *   - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ *   - Arcturus
+ *   - Navi12 and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+	struct amdgpu_device *adev = psp->adev;
+
+	psp->pmfw_centralized_cstate_management = false;
+
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	if ((adev->asic_type == CHIP_ARCTURUS) ||
+	    (adev->asic_type >= CHIP_NAVI12))
+		psp->pmfw_centralized_cstate_management = true;
+}
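
The flag set here reorders firmware loading further down in this file:
psp_hw_start() skips the early TMR setup and psp_np_fw_load() performs
it right after the PMFW image instead. A sketch of the resulting
decision (setup_tmr, load_pmfw and load_remaining_fw are illustrative
stand-ins for the psp_* helpers below, not real functions):

/* illustrative stand-ins for the psp_* helpers below */
static void setup_tmr(struct psp_context *psp);
static void load_pmfw(struct psp_context *psp);
static int load_remaining_fw(struct psp_context *psp);

static int load_order_sketch(struct psp_context *psp)
{
	if (!psp->pmfw_centralized_cstate_management)
		setup_tmr(psp);		/* classic order: TMR after tOS */

	load_pmfw(psp);			/* SMC ucode */

	if (psp->pmfw_centralized_cstate_management)
		setup_tmr(psp);		/* new order: TMR after PMFW */

	return load_remaining_fw(psp);	/* ASD, TAs, other non-psp fw */
}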
+
 static int psp_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +112,8 @@ static int psp_early_init(void *handle)
 
 	psp->adev = adev;
 
+	psp_check_pmfw_centralized_cstate_management(psp);
+
 	return 0;
 }
 
@@ -101,6 +140,13 @@ static int psp_sw_init(void *handle)
 		return ret;
 	}
 
+	if (adev->asic_type == CHIP_NAVI10) {
+		ret = psp_sysfs_init(adev);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -117,6 +163,10 @@ static int psp_sw_fini(void *handle)
 		release_firmware(adev->psp.ta_fw);
 		adev->psp.ta_fw = NULL;
 	}
+
+	if (adev->asic_type == CHIP_NAVI10)
+		psp_sysfs_fini(adev);
+
 	return 0;
 }
 
@@ -150,6 +200,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	int ret;
 	int index;
 	int timeout = 2000;
+	bool ras_intr = false;
 
 	mutex_lock(&psp->mutex);
 
@@ -174,7 +225,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		 * because gpu reset thread triggered and lock resource should
 		 * be released for psp resume sequence.
 		 */
-		if (amdgpu_ras_intr_triggered())
+		ras_intr = amdgpu_ras_intr_triggered();
+		if (ras_intr)
 			break;
 		msleep(1);
 		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -187,7 +239,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	 * during psp initialization to avoid breaking hw_init and it doesn't
 	 * return -EINVAL.
 	 */
-	if (psp->cmd_buf_mem->resp.status || !timeout) {
+	if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
 		if (ucode)
 			DRM_WARN("failed to load ucode id (%d) ",
 				  ucode->ucode_id);
@@ -558,7 +610,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
 }
 
-static int psp_xgmi_terminate(struct psp_context *psp)
+int psp_xgmi_terminate(struct psp_context *psp)
 {
 	int ret;
 
@@ -579,7 +631,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
 	return 0;
 }
 
-static int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp)
 {
 	struct ta_xgmi_shared_memory *xgmi_cmd;
 	int ret;
@@ -1081,7 +1133,7 @@ static int psp_hw_start(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 	int ret;
 
-	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+	if (!amdgpu_sriov_vf(adev)) {
 		if (psp->kdb_bin_size &&
 		    (psp->funcs->bootloader_load_kdb != NULL)) {
 			ret = psp_bootloader_load_kdb(psp);
@@ -1116,10 +1168,17 @@ static int psp_hw_start(struct psp_context *psp)
 		return ret;
 	}
 
-	ret = psp_tmr_load(psp);
-	if (ret) {
-		DRM_ERROR("PSP load tmr failed!\n");
-		return ret;
+	/*
+	 * For those ASICs with DF Cstate management centralized
+	 * to PMFW, TMR setup should be performed after PMFW
+	 * loaded and before other non-psp firmware loaded.
+	 */
+	if (!psp->pmfw_centralized_cstate_management) {
+		ret = psp_tmr_load(psp);
+		if (ret) {
+			DRM_ERROR("PSP load tmr failed!\n");
+			return ret;
+		}
 	}
 
 	return 0;
@@ -1316,9 +1375,10 @@ static int psp_np_fw_load(struct psp_context *psp)
 	struct amdgpu_firmware_info *ucode;
 	struct amdgpu_device* adev = psp->adev;
 
-	if (psp->autoload_supported) {
+	if (psp->autoload_supported ||
+	    psp->pmfw_centralized_cstate_management) {
 		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-		if (!ucode->fw)
+		if (!ucode->fw || amdgpu_sriov_vf(adev))
 			goto out;
 
 		ret = psp_execute_np_fw_load(psp, ucode);
@@ -1326,6 +1386,14 @@ static int psp_np_fw_load(struct psp_context *psp)
 			return ret;
 	}
 
+	if (psp->pmfw_centralized_cstate_management) {
+		ret = psp_tmr_load(psp);
+		if (ret) {
+			DRM_ERROR("PSP load tmr failed!\n");
+			return ret;
+		}
+	}
+
 out:
 	for (i = 0; i < adev->firmware.max_ucodes; i++) {
 		ucode = &adev->firmware.ucode[i];
@@ -1333,7 +1401,9 @@ static int psp_np_fw_load(struct psp_context *psp)
 			continue;
 
 		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
-		    (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+		    (psp_smu_reload_quirk(psp) ||
+		     psp->autoload_supported ||
+		     psp->pmfw_centralized_cstate_management))
 			continue;
 
 		if (amdgpu_sriov_vf(adev) &&
@@ -1444,16 +1514,6 @@ static int psp_load_fw(struct amdgpu_device *adev)
 		return ret;
 	}
 
-	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		ret = psp_xgmi_initialize(psp);
-		/* Warning the XGMI seesion initialize failure
-		 * Instead of stop driver initialization
-		 */
-		if (ret)
-			dev_err(psp->adev->dev,
-				"XGMI: Failed to initialize XGMI session\n");
-	}
-
 	if (psp->adev->psp.ta_fw) {
 		ret = psp_ras_initialize(psp);
 		if (ret)
@@ -1518,10 +1578,6 @@ static int psp_hw_fini(void *handle)
 	void *tmr_buf;
 	void **pptr;
 
-	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
-	    psp->xgmi_context.initialized == 1)
-                psp_xgmi_terminate(psp);
-
 	if (psp->adev->psp.ta_fw) {
 		psp_ras_terminate(psp);
 		psp_dtm_terminate(psp);
@@ -1777,6 +1833,97 @@ static int psp_set_powergating_state(void *handle,
 	return 0;
 }
 
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	uint32_t fw_ver;
+	int ret;
+
+	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+		DRM_INFO("PSP block is not ready yet.");
+		return -EBUSY;
+	}
+
+	mutex_lock(&adev->psp.mutex);
+	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+	mutex_unlock(&adev->psp.mutex);
+
+	if (ret) {
+		DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+		return ret;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+						       struct device_attribute *attr,
+						       const char *buf,
+						       size_t count)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	void *cpu_addr;
+	dma_addr_t dma_addr;
+	int ret;
+	char fw_name[100];
+	const struct firmware *usbc_pd_fw;
+
+	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+		DRM_INFO("PSP block is not ready yet.");
+		return -EBUSY;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
+	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+	if (ret)
+		goto fail;
+
+	/* We need contiguous physical mem to place the FW for psp to access */
+	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
+
+	ret = dma_mapping_error(adev->dev, dma_addr);
+	if (ret)
+		goto rel_buf;
+
+	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+	/*
+	 * x86 specific workaround.
+	 * Without it the buffer is invisible in PSP.
+	 *
+	 * TODO Remove once PSP starts snooping CPU cache
+	 */
+#ifdef CONFIG_X86
+	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
+#endif
+
+	mutex_lock(&adev->psp.mutex);
+	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+	mutex_unlock(&adev->psp.mutex);
+
+rel_buf:
+	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
+	release_firmware(usbc_pd_fw);
+
+fail:
+	if (ret) {
+		DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+		return ret;
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+		   psp_usbc_pd_fw_sysfs_read,
+		   psp_usbc_pd_fw_sysfs_write);
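
Together these attributes expose USB-C PD firmware control through
sysfs: a read reports the current firmware version in hex, a write of a
file name (relative to /lib/firmware/amdgpu) flashes that image. A
small user-space sketch for the read side (the card0 path is an
assumption about the sysfs layout on a given system):

#include <stdio.h>

int main(void)
{
	char ver[32];
	FILE *f = fopen("/sys/class/drm/card0/device/usbc_pd_fw", "r");

	if (!f)
		return 1;
	if (fgets(ver, sizeof(ver), f))
		printf("USBC PD FW version: %s", ver);
	fclose(f);
	return 0;
}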
+
 const struct amd_ip_funcs psp_ip_funcs = {
 	.name = "psp",
 	.early_init = psp_early_init,
@@ -1795,6 +1942,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
 	.set_powergating_state = psp_set_powergating_state,
 };
 
+static int psp_sysfs_init(struct amdgpu_device *adev)
+{
+	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
+
+	if (ret)
+		DRM_ERROR("Failed to create USBC PD FW control file!");
+
+	return ret;
+}
+
+static void psp_sysfs_fini(struct amdgpu_device *adev)
+{
+	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
+}
+
 static const struct amdgpu_psp_funcs psp_funcs = {
 	.check_fw_loading_status = psp_check_fw_loading_status,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 6110215..297435c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -114,6 +114,8 @@ struct psp_funcs
 	int (*mem_training)(struct psp_context *psp, uint32_t ops);
 	uint32_t (*ring_get_wptr)(struct psp_context *psp);
 	void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+	int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+	int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
 };
 
 #define AMDGPU_XGMI_MAX_CONNECTED_NODES		64
@@ -264,6 +266,8 @@ struct psp_context
 	atomic_t			fence_value;
 	/* flag to mark whether gfx fw autoload is supported or not */
 	bool				autoload_supported;
+	/* flag to mark whether df cstate management centralized to PMFW */
+	bool				pmfw_centralized_cstate_management;
 
 	/* xgmi ta firmware and buffer */
 	const struct firmware		*ta_fw;
@@ -349,6 +353,14 @@ struct amdgpu_psp_funcs {
 #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
 #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
 
+#define psp_load_usbc_pd_fw(psp, dma_addr) \
+	((psp)->funcs->load_usbc_pd_fw ? \
+	(psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+	((psp)->funcs->read_usbc_pd_fw ? \
+	(psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
 extern const struct amd_ip_funcs psp_ip_funcs;
 
 extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -362,6 +374,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 			uint64_t cmd_gpu_addr, int cmd_size);
 
+int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 
 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index cef94e2..3c32a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -31,6 +31,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
 
 const char *ras_error_string[] = {
@@ -280,6 +281,11 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
 	struct ras_debug_if data;
 	int ret = 0;
 
+	if (amdgpu_ras_intr_triggered()) {
+		DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+		return size;
+	}
+
 	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
 	if (ret)
 		return -EINVAL;
@@ -393,6 +399,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 		.head = obj->head,
 	};
 
+	if (amdgpu_ras_intr_triggered())
+		return snprintf(buf, PAGE_SIZE,
+				"Query currently inaccessible\n");
+
 	if (amdgpu_ras_error_query(obj->adev, &info))
 		return -EINVAL;
 
@@ -720,6 +730,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 		if (adev->nbio.funcs->query_ras_error_count)
 			adev->nbio.funcs->query_ras_error_count(adev, &err_data);
 		break;
+	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+		amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+		break;
 	default:
 		break;
 	}
@@ -742,20 +755,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
 	return 0;
 }
 
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
-{
-	uint32_t df_inst_id;
-
-	if ((!adev->df.funcs)                 ||
-	    (!adev->df.funcs->get_df_inst_id) ||
-	    (!adev->df.funcs->get_dram_base_addr))
-		return addr;
-
-	df_inst_id = adev->df.funcs->get_df_inst_id(adev);
-
-	return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
-}
-
 /* wrapper of psp_ras_trigger_error */
 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 		struct ras_inject_if *info)
@@ -775,8 +774,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 
 	/* Calculate XGMI relative offset */
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		block_info.address = get_xgmi_relative_phy_addr(adev,
-								block_info.address);
+		block_info.address =
+			amdgpu_xgmi_get_relative_phy_addr(adev,
+							  block_info.address);
 	}
 
 	switch (info->head.block) {
@@ -1122,6 +1122,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 				       &amdgpu_ras_debugfs_ops);
 }
 
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	struct ras_manager *obj;
+	struct ras_fs_if fs_info;
+
+	/*
+	 * This is not called in the resume path, so there is no
+	 * need to check the suspend or GPU reset status here.
+	 */
+	if (!con)
+		return;
+
+	amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+	list_for_each_entry(obj, &con->head, node) {
+		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+			(obj->attr_inuse == 1)) {
+			sprintf(fs_info.debugfs_name, "%s_err_inject",
+					ras_block_str(obj->head.block));
+			fs_info.head = obj->head;
+			amdgpu_ras_debugfs_create(adev, &fs_info);
+		}
+	}
+}
+
 void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
 		struct ras_common_if *head)
 {
@@ -1154,7 +1180,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
 {
 	amdgpu_ras_sysfs_create_feature_node(adev);
-	amdgpu_ras_debugfs_create_ctrl_node(adev);
 
 	return 0;
 }
@@ -1319,6 +1344,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
 }
 /* ih end */
 
+/* traverse all IPs except NBIO to query the error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+	struct ras_manager *obj;
+
+	if (!con)
+		return;
+
+	list_for_each_entry(obj, &con->head, node) {
+		struct ras_query_if info = {
+			.head = obj->head,
+		};
+
+		/*
+		 * The PCIE_BIF IP has its own ISR for RAS controller
+		 * interrupts, and the specific RAS counter query is
+		 * done there. So skip this block in the common
+		 * sync-flood interrupt path.
+		 */
+		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+			continue;
+
+		amdgpu_ras_error_query(adev, &info);
+	}
+}
+
 /* recovery begin */
 
 /* return 0 on success.
@@ -1373,6 +1425,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
 	struct amdgpu_ras *ras =
 		container_of(work, struct amdgpu_ras, recovery_work);
 
+	/*
+	 * Query and print the non-zero error counters per IP block
+	 * for awareness before recovering the GPU.
+	 */
+	amdgpu_ras_log_on_err_counter(ras->adev);
+
 	if (amdgpu_device_should_recover_gpu(ras->adev))
 		amdgpu_device_gpu_recover(ras->adev, 0);
 	atomic_set(&ras->in_recovery, 0);
@@ -1713,18 +1771,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 	*hw_supported = 0;
 	*supported = 0;
 
-	if (amdgpu_sriov_vf(adev) ||
+	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
 	    (adev->asic_type != CHIP_VEGA20 &&
 	     adev->asic_type != CHIP_ARCTURUS))
 		return;
 
-	if (adev->is_atom_fw &&
-			(amdgpu_atomfirmware_mem_ecc_supported(adev) ||
-			 amdgpu_atomfirmware_sram_ecc_supported(adev)))
-		*hw_supported = AMDGPU_RAS_BLOCK_MASK;
+	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+		DRM_INFO("HBM ECC is active.\n");
+		*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+				1 << AMDGPU_RAS_BLOCK__DF);
+	} else {
+		DRM_INFO("HBM ECC is not present.\n");
+	}
+
+	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+		DRM_INFO("SRAM ECC is active.\n");
+		*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+				1 << AMDGPU_RAS_BLOCK__DF);
+	} else
+		DRM_INFO("SRAM ECC is not present.\n");
+
+	/* hw_supported needs to be clipped to the valid RAS block mask. */
+	*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
 
 	*supported = amdgpu_ras_enable == 0 ?
-				0 : *hw_supported & amdgpu_ras_mask;
+			0 : *hw_supported & amdgpu_ras_mask;
 }
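
The two branches build hw_supported from complementary halves: HBM ECC
covers the UMC and DF blocks, SRAM ECC covers everything else. A worked
sketch of the bit math, assuming the AMDGPU_RAS_BLOCK__* values are
plain bit indices as the shifts above imply:

	/* HBM ECC only: */
	hw_supported  =  (1 << AMDGPU_RAS_BLOCK__UMC) |
			 (1 << AMDGPU_RAS_BLOCK__DF);
	/* SRAM ECC only: every bit except UMC and DF */
	hw_supported  = ~((1 << AMDGPU_RAS_BLOCK__UMC) |
			  (1 << AMDGPU_RAS_BLOCK__DF));
	/* the final &= clips the ones' complement back to bits that
	 * name real RAS blocks */
	hw_supported &= AMDGPU_RAS_BLOCK_MASK;
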
 
 int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1825,8 +1895,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
 			goto interrupt;
 	}
 
-	amdgpu_ras_debugfs_create(adev, fs_info);
-
 	r = amdgpu_ras_sysfs_create(adev, fs_info);
 	if (r)
 		goto sysfs;
@@ -1835,7 +1903,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
 cleanup:
 	amdgpu_ras_sysfs_remove(adev, ras_block);
 sysfs:
-	amdgpu_ras_debugfs_remove(adev, ras_block);
 	if (ih_info->cb)
 		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
 interrupt:
@@ -1852,7 +1919,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
 		return;
 
 	amdgpu_ras_sysfs_remove(adev, ras_block);
-	amdgpu_ras_debugfs_remove(adev, ras_block);
 	if (ih_info->cb)
                 amdgpu_ras_interrupt_remove_handler(adev, ih_info);
 	amdgpu_ras_feature_enable(adev, ras_block, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index a5fe29a..55c3ece 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
 void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
 		struct ras_fs_if *head);
 
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
 void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
 		struct ras_common_if *head);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2a8e0489..c009609 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -25,10 +25,11 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 #include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
+#include "atom.h"
 
-#define EEPROM_I2C_TARGET_ADDR_ARCTURUS  0xA8
-#define EEPROM_I2C_TARGET_ADDR_VEGA20    0xA0
+#define EEPROM_I2C_TARGET_ADDR_VEGA20		0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS		0xA8
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342	0xA0
 
 /*
  * The two macros below represent the actual size in bytes that
@@ -55,6 +56,45 @@
 
 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
 
+static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+				       uint16_t *i2c_addr)
+{
+	struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+	if (!i2c_addr || !atom_ctx)
+		return false;
+
+	if (strnstr(atom_ctx->vbios_version,
+	            "D342",
+		    sizeof(atom_ctx->vbios_version)))
+		*i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+	else
+		*i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+
+	return true;
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+				  uint16_t *i2c_addr)
+{
+	if (!i2c_addr)
+		return false;
+
+	switch (adev->asic_type) {
+	case CHIP_VEGA20:
+		*i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+		break;
+
+	case CHIP_ARCTURUS:
+		return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+
+	default:
+		return false;
+	}
+
+	return true;
+}
+
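Note that per the defines above the Arcturus D342 SKU shares the Vega20
target address (both 0xA0), so the strnstr() match on the VBIOS version
string is what tells the two Arcturus variants apart. A hedged sketch of
the caller side, mirroring what amdgpu_ras_eeprom_init() does below:

	uint16_t i2c_addr;

	if (!__get_eeprom_i2c_addr(adev, &i2c_addr))
		return -EINVAL;			/* unsupported ASIC */
	control->i2c_address = i2c_addr;	/* e.g. 0xA0 on a D342 board */
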
 static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
 					  unsigned char *buff)
 {
@@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
 				 unsigned char *buff)
 {
 	int ret = 0;
+	struct amdgpu_device *adev = to_amdgpu_device(control);
 	struct i2c_msg msg = {
 			.addr	= 0,
 			.flags	= 0,
@@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
 
 	msg.addr = control->i2c_address;
 
-	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
 	if (ret < 1)
 		DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
 
 	return ret;
 }
 
-
-
 static uint32_t  __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
 {
 	int i;
@@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
 			.buf	= buff,
 	};
 
+	/* Verify i2c adapter is initialized */
+	if (!adev->pm.smu_i2c.algo)
+		return -ENOENT;
+
+	if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
+		return -EINVAL;
+
 	mutex_init(&control->tbl_mutex);
 
-	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
-		ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
-		break;
-
-	case CHIP_ARCTURUS:
-		control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
-		ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
-		break;
-
-	default:
-		return 0;
-	}
-
-	if (ret) {
-		DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
-		return ret;
-	}
-
 	msg.addr = control->i2c_address;
-
 	/* Read/Create table header from EEPROM address 0 */
-	ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+	ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
 	if (ret < 1) {
 		DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
 		return ret;
@@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
 	return ret == 1 ? 0 : -EIO;
 }
 
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
-	struct amdgpu_device *adev = to_amdgpu_device(control);
-
-	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-		smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
-		break;
-	case CHIP_ARCTURUS:
-		smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
-		break;
-
-	default:
-		return;
-	}
-}
-
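
With every transfer riding the shared adev->pm.smu_i2c adapter there is
no per-control bus left to tear down, hence the removal of
amdgpu_ras_eeprom_fini() above. A minimal sketch of a raw header read
over the shared bus (EEPROM_TABLE_HEADER_SIZE is an assumed macro name;
the real msg is built at the top of amdgpu_ras_eeprom_init()):

	unsigned char buff[EEPROM_TABLE_HEADER_SIZE] = { 0 };
	struct i2c_msg msg = {
		.addr	= control->i2c_address,
		.flags	= I2C_M_RD,
		.len	= sizeof(buff),
		.buf	= buff,
	};

	/* all EEPROM traffic now rides the shared SMU i2c bus */
	if (i2c_transfer(&adev->pm.smu_i2c, &msg, 1) < 1)
		return -EIO;
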
 static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
 					  struct eeprom_table_record *record,
 					  unsigned char *buff)
@@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
 		control->next_addr += EEPROM_TABLE_RECORD_SIZE;
 	}
 
-	ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+	ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
 	if (ret < 1) {
 		DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ca78f81..7e8647a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
 
 struct amdgpu_ras_eeprom_control {
 	struct amdgpu_ras_eeprom_table_header tbl_hdr;
-	struct i2c_adapter eeprom_accessor;
 	uint32_t next_addr;
 	unsigned int num_recs;
 	struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
 }__attribute__((__packed__));
 
 int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
 int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
 
 int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e5c83e1..a7e1d04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -48,9 +48,6 @@
  * wptr.  The GPU then starts fetching commands and executes
  * them until the pointers are equal again.
  */
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
-				    struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
 
 /**
  * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
 }
 
 /**
- * amdgpu_ring_priority_put - restore a ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Release a request for executing at @priority
- */
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
-			      enum drm_sched_priority priority)
-{
-	int i;
-
-	if (!ring->funcs->set_priority)
-		return;
-
-	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
-		return;
-
-	/* no need to restore if the job is already at the lowest priority */
-	if (priority == DRM_SCHED_PRIORITY_NORMAL)
-		return;
-
-	mutex_lock(&ring->priority_mutex);
-	/* something higher prio is executing, no need to decay */
-	if (ring->priority > priority)
-		goto out_unlock;
-
-	/* decay priority to the next level with a job available */
-	for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
-		if (i == DRM_SCHED_PRIORITY_NORMAL
-				|| atomic_read(&ring->num_jobs[i])) {
-			ring->priority = i;
-			ring->funcs->set_priority(ring, i);
-			break;
-		}
-	}
-
-out_unlock:
-	mutex_unlock(&ring->priority_mutex);
-}
-
-/**
- * amdgpu_ring_priority_get - change the ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Request a ring's priority to be raised to @priority (refcounted).
- */
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
-			      enum drm_sched_priority priority)
-{
-	if (!ring->funcs->set_priority)
-		return;
-
-	if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
-		return;
-
-	mutex_lock(&ring->priority_mutex);
-	if (priority <= ring->priority)
-		goto out_unlock;
-
-	ring->priority = priority;
-	ring->funcs->set_priority(ring, priority);
-
-out_unlock:
-	mutex_unlock(&ring->priority_mutex);
-}
-
-/**
  * amdgpu_ring_init - init driver ring struct.
  *
  * @adev: amdgpu_device pointer
@@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
 		atomic_set(&ring->num_jobs[i], 0);
 
-	if (amdgpu_debugfs_ring_init(adev, ring)) {
-		DRM_ERROR("Failed to register debugfs file for rings !\n");
-	}
-
 	return 0;
 }
 
@@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
  */
 void amdgpu_ring_fini(struct amdgpu_ring *ring)
 {
-	ring->sched.ready = false;
 
 	/* Don't finish a ring that hasn't been initialized */
 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
 		return;
 
+	ring->sched.ready = false;
+
 	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
 	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
 
@@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 			      &ring->gpu_addr,
 			      (void **)&ring->ring);
 
-	amdgpu_debugfs_ring_fini(ring);
-
 	dma_fence_put(ring->vmid_wait);
 	ring->vmid_wait = NULL;
 	ring->me = 0;
@@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
 
 #endif
 
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
-				    struct amdgpu_ring *ring)
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+			     struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
 	struct drm_minor *minor = adev->ddev->primary;
@@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
-{
-#if defined(CONFIG_DEBUG_FS)
-	debugfs_remove(ring->ent);
-#endif
-}
-
 /**
  * amdgpu_ring_test_helper - tests ring and set sched readiness status
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 930316e..9a44301 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
 					uint32_t reg0, uint32_t reg1,
 					uint32_t ref, uint32_t mask);
 	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
-	/* priority functions */
-	void (*set_priority) (struct amdgpu_ring *ring,
-			      enum drm_sched_priority priority);
 	/* Try to soft recover the ring to make the fence signal */
 	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
 	int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -222,6 +219,7 @@ struct amdgpu_ring {
 	struct mutex		priority_mutex;
 	/* protected by priority_mutex */
 	int			priority;
+	bool			has_high_prio;
 
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;
@@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
-			      enum drm_sched_priority priority);
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
-			      enum drm_sched_priority priority);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
 		     unsigned irq_type);
@@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
 
 int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
 
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+			     struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index d3d4707..60bb3e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs {
 	void (*stop)(struct amdgpu_device *adev);
 	void (*reset)(struct amdgpu_device *adev);
 	void (*start)(struct amdgpu_device *adev);
+	void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+	void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+	bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
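
update_spm_vmid gets its caller in amdgpu_vm_flush() further down in
this series, invoked when a reserved VMID is in use; the two RLCG hooks
pair with the amdgpu_sriov_fullaccess() macro added below to route
protected register writes through the RLC under SRIOV. A hedged sketch
of that routing (hypothetical call site; the real one lives in the
device register helpers, not in this hunk):

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
		adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
	else
		WREG32(reg, v);
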
 
 struct amdgpu_rlc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index a2ee30b..250a309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
 	uint32_t index = 0;
 	int r;
 
-	if (vmid == 0 || !amdgpu_mcbp)
+	/* don't enable OS preemption on SDMA under SRIOV */
+	if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
 		return 0;
 
 	r = amdgpu_sdma_get_index_from_ring(ring, &index);
@@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
 	struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "sdma_err_count",
-		.debugfs_name = "sdma_err_inject",
 	};
 
 	if (!ih_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 4853352..4b35220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
 	void (*ras_fini)(struct amdgpu_device *adev);
 	int (*query_ras_error_count)(struct amdgpu_device *adev,
 			uint32_t instance, void *ras_error_status);
+	void (*reset_ras_error_count)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a09b6b9..b863922 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
  *
  * @sync: sync object to add fences from reservation object to
  * @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
  *
  * Sync to the fence
  */
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct dma_resv *resv,
-		     void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
+		     void *owner)
 {
 	struct dma_resv_list *flist;
 	struct dma_fence *f;
-	void *fence_owner;
 	unsigned i;
 	int r = 0;
 
@@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
+		void *fence_owner;
+
 		f = rcu_dereference_protected(flist->shared[i],
 					      dma_resv_held(resv));
+
+		fence_owner = amdgpu_sync_get_owner(f);
+
+		/* Always sync to moves, no matter what */
+		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+			r = amdgpu_sync_fence(sync, f, false);
+			if (r)
+				break;
+		}
+
 		/* We only want to trigger KFD eviction fences on
 		 * evict or move jobs. Skip KFD fences otherwise.
 		 */
-		fence_owner = amdgpu_sync_get_owner(f);
 		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
 		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
 			continue;
 
-		if (amdgpu_sync_same_dev(adev, f)) {
-			/* VM updates only sync with moves but not with user
-			 * command submissions or KFD evictions fences
-			 */
-			if (owner == AMDGPU_FENCE_OWNER_VM &&
-			    fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
-				continue;
+		/* Ignore fences depending on the sync mode */
+		switch (mode) {
+		case AMDGPU_SYNC_ALWAYS:
+			break;
 
-			/* Ignore fence from the same owner and explicit one as
-			 * long as it isn't undefined.
-			 */
-			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
-			    (fence_owner == owner || explicit_sync))
+		case AMDGPU_SYNC_NE_OWNER:
+			if (amdgpu_sync_same_dev(adev, f) &&
+			    fence_owner == owner)
 				continue;
+			break;
+
+		case AMDGPU_SYNC_EQ_OWNER:
+			if (amdgpu_sync_same_dev(adev, f) &&
+			    fence_owner != owner)
+				continue;
+			break;
+
+		case AMDGPU_SYNC_EXPLICIT:
+			continue;
 		}
 
 		r = amdgpu_sync_fence(sync, f, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index d62c2b8..cfbe578 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -31,6 +31,13 @@ struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
 
+enum amdgpu_sync_mode {
+	AMDGPU_SYNC_ALWAYS,
+	AMDGPU_SYNC_NE_OWNER,
+	AMDGPU_SYNC_EQ_OWNER,
+	AMDGPU_SYNC_EXPLICIT
+};
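
These map onto the fence filter in amdgpu_sync_resv() above: ALWAYS
syncs to every fence, NE_OWNER skips fences from the same owner on the
same device, EQ_OWNER syncs only to those, and EXPLICIT ignores all
implicit fences; moving fences (owner UNDEFINED) are taken regardless of
the mode. A hedged call-site sketch with the new signature, mirroring
the SDMA VM backend later in this series:

	/* serialize a delayed page-table update against other
	 * submissions from the same VM */
	r = amdgpu_sync_resv(adev, &job->sync, resv,
			     AMDGPU_SYNC_EQ_OWNER, vm);
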
+
 /*
  * Container for fences used to sync command submissions.
  */
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
 int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
 		      bool explicit);
 int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct dma_resv *resv,
-		     void *owner,
-		     bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
+		     void *owner);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c6e9885..9f44ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -60,20 +60,14 @@
 #include "amdgpu_ras.h"
 #include "bif/bif_4_1_d.h"
 
+#define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
+
 static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 			     struct ttm_mem_reg *mem, unsigned num_pages,
 			     uint64_t offset, unsigned window,
 			     struct amdgpu_ring *ring,
 			     uint64_t *addr);
 
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	return 0;
-}
-
 /**
  * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
  * memory request.
@@ -1034,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
-	if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+	if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
 		uint64_t page_idx = 1;
 
 		r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 		if (r)
 			goto gart_bind_fail;
 
-		/* Patch mtype of the second part BO */
+		/* The memory type of the first page defaults to UC. Now
+		 * modify the memory type to NC from the second page of
+		 * the BO onward.
+		 */
 		flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
 		flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 
@@ -1596,7 +1593,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 
 	while (len && pos < adev->gmc.mc_vram_size) {
 		uint64_t aligned_pos = pos & ~(uint64_t)3;
-		uint32_t bytes = 4 - (pos & 3);
+		uint64_t bytes = 4 - (pos & 3);
 		uint32_t shift = (pos & 3) * 8;
 		uint32_t mask = 0xffffffff << shift;
 
@@ -1605,20 +1602,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 			bytes = len;
 		}
 
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
-		if (!write || mask != 0xffffffff)
-			value = RREG32_NO_KIQ(mmMM_DATA);
-		if (write) {
-			value &= ~mask;
-			value |= (*(uint32_t *)buf << shift) & mask;
-			WREG32_NO_KIQ(mmMM_DATA, value);
-		}
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-		if (!write) {
-			value = (value & mask) >> shift;
-			memcpy(buf, &value, bytes);
+		if (mask != 0xffffffff) {
+			spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+			WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+			WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+			if (!write || mask != 0xffffffff)
+				value = RREG32_NO_KIQ(mmMM_DATA);
+			if (write) {
+				value &= ~mask;
+				value |= (*(uint32_t *)buf << shift) & mask;
+				WREG32_NO_KIQ(mmMM_DATA, value);
+			}
+			spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+			if (!write) {
+				value = (value & mask) >> shift;
+				memcpy(buf, &value, bytes);
+			}
+		} else {
+			bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+			bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+			amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+						  bytes, write);
 		}
 
 		ret += bytes;
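
The rewritten loop only takes the MM_INDEX/MM_DATA register path for the
unaligned head and tail of an access (the short-tail clamp is in the
unchanged lines just above); whole aligned dwords are handed to
amdgpu_device_vram_access() in bulk. Worked example of the head math for
an access at pos = 6 with len = 3:

	aligned_pos = pos & ~(uint64_t)3;	/* 4 */
	bytes = 4 - (pos & 3);			/* 2 */
	shift = (pos & 3) * 8;			/* 16 */
	mask  = 0xffffffff << shift;		/* 0xffff0000 */
	/* mask != 0xffffffff, so bytes 6..7 go through the
	 * read-modify-write path; the remaining byte at pos 8 becomes
	 * a short tail on the next iteration */
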
@@ -1638,7 +1643,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
-	.invalidate_caches = &amdgpu_invalidate_caches,
 	.init_mem_type = &amdgpu_init_mem_type,
 	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
@@ -1836,9 +1840,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	 * The reserved VRAM for memory training must be pinned to the
 	 * specified place on the VRAM, so reserve it early.
 	 */
-	r = amdgpu_ttm_training_reserve_vram_init(adev);
-	if (r)
-		return r;
+	if (!amdgpu_sriov_vf(adev)) {
+		r = amdgpu_ttm_training_reserve_vram_init(adev);
+		if (r)
+			return r;
+	}
 
 	/* allocate memory as required for VGA
 	 * This is used for VGA emulation and pre-OS scanout buffers to
@@ -1911,12 +1917,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	/* Register debugfs entries for amdgpu_ttm */
-	r = amdgpu_ttm_debugfs_init(adev);
-	if (r) {
-		DRM_ERROR("Failed to init debugfs\n");
-		return r;
-	}
 	return 0;
 }
 
@@ -1938,7 +1938,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	if (!adev->mman.initialized)
 		return;
 
-	amdgpu_ttm_debugfs_fini(adev);
 	amdgpu_ttm_training_reserve_vram_fini(adev);
 	/* return the IP Discovery TMR memory back to VRAM */
 	amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2113,8 +2112,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	}
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
-				     AMDGPU_FENCE_OWNER_UNDEFINED,
-				     false);
+				     AMDGPU_SYNC_ALWAYS,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r) {
 			DRM_ERROR("sync failed (%d).\n", r);
 			goto error_free;
@@ -2198,7 +2197,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
-				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+				     AMDGPU_SYNC_ALWAYS,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r) {
 			DRM_ERROR("sync failed (%d).\n", r);
 			goto error_free;
@@ -2279,7 +2279,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 {
 	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
-	int r;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
@@ -2287,27 +2286,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 	if (*pos >= adev->gmc.mc_vram_size)
 		return -ENXIO;
 
+	size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
 	while (size) {
-		unsigned long flags;
-		uint32_t value;
+		size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+		uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
 
-		if (*pos >= adev->gmc.mc_vram_size)
-			return result;
+		amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+		if (copy_to_user(buf, value, bytes))
+			return -EFAULT;
 
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
-		value = RREG32_NO_KIQ(mmMM_DATA);
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-		r = put_user(value, (uint32_t *)buf);
-		if (r)
-			return r;
-
-		result += 4;
-		buf += 4;
-		*pos += 4;
-		size -= 4;
+		result += bytes;
+		buf += bytes;
+		*pos += bytes;
+		size -= bytes;
 	}
 
 	return result;
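
Each pass now moves up to AMDGPU_TTM_VRAM_MAX_DW_READ dwords through the
on-stack bounce buffer instead of one MM_INDEX/MM_DATA round-trip per
dword as before:

	/* per-iteration budget:
	 *   AMDGPU_TTM_VRAM_MAX_DW_READ * 4 = 128 * 4 = 512 bytes,
	 * small enough for the on-stack value[] buffer */
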
@@ -2544,7 +2535,7 @@ static const struct {
 
 #endif
 
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	unsigned count;
@@ -2579,13 +2570,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 	return 0;
 #endif
 }
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
-	unsigned i;
-
-	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
-		debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0dddedc..bd05bbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f4d4085..9dd51f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
 	int r;
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "umc_err_count",
-		.debugfs_name = "umc_err_inject",
 	};
 	struct ras_ih_if ih_info = {
 		.cb = amdgpu_umc_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a92f3b1..5fd32ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			goto err_free;
 	} else {
 		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
-				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+				     AMDGPU_SYNC_ALWAYS,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
 		if (r)
 			goto err_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e..a41272f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence;
 	long r;
 
-	/* temporarily disable ib test for sriov */
-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
 	if (r)
 		goto error;
@@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	r = amdgpu_ring_alloc(ring, 16);
 	if (r)
 		return r;
@@ -656,15 +654,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
-	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL;
 	struct amdgpu_bo *bo = NULL;
 	long r;
 
-	/* temporarily disable ib test for sriov */
-	if (amdgpu_sriov_vf(adev))
-		return 0;
-
 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
 				      AMDGPU_GEM_DOMAIN_VRAM,
 				      &bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index daaf909..f0128f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -270,6 +270,9 @@ struct amdgpu_virt {
 #define amdgpu_sriov_runtime(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
 
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
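
A quick expansion of what the new predicate evaluates to (sketch):

	/*   bare metal:           vf = 0               -> false
	 *   VF, exclusive access: vf = 1, runtime = 0  -> true
	 *   VF, runtime mode:     vf = 1, runtime = 1  -> false
	 */
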
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d16231d..6d9252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
 				      unsigned level)
 {
-	unsigned shift = 0xff;
-
 	switch (level) {
 	case AMDGPU_VM_PDB2:
 	case AMDGPU_VM_PDB1:
 	case AMDGPU_VM_PDB0:
-		shift = 9 * (AMDGPU_VM_PDB0 - level) +
+		return 9 * (AMDGPU_VM_PDB0 - level) +
 			adev->vm_manager.block_size;
-		break;
 	case AMDGPU_VM_PTB:
-		shift = 0;
-		break;
+		return 0;
 	default:
-		dev_err(adev->dev, "the level%d isn't supported.\n", level);
+		return ~0;
 	}
-
-	return shift;
 }
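
With the error print gone this is a pure lookup; an invalid level now
simply yields ~0. For reference, the shifts this computes, assuming the
common block_size of 9 (block_size is per-device, so this is only an
example):

	shift(PDB2) = 9 * 2 + 9 = 27	/* 4 KiB << 27 = 512 GiB per entry */
	shift(PDB1) = 9 * 1 + 9 = 18	/* 4 KiB << 18 =   1 GiB per entry */
	shift(PDB0) = 9 * 0 + 9 =  9	/* 4 KiB <<  9 =   2 MiB per entry */
	shift(PTB)  = 0			/* 4 KiB per entry */
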
 
 /**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
 	else
 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
 }
-
-/**
- * amdgpu_vm_bo_relocated - vm_bo is reloacted
- *
- * @vm_bo: vm_bo which is relocated
- *
- * State for PDs/PTs which needs to update their parent PD.
- */
-static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
-{
-	list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
-}
-
 /**
  * amdgpu_vm_bo_moved - vm_bo is moved
  *
@@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
 }
 
 /**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which need to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+	if (vm_bo->bo->parent)
+		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+	else
+		amdgpu_vm_bo_idle(vm_bo);
+}
+
+/**
  * amdgpu_vm_bo_done - vm_bo is done
  *
  * @vm_bo: vm_bo which is now done
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for TTM and one for the CS job */
-	entry->tv.num_shared = 2;
+	/* Two for VM updates, one for TTM and one for the CS job */
+	entry->tv.num_shared = 4;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
 			vm->update_funcs->map_table(bo);
-			if (bo->parent)
-				amdgpu_vm_bo_relocated(bo_base);
-			else
-				amdgpu_vm_bo_idle(bo_base);
+			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
 
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
 	struct dma_fence *fence = NULL;
 	bool pasid_mapping_needed = false;
 	unsigned patch_offset = 0;
+	bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
 	int r;
 
+	if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 		gds_switch_needed = true;
 		vm_flush_needed = true;
@@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	params.vm = vm;
 	params.direct = direct;
 
-	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
 		return r;
 
@@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 		uint64_t incr, entry_end, pe_start;
 		struct amdgpu_bo *pt;
 
-		/* make sure that the page tables covering the address range are
-		 * actually allocated
-		 */
-		r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
-					params->direct);
-		if (r)
-			return r;
-
-		pt = cursor.entry->base.bo;
-
-		/* The root level can't be a huge page */
-		if (cursor.level == adev->vm_manager.root_level) {
-			if (!amdgpu_vm_pt_descendant(adev, &cursor))
-				return -ENOENT;
-			continue;
+		if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+			/* make sure that the page tables covering the
+			 * address range are actually allocated
+			 */
+			r = amdgpu_vm_alloc_pts(params->adev, params->vm,
+						&cursor, params->direct);
+			if (r)
+				return r;
 		}
 
 		shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 			 * smaller than the address shift. Go to the next
 			 * child entry and try again.
 			 */
-			if (!amdgpu_vm_pt_descendant(adev, &cursor))
-				return -ENOENT;
-			continue;
-		} else if (frag >= parent_shift &&
-			   cursor.level - 1 != adev->vm_manager.root_level) {
+			if (amdgpu_vm_pt_descendant(adev, &cursor))
+				continue;
+		} else if (frag >= parent_shift) {
 			/* If the fragment size is even larger than the parent
-			 * shift we should go up one level and check it again
-			 * unless one level up is the root level.
+			 * shift we should go up one level and check it again.
 			 */
 			if (!amdgpu_vm_pt_ancestor(&cursor))
-				return -ENOENT;
+				return -EINVAL;
 			continue;
 		}
 
+		pt = cursor.entry->base.bo;
+		if (!pt) {
+			/* We need all PDs and PTs for mapping something, */
+			if (flags & AMDGPU_PTE_VALID)
+				return -ENOENT;
+
+			/* but unmapping something can happen at a higher
+			 * level.
+			 */
+			if (!amdgpu_vm_pt_ancestor(&cursor))
+				return -EINVAL;
+
+			pt = cursor.entry->base.bo;
+			shift = parent_shift;
+		}
+
 		/* Looks good so far, calculate parameters for the update */
 		incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
 		mask = amdgpu_vm_entries_mask(adev, cursor.level);
 		pe_start = ((cursor.pfn >> shift) & mask) * 8;
-		entry_end = (uint64_t)(mask + 1) << shift;
+		entry_end = ((uint64_t)mask + 1) << shift;
 		entry_end += cursor.pfn & ~(entry_end - 1);
 		entry_end = min(entry_end, end);
 
@@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 			uint64_t upd_end = min(entry_end, frag_end);
 			unsigned nptes = (upd_end - frag_start) >> shift;
 
+			/* This can happen when higher level PDs are set to
+			 * silent to stop fault floods; still write at least
+			 * one entry.
+			 */
+			nptes = max(nptes, 1u);
 			amdgpu_vm_update_flags(params, pt, cursor.level,
 					       pe_start, dst, nptes, incr,
 					       flags | AMDGPU_PTE_FRAG(frag));
@@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
  * @flags: flags for the entries
@@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       struct amdgpu_vm *vm, bool direct,
-				       struct dma_fence *exclusive,
+				       struct dma_resv *resv,
 				       uint64_t start, uint64_t last,
 				       uint64_t flags, uint64_t addr,
 				       dma_addr_t *pages_addr,
 				       struct dma_fence **fence)
 {
 	struct amdgpu_vm_update_params params;
-	void *owner = AMDGPU_FENCE_OWNER_VM;
+	enum amdgpu_sync_mode sync_mode;
 	int r;
 
 	memset(&params, 0, sizeof(params));
@@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	params.direct = direct;
 	params.pages_addr = pages_addr;
 
-	/* sync to everything except eviction fences on unmapping */
+	/* Implicitly sync to command submissions in the same VM before
+	 * unmapping. Sync to moving fences before mapping.
+	 */
 	if (!(flags & AMDGPU_PTE_VALID))
-		owner = AMDGPU_FENCE_OWNER_KFD;
+		sync_mode = AMDGPU_SYNC_EQ_OWNER;
+	else
+		sync_mode = AMDGPU_SYNC_EXPLICIT;
 
 	amdgpu_vm_eviction_lock(vm);
 	if (vm->evicting) {
@@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		goto error_unlock;
 	}
 
-	r = vm->update_funcs->prepare(&params, owner, exclusive);
+	if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+		struct amdgpu_bo *root = vm->root.base.bo;
+
+		if (!dma_fence_is_signaled(vm->last_direct))
+			amdgpu_bo_fence(root, vm->last_direct, true);
+	}
+
+	r = vm->update_funcs->prepare(&params, resv, sync_mode);
 	if (r)
 		goto error_unlock;
 
@@ -1610,7 +1629,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1626,7 +1645,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
-				      struct dma_fence *exclusive,
+				      struct dma_resv *resv,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 			}
 
-		} else if (flags & AMDGPU_PTE_VALID) {
+		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 			addr += bo_adev->vm_manager.vram_base_offset;
 			addr += pfn << PAGE_SHIFT;
 		}
 
 		last = min((uint64_t)mapping->last, start + max_entries - 1);
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						start, last, flags, addr,
 						dma_addr, fence);
 		if (r)
@@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive, **last_update;
+	struct dma_fence **last_update;
+	struct dma_resv *resv;
 	uint64_t flags;
 	struct amdgpu_device *bo_adev = adev;
 	int r;
@@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	if (clear || !bo) {
 		mem = NULL;
 		nodes = NULL;
-		exclusive = NULL;
+		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
 		struct ttm_dma_tt *ttm;
 
@@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = bo->tbo.moving;
+		resv = bo->tbo.base.resv;
 	}
 
 	if (bo) {
@@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		flags = 0x0;
 	}
 
-	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+	if (clear || (bo && bo->tbo.base.resv ==
+		      vm->root.base.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	}
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
-		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+		r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
 					       mapping, flags, bo_adev, nodes,
 					       last_update);
 		if (r)
@@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence)
 {
+	struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
 	struct amdgpu_bo_va_mapping *mapping;
 	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
@@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		    mapping->start < AMDGPU_GMC_HOLE_START)
 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
-		r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+		r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
 						mapping->start, mapping->last,
 						init_pte_value, 0, NULL, &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return false;
 
 	/* Don't evict VM page tables while they are updated */
-	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
-	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
 		amdgpu_vm_eviction_unlock(bo_base->vm);
 		return false;
 	}
@@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 	if (timeout <= 0)
 		return timeout;
 
-	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
-	if (timeout <= 0)
-		return timeout;
-
-	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+	return dma_fence_wait_timeout(vm->last_direct, true, timeout);
 }
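
Only direct submissions are still tracked in vm->last_direct; delayed
page-table updates are added as shared fences on the root PD reservation
object (see amdgpu_vm_sdma_commit() later in this series), so an
ordinary reservation wait covers them, presumably what the unchanged
lines above this hunk already do. A hedged sketch with the dma_resv API
of this kernel:

	/* wait for all delayed PT updates via the root PD resv */
	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
					    true, true, timeout);
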
 
 /**
@@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
 	vm->last_direct = dma_fence_get_stub();
-	vm->last_delayed = dma_fence_get_stub();
 
 	mutex_init(&vm->eviction_lock);
 	vm->evicting = false;
@@ -2873,7 +2889,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 error_free_delayed:
 	dma_fence_put(vm->last_direct);
-	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
 	dma_fence_wait(vm->last_direct, false);
 	dma_fence_put(vm->last_direct);
-	dma_fence_wait(vm->last_delayed, false);
-	dma_fence_put(vm->last_delayed);
 
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	union drm_amdgpu_vm *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	long timeout = msecs_to_jiffies(2000);
 	int r;
 
 	switch (args->in.op) {
@@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			return r;
 		break;
 	case AMDGPU_VM_OP_UNRESERVE_VMID:
+		if (amdgpu_sriov_runtime(adev))
+			timeout = 8 * timeout;
+
+		/* Wait for the VM to go idle, to make sure the VMID set in
+		 * SPM_VMID is not referenced anymore.
+		 */
+		r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+		if (r)
+			return r;
+
+		r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+		if (r < 0)
+			return r;
+
+		amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
 		break;
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b4640ab..06fe30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
 
 struct amdgpu_vm_update_funcs {
 	int (*map_table)(struct amdgpu_bo *bo);
-	int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
-		       struct dma_fence *exclusive);
+	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+		       enum amdgpu_sync_mode sync_mode);
 	int (*update)(struct amdgpu_vm_update_params *p,
 		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);
@@ -276,7 +276,6 @@ struct amdgpu_vm {
 
 	/* Last submission to the scheduler entities */
 	struct dma_fence	*last_direct;
-	struct dma_fence	*last_delayed;
 
 	unsigned int		pasid;
 	/* dedicated to vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 73fec7a..e385163 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
  * Returns:
  * Negative errno, 0 for success.
  */
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
-				 struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+				 struct dma_resv *resv,
+				 enum amdgpu_sync_mode sync_mode)
 {
-	int r;
-
-	/* Wait for any BO move to be completed */
-	if (exclusive) {
-		r = dma_fence_wait(exclusive, true);
-		if (unlikely(r))
-			return r;
-	}
-
-	/* Don't wait for submissions during page fault */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	/* Wait for PT BOs to be idle. PTs share the same resv. object
-	 * as the root PD BO
-	 */
-	return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+	return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
 }
 
 /**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 {
 	unsigned int i;
 	uint64_t value;
+	int r;
+
+	if (bo->tbo.moving) {
+		r = dma_fence_wait(bo->tbo.moving, true);
+		if (r)
+			return r;
+	}
 
 	pe += (unsigned long)amdgpu_bo_kptr(bo);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 19b7f80..cf96c33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
  * Negative errno, 0 for success.
  */
 static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
-				  void *owner, struct dma_fence *exclusive)
+				  struct dma_resv *resv,
+				  enum amdgpu_sync_mode sync_mode)
 {
-	struct amdgpu_bo *root = p->vm->root.base.bo;
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;
 
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 
 	p->num_dw_left = ndw;
 
-	/* Wait for moves to be completed */
-	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
-	if (r)
-		return r;
-
-	/* Don't wait for any submissions during page fault handling */
-	if (p->direct)
+	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-				owner, false);
+	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
 }
 
 /**
@@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	tmp = dma_fence_get(f);
-	if (p->direct)
+	if (p->direct) {
+		tmp = dma_fence_get(f);
 		swap(p->vm->last_direct, tmp);
-	else
-		swap(p->vm->last_delayed, tmp);
-	dma_fence_put(tmp);
+		dma_fence_put(tmp);
+	} else {
+		dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+	}
 
 	if (fence && !p->direct)
 		swap(*fence, f);
@@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
 
 	src += p->num_dw_left * 4;
 
-	pe += amdgpu_bo_gpu_offset(bo);
+	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
 	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
 
 	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
 {
 	struct amdgpu_ib *ib = p->job->ibs;
 
-	pe += amdgpu_bo_gpu_offset(bo);
+	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
 	if (count < 3) {
 		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	uint64_t *pte;
 	int r;
 
+	/* Wait for PD/PT moves to be completed */
+	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+	if (r)
+		return r;
+
 	do {
 		ndw = p->num_dw_left;
 		ndw -= p->job->ibs->length_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a97af42..95b3327 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -26,7 +26,12 @@
 #include "amdgpu_xgmi.h"
 #include "amdgpu_smu.h"
 #include "amdgpu_ras.h"
+#include "soc15.h"
 #include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
 
 static DEFINE_MUTEX(xgmi_mutex);
 
@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
 static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
 static unsigned hive_count = 0;
 
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+	{"XGMI PCS DataLossErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+	{"XGMI PCS TrainingErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+	{"XGMI PCS CRCErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+	{"XGMI PCS BERExceededErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+	{"XGMI PCS TxMetaDataErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+	{"XGMI PCS ReplayBufParityErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+	{"XGMI PCS DataParityErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+	{"XGMI PCS ReplayFifoOverflowErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+	{"XGMI PCS ReplayFifoUnderflowErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+	{"XGMI PCS ElasticFifoOverflowErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+	{"XGMI PCS DeskewErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+	{"XGMI PCS DataStartupLimitErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+	{"XGMI PCS FCInitTimeoutErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+	{"XGMI PCS RecoveryTimeoutErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+	{"XGMI PCS ReadySerialTimeoutErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+	{"XGMI PCS ReadySerialAttemptErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+	{"XGMI PCS RecoveryAttemptErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+	{"XGMI PCS RecoveryRelockAttemptErr",
+	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
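
Each entry pairs a printable name with the mask/shift of one status
field. Assuming the usual soc15.h definition, SOC15_REG_FIELD(reg,
field) expands to the generated mask/shift pair, which is what the query
code below consumes as pcs_err_mask and pcs_err_shift:

	/* SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)
	 * ->  XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK,
	 *     XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT
	 */
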
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+	{"WAFL PCS DataLossErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+	{"WAFL PCS TrainingErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+	{"WAFL PCS CRCErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+	{"WAFL PCS BERExceededErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+	{"WAFL PCS TxMetaDataErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+	{"WAFL PCS ReplayBufParityErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+	{"WAFL PCS DataParityErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+	{"WAFL PCS ReplayFifoOverflowErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+	{"WAFL PCS ReplayFifoUnderflowErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+	{"WAFL PCS ElasticFifoOverflowErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+	{"WAFL PCS DeskewErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+	{"WAFL PCS DataStartupLimitErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+	{"WAFL PCS FCInitTimeoutErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+	{"WAFL PCS RecoveryTimeoutErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+	{"WAFL PCS ReadySerialTimeoutErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+	{"WAFL PCS ReadySerialAttemptErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+	{"WAFL PCS RecoveryAttemptErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+	{"WAFL PCS RecoveryRelockAttemptErr",
+	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
 void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
 {
 	return &hive->device_list;
@@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 		return 0;
 
 	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+		ret = psp_xgmi_initialize(&adev->psp);
+		if (ret) {
+			dev_err(adev->dev,
+				"XGMI: Failed to initialize xgmi session\n");
+			return ret;
+		}
+
 		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
 		if (ret) {
 			dev_err(adev->dev,
@@ -451,16 +566,16 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 	return ret;
 }
 
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 {
 	struct amdgpu_hive_info *hive;
 
 	if (!adev->gmc.xgmi.supported)
-		return;
+		return -EINVAL;
 
 	hive = amdgpu_get_xgmi_hive(adev, 1);
 	if (!hive)
-		return;
+		return -EINVAL;
 
 	if (!(hive->number_devices--)) {
 		amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
 		amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
 		mutex_unlock(&hive->hive_lock);
 	}
+
+	return psp_xgmi_terminate(&adev->psp);
 }
 
 int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
 	};
 	struct ras_fs_if fs_info = {
 		.sysfs_name = "xgmi_wafl_err_count",
-		.debugfs_name = "xgmi_wafl_err_inject",
 	};
 
 	if (!adev->gmc.xgmi.supported ||
@@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
 		kfree(ras_if);
 	}
 }
+
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+					   uint64_t addr)
+{
+	uint32_t df_inst_id;
+	uint64_t dram_base_addr = 0;
+	const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
+
+	if ((!df_funcs)                 ||
+	    (!df_funcs->get_df_inst_id) ||
+	    (!df_funcs->get_dram_base_addr)) {
+		dev_warn(adev->dev,
+			 "XGMI: relative phy_addr algorithm is not supported\n");
+		return addr;
+	}
+
+	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
+		dev_warn(adev->dev,
+			 "failed to disable DF-Cstate, DF register may not be accessible\n");
+		return addr;
+	}
+
+	df_inst_id = df_funcs->get_df_inst_id(adev);
+	dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+
+	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+		dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+
+	return addr + dram_base_addr;
+}
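The function above brackets its DF register reads with a C-state disallow/allow pair so the registers stay accessible while they are being read. A minimal sketch of that guard pattern, with hypothetical helper names (the real calls are amdgpu_dpm_set_df_cstate() and the df_funcs callbacks):

    static int demo_read_guarded(struct demo_dev *dev, unsigned int *out)
    {
    	if (demo_block_low_power(dev))	/* keep the block awake */
    		return -EBUSY;

    	*out = demo_read_reg(dev);	/* safe while low power is blocked */

    	if (demo_allow_low_power(dev))	/* best effort re-enable */
    		demo_warn(dev, "failed to re-enable low-power state");

    	return 0;
    }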
+
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+					      uint32_t value,
+					      uint32_t *ue_count,
+					      uint32_t *ce_count,
+					      bool is_xgmi_pcs)
+{
+	int i;
+	int ue_cnt;
+
+	if (is_xgmi_pcs) {
+		/* query xgmi pcs error status;
+		 * only UE counters are supported */
+		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
+			ue_cnt = (value &
+				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
+				  xgmi_pcs_ras_fields[i].pcs_err_shift;
+			if (ue_cnt) {
+				dev_info(adev->dev, "%s detected\n",
+					 xgmi_pcs_ras_fields[i].err_name);
+				*ue_count += ue_cnt;
+			}
+		}
+	} else {
+		/* query wafl pcs error status;
+		 * only UE counters are supported */
+		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
+			ue_cnt = (value &
+				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
+				  wafl_pcs_ras_fields[i].pcs_err_shift;
+			if (ue_cnt) {
+				dev_info(adev->dev, "%s detected\n",
+					 wafl_pcs_ras_fields[i].err_name);
+				*ue_count += ue_cnt;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+				      void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	int i;
+	uint32_t data;
+	uint32_t ue_cnt = 0, ce_cnt = 0;
+
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+		return -EINVAL;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_ARCTURUS:
+		/* check xgmi pcs error */
+		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+			if (data)
+				amdgpu_xgmi_query_pcs_error_status(adev,
+						data, &ue_cnt, &ce_cnt, true);
+		}
+		/* check wafl pcs error */
+		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+			if (data)
+				amdgpu_xgmi_query_pcs_error_status(adev,
+						data, &ue_cnt, &ce_cnt, false);
+		}
+		break;
+	case CHIP_VEGA20:
+	default:
+		/* check xgmi pcs error */
+		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+			if (data)
+				amdgpu_xgmi_query_pcs_error_status(adev,
+						data, &ue_cnt, &ce_cnt, true);
+		}
+		/* check wafl pcs error */
+		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+			if (data)
+				amdgpu_xgmi_query_pcs_error_status(adev,
+						data, &ue_cnt, &ce_cnt, false);
+		}
+		break;
+	}
+
+	err_data->ue_count += ue_cnt;
+	err_data->ce_count += ce_cnt;
+
+	return 0;
+}
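Editorial note on the switch statement above: each ASIC gets its own list of PCS status registers, and the loop body is identical — read each register, decode only when non-zero. The scan reduced to a generic sketch, with illustrative callback types:

    /* read each status register in a per-ASIC list and decode only the
     * non-zero values; read_reg and decode are illustrative hooks */
    static void demo_scan_status_regs(const u32 *regs, int n,
    				      u32 (*read_reg)(u32 offset),
    				      void (*decode)(u32 value))
    {
    	int i;

    	for (i = 0; i < n; i++) {
    		u32 data = read_reg(regs[i]);

    		if (data)	/* skip clean registers */
    			decode(data);
    	}
    }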
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 74011fb..4a92067 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -37,15 +37,25 @@ struct amdgpu_hive_info {
 	struct task_barrier tb;
 };
 
+struct amdgpu_pcs_ras_field {
+	const char *err_name;
+	uint32_t pcs_err_mask;
+	uint32_t pcs_err_shift;
+};
+
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
 int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
 		struct amdgpu_device *peer_adev);
 int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+					   uint64_t addr);
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+				      void *ras_error_status);
 
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
 		struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index dd30f4e..cae426c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
 			cjiffies = jiffies;
 			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
 				cjiffies -= ctx->last_jump_jiffies;
-				if ((jiffies_to_msecs(cjiffies) > 5000)) {
-					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+				if ((jiffies_to_msecs(cjiffies) > 10000)) {
+					DRM_ERROR("atombios stuck in loop for more than 10 secs, aborting\n");
 					ctx->abort = true;
 				}
 			} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index ea702a6..9b74cfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
 
 void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
 {
-	int ret;
-
 	amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
-	amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
 	amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
-	ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
-	if (!ret)
-		amdgpu_connector->ddc_bus->has_aux = true;
-
-	WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+	drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+	amdgpu_connector->ddc_bus->has_aux = true;
 }
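The change above stops registering the DP AUX channel at init time and merely initializes it with drm_dp_aux_init(); registration is expected to happen later, once the connector itself is registered (the connector's kdev does not exist yet at this point). A sketch of the deferred step, with a hypothetical driver type and helper:

    static void demo_connector_late_register(struct demo_connector *c)
    {
    	/* the connector's kdev exists now, so the aux device can be
    	 * published */
    	c->aux.dev = c->base.kdev;
    	if (drm_dp_aux_register(&c->aux))
    		demo_warn(c, "DP AUX registration failed");
    }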
 
 /***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 40d2ac7..2512e7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
 	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v10_0_crtc_destroy,
 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
 	.prepare = dce_v10_0_crtc_prepare,
 	.commit = dce_v10_0_crtc_commit,
 	.disable = dce_v10_0_crtc_disable,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 898ef72..0dde22d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
 	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v11_0_crtc_destroy,
 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
 	.prepare = dce_v11_0_crtc_prepare,
 	.commit = dce_v11_0_crtc_commit,
 	.disable = dce_v11_0_crtc_disable,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db15a11..8421953 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
 	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v6_0_crtc_destroy,
 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
 	.prepare = dce_v6_0_crtc_prepare,
 	.commit = dce_v6_0_crtc_commit,
 	.disable = dce_v6_0_crtc_disable,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f06c902..3a64070 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
 	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v8_0_crtc_destroy,
 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
 	.prepare = dce_v8_0_crtc_prepare,
 	.commit = dce_v8_0_crtc_commit,
 	.disable = dce_v8_0_crtc_disable,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4f9486..13e12be 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
 	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_virtual_crtc_destroy,
 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
+	.enable_vblank = amdgpu_enable_vblank_kms,
+	.disable_vblank = amdgpu_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
 	.prepare = dce_virtual_crtc_prepare,
 	.commit = dce_virtual_crtc_commit,
 	.disable = dce_virtual_crtc_disable,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
@@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
-	drm_connector_register(connector);
 
 	/* link them */
 	drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 0270259..c8f2aa1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -35,6 +35,8 @@
 
 #include "gc/gc_10_1_0_offset.h"
 #include "gc/gc_10_1_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
 #include "navi10_enum.h"
 #include "hdp/hdp_5_0_0_offset.h"
 #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
@@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
 };
 
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+	static void *scratch_reg0;
+	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
+	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
+	uint32_t i = 0;
+	uint32_t retries = 50000;
+
+	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (amdgpu_sriov_runtime(adev)) {
+		pr_err("RLCG register write should not be called during SR-IOV runtime\n");
+		return;
+	}
+
+	writel(v, scratch_reg0);
+	writel(offset | 0x80000000, scratch_reg1);
+	writel(1, spare_int);
+	for (i = 0; i < retries; i++) {
+		u32 tmp;
+
+		tmp = readl(scratch_reg1);
+		if (!(tmp & 0x80000000))
+			break;
+
+		udelay(10);
+	}
+
+	if (i >= retries)
+		pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+}
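The write path above hands the request to RLC firmware through scratch registers: the value goes to SCRATCH_REG0, the offset with bit 31 set as a busy flag goes to SCRATCH_REG1, SPARE_INT rings the doorbell, and the host then polls until firmware clears the flag. A minimal sketch of just the polling half, assuming the same bit-31 convention; the names are illustrative:

    #define DEMO_BUSY_FLAG 0x80000000u

    /* poll until firmware clears the busy flag, or give up */
    static int demo_wait_for_ack(void __iomem *ack_reg, unsigned int retries)
    {
    	unsigned int i;

    	for (i = 0; i < retries; i++) {
    		if (!(readl(ack_reg) & DEMO_BUSY_FLAG))
    			return 0;	/* firmware consumed the request */
    		udelay(10);
    	}
    	return -ETIMEDOUT;
    }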
+
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
 {
 	/* Pending on emulation bring up */
@@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
 	struct dma_fence *f = NULL;
-	uint32_t scratch;
-	uint32_t tmp = 0;
+	unsigned index;
+	uint64_t gpu_addr;
+	uint32_t tmp;
 	long r;
 
-	r = amdgpu_gfx_scratch_get(adev, &scratch);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+	r = amdgpu_device_wb_get(adev, &index);
+	if (r)
 		return r;
-	}
 
-	WREG32(scratch, 0xCAFEDEAD);
-
+	gpu_addr = adev->wb.gpu_addr + (index * 4);
+	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
 	memset(&ib, 0, sizeof(ib));
-	r = amdgpu_ib_get(adev, NULL, 256, &ib);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+	r = amdgpu_ib_get(adev, NULL, 16, &ib);
+	if (r)
 		goto err1;
-	}
 
-	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
-	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
-	ib.ptr[2] = 0xDEADBEEF;
-	ib.length_dw = 3;
+	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+	ib.ptr[2] = lower_32_bits(gpu_addr);
+	ib.ptr[3] = upper_32_bits(gpu_addr);
+	ib.ptr[4] = 0xDEADBEEF;
+	ib.length_dw = 5;
 
 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 	if (r)
@@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 	r = dma_fence_wait_timeout(f, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
 		r = -ETIMEDOUT;
 		goto err2;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
 		goto err2;
 	}
 
-	tmp = RREG32(scratch);
+	tmp = adev->wb.wb[index];
 	if (tmp == 0xDEADBEEF)
 		r = 0;
 	else
@@ -547,8 +589,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
 err1:
-	amdgpu_gfx_scratch_free(adev, scratch);
-
+	amdgpu_device_wb_free(adev, index);
 	return r;
 }
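The rewritten test above no longer burns a scratch register: it poisons a GPU writeback (WB) slot from the CPU, submits an IB whose WRITE_DATA packet overwrites the slot, and checks the slot after the fence signals. The flow, sketched with hypothetical helpers:

    static int demo_ib_test(struct demo_ring *ring)
    {
    	u32 *wb = demo_wb_cpu_ptr(ring);	/* CPU view of the WB slot */
    	u64 gpu_addr = demo_wb_gpu_addr(ring);	/* GPU view of the same slot */

    	*wb = 0xCAFEDEAD;			/* poison */
    	demo_emit_write_data(ring, gpu_addr, 0xDEADBEEF);
    	if (demo_wait_fence(ring))
    		return -ETIMEDOUT;

    	/* did the GPU overwrite the poison? */
    	return (*wb == 0xDEADBEEF) ? 0 : -EINVAL;
    }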
 
@@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}
 
+	/* init spm vmid with 0xf */
+	if (adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
 	return 0;
 }
 
@@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
 	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
 
 	/* csib */
-	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+	WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
 		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
-	WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+	WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
 		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
-	WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+	WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
 
 	return 0;
 }
@@ -1895,6 +1940,11 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
 		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
 			gfx_v10_0_rlc_enable_srm(adev);
 	} else {
+		if (amdgpu_sriov_vf(adev)) {
+			gfx_v10_0_init_csb(adev);
+			return 0;
+		}
+
 		adev->gfx.rlc.funcs->stop(adev);
 
 		/* disable CG */
@@ -2395,7 +2445,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 			adev->gfx.gfx_ring[i].sched.ready = false;
 	}
-	WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+	WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -3168,12 +3218,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
 
-	r = amdgpu_ring_test_ring(kiq_ring);
-	if (r) {
-		DRM_ERROR("kfq enable failed\n");
-		kiq_ring->sched.ready = false;
-	}
-	return r;
+	return amdgpu_ring_test_helper(kiq_ring);
 }
 #endif
 
@@ -3216,6 +3261,22 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
 	return r;
 }
 
+static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+			ring->has_high_prio = true;
+			mqd->cp_hqd_queue_priority =
+				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+		} else {
+			ring->has_high_prio = false;
+		}
+	}
+}
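This helper replaces the dynamic set_priority ring callback (removed later in this series for gfx8/gfx9): pipe and queue priority are now decided once, from the queue index, when the MQD is initialized. Reduced to its essentials, with illustrative field values:

    /* decide static priority once at MQD init time */
    static void demo_mqd_set_priority(struct demo_mqd *mqd, bool high_prio)
    {
    	if (high_prio) {
    		mqd->pipe_priority  = 2;	/* HIGH */
    		mqd->queue_priority = 15;	/* MAXIMUM */
    	}
    	/* low-priority queues keep the MQD defaults */
    }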
+
 static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -3341,6 +3402,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
 	mqd->cp_hqd_ib_control = tmp;
 
+	/* set static priority for a compute queue/ring */
+	gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
 	/* map_queues packet doesn't need to activate the queue,
 	 * so only the kiq needs to set this field.
 	 */
@@ -3790,7 +3854,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
 		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
 					   PREEMPT_QUEUES, 0, 0);
 
-	return amdgpu_ring_test_ring(kiq_ring);
+	return amdgpu_ring_test_helper(kiq_ring);
 }
 #endif
 
@@ -3930,9 +3994,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 
 	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
-	WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
-	clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
-		((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+		((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
 	amdgpu_gfx_off_ctrl(adev, true);
 	return clock;
@@ -4220,6 +4283,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+	u32 data;
+
+	data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+	WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
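The SPM VMID update above is a classic read-modify-write of one register field, repeated almost verbatim for gfx7/8/9/10. The pattern in isolation, as a sketch with an illustrative mask applied after the shift:

    /* clear the old field, then insert the new value */
    static u32 demo_set_field(u32 reg_val, u32 val, u32 mask, u32 shift)
    {
    	reg_val &= ~mask;
    	reg_val |= (val << shift) & mask;
    	return reg_val;
    }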
+
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+					uint32_t offset,
+					struct soc15_reg_rlcg *entries, int arr_size)
+{
+	int i;
+	uint32_t reg;
+
+	if (!entries)
+		return false;
+
+	for (i = 0; i < arr_size; i++) {
+		const struct soc15_reg_rlcg *entry;
+
+		entry = &entries[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+		if (offset == reg)
+			return true;
+	}
+
+	return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+	return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
 static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
 	.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
 	.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4230,7 +4332,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
 	.resume = gfx_v10_0_rlc_resume,
 	.stop = gfx_v10_0_rlc_stop,
 	.reset = gfx_v10_0_rlc_reset,
-	.start = gfx_v10_0_rlc_start
+	.start = gfx_v10_0_rlc_start,
+	.update_spm_vmid = gfx_v10_0_update_spm_vmid,
+	.rlcg_wreg = gfx_v10_rlcg_wreg,
+	.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
 static int gfx_v10_0_set_powergating_state(void *handle,
@@ -4419,15 +4524,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
 	control |= ib->length_dw | (vmid << 24);
 
-	if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+	if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 		control |= INDIRECT_BUFFER_PRE_ENB(1);
 
 		if (flags & AMDGPU_IB_PREEMPTED)
 			control |= INDIRECT_BUFFER_PRE_RESUME(1);
 
-		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
 			gfx_v10_0_ring_emit_de_meta(ring,
-				    flags & AMDGPU_IB_PREEMPTED ? true : false);
+				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 	}
 
 	amdgpu_ring_write(ring, header);
@@ -4574,9 +4679,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
 {
 	uint32_t dw2 = 0;
 
-	if (amdgpu_mcbp)
+	if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
 		gfx_v10_0_ring_emit_ce_meta(ring,
-				    flags & AMDGPU_IB_PREEMPTED ? true : false);
+				    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
 	gfx_v10_0_ring_emit_tmz(ring, true);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f20a5d..733d398 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}
 
+	/* init spm vmid with 0xf */
+	if (adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
 	return 0;
 }
 
@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+	u32 data;
+
+	data = RREG32(mmRLC_SPM_VMID);
+
+	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+	WREG32(mmRLC_SPM_VMID, data);
+}
+
 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
 {
 	u32 data, orig, tmp, tmp2;
@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
 	.resume = gfx_v7_0_rlc_resume,
 	.stop = gfx_v7_0_rlc_stop,
 	.reset = gfx_v7_0_rlc_reset,
-	.start = gfx_v7_0_rlc_start
+	.start = gfx_v7_0_rlc_start,
+	.update_spm_vmid = gfx_v7_0_update_spm_vmid
 };
 
 static int gfx_v7_0_early_init(void *handle)
@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
+	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+				MC_ARB_RAMCFG, NOOFBANK);
+	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+				MC_ARB_RAMCFG, NOOFRANKS);
+
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
 	if (adev->flags & AMD_IS_APU) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa24597..fc32586 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}
 
+	/* init spm vmid with 0xf */
+	if (adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
 	return 0;
 }
 
@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
 	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
+	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+				MC_ARB_RAMCFG, NOOFBANK);
+	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+				MC_ARB_RAMCFG, NOOFRANKS);
+
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
 	if (adev->flags & AMD_IS_APU) {
@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
 	return r;
 }
 
+static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+			ring->has_high_prio = true;
+			mqd->cp_hqd_queue_priority =
+				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+		} else {
+			ring->has_high_prio = false;
+		}
+	}
+}
+
 static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 	/* defaults */
 	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
 	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
-	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
-	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
-	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
 	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
 	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
 	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
 	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
 
+	/* set static priority for a queue/ring */
+	gfx_v8_0_mqd_set_priority(ring, mqd);
+	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
 	/* map_queues packet doesn't need to activate the queue,
 	 * so only the kiq needs to set this field.
 	 */
@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+	u32 data;
+
+	data = RREG32(mmRLC_SPM_VMID);
+
+	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+	WREG32(mmRLC_SPM_VMID, data);
+}
+
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
 	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
 	.set_safe_mode = gfx_v8_0_set_safe_mode,
@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
 	.resume = gfx_v8_0_rlc_resume,
 	.stop = gfx_v8_0_rlc_stop,
 	.reset = gfx_v8_0_rlc_reset,
-	.start = gfx_v8_0_rlc_start
+	.start = gfx_v8_0_rlc_start,
+	.update_spm_vmid = gfx_v8_0_update_spm_vmid
 };
 
 static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 		control |= INDIRECT_BUFFER_PRE_ENB(1);
 
-		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
 			gfx_v8_0_ring_emit_de_meta(ring);
 	}
 
@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 }
 
-static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
-					   bool acquire)
-{
-	struct amdgpu_device *adev = ring->adev;
-	int pipe_num, tmp, reg;
-	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
-	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
-	/* first me only has 2 entries, GFX and HP3D */
-	if (ring->me > 0)
-		pipe_num -= 2;
-
-	reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
-	tmp = RREG32(reg);
-	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
-	WREG32(reg, tmp);
-}
-
-static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
-					    struct amdgpu_ring *ring,
-					    bool acquire)
-{
-	int i, pipe;
-	bool reserve;
-	struct amdgpu_ring *iring;
-
-	mutex_lock(&adev->gfx.pipe_reserve_mutex);
-	pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
-	if (acquire)
-		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-	else
-		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
-	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
-		/* Clear all reservations - everyone reacquires all resources */
-		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
-			gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
-						       true);
-
-		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
-			gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
-						       true);
-	} else {
-		/* Lower all pipes without a current reservation */
-		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
-			iring = &adev->gfx.gfx_ring[i];
-			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
-							   iring->me,
-							   iring->pipe,
-							   0);
-			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-			gfx_v8_0_ring_set_pipe_percent(iring, reserve);
-		}
-
-		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
-			iring = &adev->gfx.compute_ring[i];
-			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
-							   iring->me,
-							   iring->pipe,
-							   0);
-			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-			gfx_v8_0_ring_set_pipe_percent(iring, reserve);
-		}
-	}
-
-	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
-				      struct amdgpu_ring *ring,
-				      bool acquire)
-{
-	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
-	uint32_t queue_priority = acquire ? 0xf : 0x0;
-
-	mutex_lock(&adev->srbm_mutex);
-	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
-	WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
-	WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
-	vi_srbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-}
-static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
-					       enum drm_sched_priority priority)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
-	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
-		return;
-
-	gfx_v8_0_hqd_set_priority(adev, ring, acquire);
-	gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 					     u64 addr, u64 seq,
 					     unsigned flags)
@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.set_priority = gfx_v8_0_ring_set_priority_compute,
 	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 };
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 889154a..37c8231 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
 };
 
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
 {
 	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
 	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+	static void *scratch_reg0;
+	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
+	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
+
+	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (amdgpu_sriov_runtime(adev)) {
+		pr_err("RLCG register write should not be called during SR-IOV runtime\n");
+		return;
+	}
+
+	if (offset == grbm_cntl || offset == grbm_idx) {
+		if (offset == grbm_cntl)
+			writel(v, scratch_reg2);
+		else if (offset == grbm_idx)
+			writel(v, scratch_reg3);
+
+		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+	} else {
+		uint32_t i = 0;
+		uint32_t retries = 50000;
+
+		writel(v, scratch_reg0);
+		writel(offset | 0x80000000, scratch_reg1);
+		writel(1, spare_int);
+		for (i = 0; i < retries; i++) {
+			u32 tmp;
+
+			tmp = readl(scratch_reg1);
+			if (!(tmp & 0x80000000))
+				break;
+
+			udelay(10);
+		}
+		if (i >= retries)
+			pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+	}
+
+}
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					  void *ras_error_status);
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
 
 static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
 				uint64_t queue_mask)
@@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	adev->gfx.me_fw_write_wait = false;
 	adev->gfx.mec_fw_write_wait = false;
 
-	if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+	if ((adev->asic_type != CHIP_ARCTURUS) &&
+	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
 	    (adev->gfx.mec_feature_version < 46) ||
 	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
-	    (adev->gfx.pfp_feature_version < 46))
+	    (adev->gfx.pfp_feature_version < 46)))
 		DRM_WARN_ONCE("CP firmware version too old, please update!");
 
 	switch (adev->asic_type) {
@@ -1846,6 +1905,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 		break;
 	}
 
+	/* init spm vmid with 0xf */
+	if (adev->gfx.rlc.funcs->update_spm_vmid)
+		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
 	return 0;
 }
 
@@ -1916,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 
 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
 {
-	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1928,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
 			   uint32_t wave, uint32_t thread,
 			   uint32_t regno, uint32_t num, uint32_t *out)
 {
-	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+	WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1992,7 +2055,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
 	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
 	.ras_error_inject = &gfx_v9_0_ras_error_inject,
-	.query_ras_error_count = &gfx_v9_0_query_ras_error_count
+	.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+	.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
 };
 
 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
@@ -2003,7 +2067,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
 	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
 	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
 	.ras_error_inject = &gfx_v9_4_ras_error_inject,
-	.query_ras_error_count = &gfx_v9_4_query_ras_error_count
+	.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+	.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
 };
 
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -3309,6 +3374,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
 	WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
 }
 
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+			ring->has_high_prio = true;
+			mqd->cp_hqd_queue_priority =
+				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+		} else {
+			ring->has_high_prio = false;
+		}
+	}
+}
+
 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -3445,6 +3526,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
 	mqd->cp_hqd_ib_control = tmp;
 
+	/* set static priority for a queue/ring */
+	gfx_v9_0_mqd_set_priority(ring, mqd);
+	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
 	/* map_queues packet doesn't need to activate the queue,
 	 * so only the kiq needs to set this field.
 	 */
@@ -3963,6 +4048,63 @@ static int gfx_v9_0_soft_reset(void *handle)
 	return 0;
 }
 
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+
+	BUG_ON(!ring->funcs->emit_rreg);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+	amdgpu_ring_write(ring, 9 |	/* src: register */
+				(5 << 8) |	/* dst: memory */
+				(1 << 16) |	/* count sel */
+				(1 << 20));	/* write confirm */
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+				kiq->reg_val_offs * 4));
+	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+				kiq->reg_val_offs * 4));
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* Don't keep waiting in the GPU-reset case, because that may
+	 * block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
+	 * path is triggered from TTM, and ttm_bo_lock_delayed_workqueue()
+	 * will never return if we keep waiting in virt_kiq_rreg, which
+	 * causes gpu_recover() to hang there.
+	 *
+	 * Likewise, don't keep waiting when called from IRQ context.
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq_read;
+
+	might_sleep();
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq_read;
+
+	return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
+		(uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
+
+failed_kiq_read:
+	pr_err("failed to read gpu clock\n");
+	return ~0;
+}
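The bounded wait above polls the KIQ fence once, bails out early during GPU reset or from IRQ context, and otherwise retries with msleep() backoff up to a fixed count. The retry loop on its own, sketched with illustrative constants and a hypothetical demo_fence_wait() (the real ones are MAX_KIQ_REG_WAIT, MAX_KIQ_REG_TRY, MAX_KIQ_REG_BAILOUT_INTERVAL and amdgpu_fence_wait_polling()):

    #define DEMO_WAIT_US	1000
    #define DEMO_MAX_TRY	100
    #define DEMO_BAILOUT_MS	5

    static long demo_poll_fence(struct demo_ring *ring, u32 seq)
    {
    	long r = demo_fence_wait(ring, seq, DEMO_WAIT_US);
    	int cnt = 0;

    	while (r < 1 && cnt++ < DEMO_MAX_TRY) {
    		msleep(DEMO_BAILOUT_MS);	/* back off, then re-poll */
    		r = demo_fence_wait(ring, seq, DEMO_WAIT_US);
    	}

    	return (cnt > DEMO_MAX_TRY) ? -ETIMEDOUT : r;
    }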
+
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
 	uint64_t clock;
@@ -3970,16 +4112,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
 	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
-		uint32_t tmp, lsb, msb, i = 0;
-		do {
-			if (i != 0)
-				udelay(1);
-			tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
-			lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
-			msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
-			i++;
-		} while (unlikely(tmp != msb) && (i < adev->usec_timeout));
-		clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+		clock = gfx_v9_0_kiq_read_clock(adev);
 	} else {
 		WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
 		clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
@@ -4053,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] =
 	0xbe800080, 0xbf810000,
 };
 
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+	0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+	0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+	0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+	0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+	0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+	0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+	0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+	0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+	0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+	0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+	0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+	0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+	0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+	0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+	0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+	0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+	0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+	0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+	0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+	0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+	0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+	0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+	0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+	0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+	0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+	0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+	0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+	0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+	0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+	0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+	0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+	0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+	0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+	0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+	0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+	0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+	0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+	0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+	0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+	0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+	0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+	0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+	0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+	0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+	0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+	0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+	0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+	0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+	0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+	0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+	0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+	0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+	0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+	0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+	0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+	0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+	0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+	0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+	0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+	0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+	0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+	0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+	0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+	0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+	0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+	0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+	0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+	0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+	0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+	0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+	0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+	0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+	0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+	0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+	0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+	0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+	0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+	0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+	0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+	0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+	0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+	0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+	0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+	0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+	0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+	0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+	0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+	0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+	0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+	0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+	0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+	0xbf84fff8, 0xbf810000,
+};
+
 /* When the register arrays below are changed, please update gpr_reg_size
    and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds
    to cover all gfx9 ASICs */
@@ -4073,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
 static const struct soc15_reg_entry sgpr1_init_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4141,7 +4386,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
-   { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
 };
 
 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4204,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 						adev->gfx.config.max_cu_per_sh *
 						adev->gfx.config.max_sh_per_se;
 	int sgpr_work_group_size = 5;
-	int gpr_reg_size = compute_dim_x / 16 + 6;
+	int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+	int vgpr_init_shader_size;
+	const u32 *vgpr_init_shader_ptr;
+	const struct soc15_reg_entry *vgpr_init_regs_ptr;
 
 	/* only support when RAS is enabled */
 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4214,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	if (!ring->sched.ready)
 		return 0;
 
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+	} else {
+		vgpr_init_shader_ptr = vgpr_init_compute_shader;
+		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+		vgpr_init_regs_ptr = vgpr_init_regs;
+	}
+
 	total_size =
 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
 	total_size +=
@@ -4222,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
 	total_size = ALIGN(total_size, 256);
 	vgpr_offset = total_size;
-	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+	total_size += ALIGN(vgpr_init_shader_size, 256);
 	sgpr_offset = total_size;
 	total_size += sizeof(sgpr_init_compute_shader);
 
@@ -4235,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	}
 
 	/* load the compute shaders */
-	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
-		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+	for (i = 0; i < vgpr_init_shader_size / sizeof(u32); i++)
+		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
 
 	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
 		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4248,9 +4505,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	/* write the register state for the compute dispatch */
 	for (i = 0; i < gpr_reg_size; i++) {
 		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
-		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+		ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
 								- PACKET3_SET_SH_REG_START;
-		ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+		ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
 	}
 	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
 	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4262,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
 	/* write dispatch packet */
 	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
-	ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+	ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
 	ib.ptr[ib.length_dw++] = 1; /* y */
 	ib.ptr[ib.length_dw++] = 1; /* z */
 	ib.ptr[ib.length_dw++] =
@@ -4342,18 +4599,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 		goto fail;
 	}
 
-	switch (adev->asic_type)
-	{
-	case CHIP_VEGA20:
-		gfx_v9_0_clear_ras_edc_counter(adev);
-		break;
-	case CHIP_ARCTURUS:
-		gfx_v9_4_clear_ras_edc_counter(adev);
-		break;
-	default:
-		break;
-	}
-
 fail:
 	amdgpu_ib_free(adev, &ib, NULL);
 	dma_fence_put(f);
@@ -4401,6 +4646,10 @@ static int gfx_v9_0_ecc_late_init(void *handle)
 	if (r)
 		return r;
 
+	if (adev->gfx.funcs &&
+	    adev->gfx.funcs->reset_ras_error_count)
+		adev->gfx.funcs->reset_ras_error_count(adev);
+
 	r = amdgpu_gfx_ras_late_init(adev);
 	if (r)
 		return r;
@@ -4705,6 +4954,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 	return 0;
 }
 
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+	u32 data;
+
+	data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+	WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+					uint32_t offset,
+					struct soc15_reg_rlcg *entries, int arr_size)
+{
+	int i;
+	uint32_t reg;
+
+	if (!entries)
+		return false;
+
+	for (i = 0; i < arr_size; i++) {
+		const struct soc15_reg_rlcg *entry;
+
+		entry = &entries[i];
+		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+		if (offset == reg)
+			return true;
+	}
+
+	return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+	return gfx_v9_0_check_rlcg_range(adev, offset,
+					(void *)rlcg_access_gc_9_0,
+					ARRAY_SIZE(rlcg_access_gc_9_0));
+}
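is_rlcg_access_range() above is a linear allowlist search: each entry's absolute offset is rebuilt from the hwip/instance/segment tables and compared against the request. A sketch of how a write helper might use it to pick between the firmware-mediated path and direct MMIO; the function names here are hypothetical:

    static void demo_wreg(struct demo_dev *dev, u32 offset, u32 value)
    {
    	if (demo_is_rlcg_range(dev, offset))
    		demo_rlcg_wreg(dev, offset, value);	/* firmware-mediated */
    	else
    		writel(value, dev->mmio + offset * 4);	/* direct MMIO */
    }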
+
 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
 	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
 	.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4716,7 +5006,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
 	.resume = gfx_v9_0_rlc_resume,
 	.stop = gfx_v9_0_rlc_stop,
 	.reset = gfx_v9_0_rlc_reset,
-	.start = gfx_v9_0_rlc_start
+	.start = gfx_v9_0_rlc_start,
+	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
+	.rlcg_wreg = gfx_v9_0_rlcg_wreg,
+	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
 };
 
 static int gfx_v9_0_set_powergating_state(void *handle,
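
The three hooks wired into gfx_v9_0_rlc_funcs above give SRIOV guests a vetted path for RLC-guarded register writes: gfx_v9_0_is_rlcg_access_range consults the rlcg_access_gc_9_0 table, and gfx_v9_0_rlcg_wreg (defined elsewhere in this series) performs the guarded write. A minimal sketch of how a write helper could dispatch on the range check follows; example_gc_wreg and the plain-MMIO fallback are illustrative placeholders, not code from this series:

/* Sketch only: route a GC register write through the RLCG path when
 * the offset falls in the guarded range, plain MMIO otherwise.
 */
static void example_gc_wreg(struct amdgpu_device *adev, u32 offset, u32 value)
{
	if (amdgpu_sriov_vf(adev) &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range(adev, offset))
		adev->gfx.rlc.funcs->rlcg_wreg(adev, offset, value);
	else
		WREG32(offset, value);
}
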
@@ -4919,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 		control |= INDIRECT_BUFFER_PRE_ENB(1);
 
-		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
 			gfx_v9_0_ring_emit_de_meta(ring);
 	}
 
@@ -5044,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 	return wptr;
 }
 
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
-					   bool acquire)
-{
-	struct amdgpu_device *adev = ring->adev;
-	int pipe_num, tmp, reg;
-	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
-	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
-	/* first me only has 2 entries, GFX and HP3D */
-	if (ring->me > 0)
-		pipe_num -= 2;
-
-	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
-	tmp = RREG32(reg);
-	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
-	WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
-					    struct amdgpu_ring *ring,
-					    bool acquire)
-{
-	int i, pipe;
-	bool reserve;
-	struct amdgpu_ring *iring;
-
-	mutex_lock(&adev->gfx.pipe_reserve_mutex);
-	pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
-	if (acquire)
-		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-	else
-		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
-	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
-		/* Clear all reservations - everyone reacquires all resources */
-		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
-			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
-						       true);
-
-		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
-			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
-						       true);
-	} else {
-		/* Lower all pipes without a current reservation */
-		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
-			iring = &adev->gfx.gfx_ring[i];
-			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
-							   iring->me,
-							   iring->pipe,
-							   0);
-			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
-		}
-
-		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
-			iring = &adev->gfx.compute_ring[i];
-			pipe = amdgpu_gfx_mec_queue_to_bit(adev,
-							   iring->me,
-							   iring->pipe,
-							   0);
-			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
-		}
-	}
-
-	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
-				      struct amdgpu_ring *ring,
-				      bool acquire)
-{
-	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
-	uint32_t queue_priority = acquire ? 0xf : 0x0;
-
-	mutex_lock(&adev->srbm_mutex);
-	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
-	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
-	WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
-	soc15_grbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
-					       enum drm_sched_priority priority)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
-	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
-		return;
-
-	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
-	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -6322,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
 	return 0;
 }
 
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
 	/* read back registers to clear the counters */
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -6513,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.set_priority = gfx_v9_0_ring_set_priority_compute,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f099f13..cceb46f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -893,10 +893,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
 	return 0;
 }
 
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
 {
 	int i, j, k;
 
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		return;
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
 		for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 2e3f6f7..1ffecc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
 int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
 				     void *inject_if);
 
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+
 #endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b70c7b4..cc866c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
-	/* Disable AGP. */
-	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
-	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
-	WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
+	if (!amdgpu_sriov_vf(adev)) {
+		/*
+		 * The new L1 policy blocks the SRIOV guest from writing
+		 * these regs; they are programmed by the host instead,
+		 * so skip programming them here.
+		 */
+		/* Disable AGP. */
+		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
+		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
+		WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
 
-	/* Program the system aperture low logical page number. */
-	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     adev->gmc.vram_start >> 18);
-	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-		     adev->gmc.vram_end >> 18);
+		/* Program the system aperture low logical page number. */
+		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			     adev->gmc.vram_start >> 18);
+		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			     adev->gmc.vram_end >> 18);
 
-	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
-		+ adev->vm_manager.vram_base_offset;
-	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
-		     (u32)(value >> 12));
-	WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
-		     (u32)(value >> 44));
+		/* Set default page address. */
+		value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+			+ adev->vm_manager.vram_base_offset;
+		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+			     (u32)(value >> 12));
+		WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+			     (u32)(value >> 44));
+	}
 
 	/* Program "protection fault". */
 	WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
@@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
 {
 	uint32_t tmp;
 
+	/* These regs are not accessible to the VF; the PF programs them under SRIOV */
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Setup L2 cache */
 	tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
 	tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
 
 int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev)) {
-		/*
-		 * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
-		 * VF copy registers so vbios post doesn't program them, for
-		 * SRIOV driver need to program them
-		 */
-		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
-			     adev->gmc.vram_start >> 24);
-		WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
-			     adev->gmc.vram_end >> 24);
-	}
-
 	/* GART Enable. */
 	gfxhub_v2_0_init_gart_aperture_regs(adev);
 	gfxhub_v2_0_init_system_aperture_regs(adev);
@@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
 			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
 	WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
 
-	/* Setup L2 cache */
-	WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
-	WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+	if (!amdgpu_sriov_vf(adev)) {
+		/* Setup L2 cache */
+		WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+		WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+	}
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cc0c273..8606f87 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 {
 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 	const unsigned eng = 17;
-	u32 j, inv_req, tmp;
+	u32 j, inv_req, inv_req2, tmp;
 	struct amdgpu_vmhub *hub;
 
 	BUG_ON(vmhub >= adev->num_vmhubs);
 
 	hub = &adev->vmhub[vmhub];
-	inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+	if (adev->gmc.xgmi.num_physical_nodes &&
+	    adev->asic_type == CHIP_VEGA20) {
+		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
+		 * heavy-weight TLB flush (type 2), which flushes
+		 * both. Due to a race condition with concurrent
+		 * memory accesses using the same TLB cache line, we
+		 * still need a second TLB flush after this.
+		 */
+		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
+		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+	} else {
+		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+		inv_req2 = 0;
+	}
 
 	/* This is necessary for a HW workaround under SRIOV as well
 	 * as GFXOFF under bare metal
@@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
 	}
 
-	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+	do {
+		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
 
-	/*
-	 * Issue a dummy read to wait for the ACK register to be cleared
-	 * to avoid a false ACK due to the new fast GRBM interface.
-	 */
-	if (vmhub == AMDGPU_GFXHUB_0)
-		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+		/*
+		 * Issue a dummy read to wait for the ACK register to
+		 * be cleared to avoid a false ACK due to the new fast
+		 * GRBM interface.
+		 */
+		if (vmhub == AMDGPU_GFXHUB_0)
+			RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
 
-	for (j = 0; j < adev->usec_timeout; j++) {
-		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
-		if (tmp & (1 << vmid))
-			break;
-		udelay(1);
-	}
+		for (j = 0; j < adev->usec_timeout; j++) {
+			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+			if (tmp & (1 << vmid))
+				break;
+			udelay(1);
+		}
+
+		inv_req = inv_req2;
+		inv_req2 = 0;
+	} while (inv_req);
 
 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
 	if (use_semaphore)
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		return -EIO;
 
 	if (ring->sched.ready) {
+		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
+		 * heavy-weight TLB flush (type 2), which flushes
+		 * both. Due to a race condition with concurrent
+		 * memory accesses using the same TLB cache line, we
+		 * still need a second TLB flush after this.
+		 */
+		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
+				       adev->asic_type == CHIP_VEGA20);
+		/* 2 dwords flush + 8 dwords fence */
+		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+		if (vega20_xgmi_wa)
+			ndw += kiq->pmf->invalidate_tlbs_size;
+
 		spin_lock(&adev->gfx.kiq.ring_lock);
 		/* 2 dwords flush + 8 dwords fence */
-		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+		amdgpu_ring_alloc(ring, ndw);
+		if (vega20_xgmi_wa)
+			kiq->pmf->kiq_invalidate_tlbs(ring,
+						      pasid, 2, all_hub);
 		kiq->pmf->kiq_invalidate_tlbs(ring,
 					pasid, flush_type, all_hub);
 		amdgpu_fence_emit_polling(ring, &seq);
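
The ring-space reservation above is plain dword accounting: one KIQ invalidate packet of kiq->pmf->invalidate_tlbs_size dwords plus an 8-dword polling fence, and one extra invalidate packet when the Vega20+XGMI workaround queues the heavy-weight flush first. A worked sketch, using 2 for the per-packet size as the "2 dwords flush" comment indicates:

	/* illustrative numbers; invalidate_tlbs_size is per-ASIC */
	unsigned int flush_dw = 2;		/* kiq->pmf->invalidate_tlbs_size */
	unsigned int ndw = flush_dw + 8;	/* 10 dwords: flush + fence */

	if (vega20_xgmi_wa)
		ndw += flush_dw;		/* 12 dwords: type-2 flush + normal flush */
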
@@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle)
 	if (r)
 		return r;
 	/* Check if ecc is available */
-	if (!amdgpu_sriov_vf(adev)) {
-		switch (adev->asic_type) {
-		case CHIP_VEGA10:
-		case CHIP_VEGA20:
-		case CHIP_ARCTURUS:
-			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
-			if (!r) {
-				DRM_INFO("ECC is not present.\n");
-				if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
-					adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
-			} else {
-				DRM_INFO("ECC is active.\n");
-			}
+	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+		r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+		if (!r) {
+			DRM_INFO("ECC is not present.\n");
+			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+		} else {
+			DRM_INFO("ECC is active.\n");
+		}
 
-			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
-			if (!r) {
-				DRM_INFO("SRAM ECC is not present.\n");
-			} else {
-				DRM_INFO("SRAM ECC is active.\n");
-			}
-			break;
-		default:
-			break;
-		}
+		r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+		if (!r)
+			DRM_INFO("SRAM ECC is not present.\n");
+		else
+			DRM_INFO("SRAM ECC is active.\n");
 	}
 
+	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+		adev->mmhub.funcs->reset_ras_error_count(adev);
+
 	r = amdgpu_gmc_ras_late_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 49a3a56..396c2a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
 	err_data->ue_count += ded_count;
 }
 
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	uint32_t i;
+
+	/* read back edc counter registers to reset the counters to 0 */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+	}
+}
+
 const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
 	.ras_late_init = amdgpu_mmhub_ras_late_init,
 	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
 };
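
This read-to-clear idiom recurs across the RAS-capable blocks touched in this series (GFX, MMHUB, SDMA, HDP): the EDC counters clear on read, so resetting them is just one discarded read per counter register. In schematic form, with reg_list/nregs standing in for each block's counter table:

/* Schematic sketch: EDC counters are clear-on-read, so a dummy read
 * of every counter register zeroes the block's error counts.
 */
static void example_reset_edc_counters(struct amdgpu_device *adev,
				       const struct soc15_reg_entry *reg_list,
				       int nregs)
{
	int i;

	for (i = 0; i < nregs; i++)
		RREG32(SOC15_REG_ENTRY_OFFSET(reg_list[i]));
}
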
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index bde1896..fb3f228 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
 	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
 
-	/* Program the system aperture low logical page number. */
-	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     adev->gmc.vram_start >> 18);
-	WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-		     adev->gmc.vram_end >> 18);
+	if (!amdgpu_sriov_vf(adev)) {
+		/*
+		 * The new L1 policy blocks the SRIOV guest from writing
+		 * these regs; they are programmed by the host instead,
+		 * so skip programming them here.
+		 */
+		/* Program the system aperture low logical page number. */
+		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			     adev->gmc.vram_start >> 18);
+		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			     adev->gmc.vram_end >> 18);
+	}
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
 
 int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev)) {
-		/*
-		 * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
-		 * VF copy registers so vbios post doesn't program them, for
-		 * SRIOV driver need to program them
-		 */
-		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
-			     adev->gmc.vram_start >> 24);
-		WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
-			     adev->gmc.vram_end >> 24);
-	}
-
 	/* GART Enable. */
 	mmhub_v2_0_init_gart_aperture_regs(adev);
 	mmhub_v2_0_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a5281df..0d413fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
 	err_data->ue_count += ded_count;
 }
 
+static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	uint32_t i;
+
+	/* read back edc counter registers to reset the counters to 0 */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+		for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+	}
+}
+
 const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
 	.ras_late_init = amdgpu_mmhub_ras_late_init,
 	.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+	.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
new file mode 100644
index 0000000..1b5086c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V2_0_H__
+#define __MMSCH_V2_0_H__
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_UCODE_ADDR                                                                             0x0000
+#define mmMMSCH_UCODE_ADDR_BASE_IDX                                                                    0
+#define mmMMSCH_UCODE_DATA                                                                             0x0001
+#define mmMMSCH_UCODE_DATA_BASE_IDX                                                                    0
+#define mmMMSCH_SRAM_ADDR                                                                              0x0002
+#define mmMMSCH_SRAM_ADDR_BASE_IDX                                                                     0
+#define mmMMSCH_SRAM_DATA                                                                              0x0003
+#define mmMMSCH_SRAM_DATA_BASE_IDX                                                                     0
+#define mmMMSCH_VF_SRAM_OFFSET                                                                         0x0004
+#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX                                                                0
+#define mmMMSCH_DB_SRAM_OFFSET                                                                         0x0005
+#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX                                                                0
+#define mmMMSCH_CTX_SRAM_OFFSET                                                                        0x0006
+#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX                                                               0
+#define mmMMSCH_CTL                                                                                    0x0007
+#define mmMMSCH_CTL_BASE_IDX                                                                           0
+#define mmMMSCH_INTR                                                                                   0x0008
+#define mmMMSCH_INTR_BASE_IDX                                                                          0
+#define mmMMSCH_INTR_ACK                                                                               0x0009
+#define mmMMSCH_INTR_ACK_BASE_IDX                                                                      0
+#define mmMMSCH_INTR_STATUS                                                                            0x000a
+#define mmMMSCH_INTR_STATUS_BASE_IDX                                                                   0
+#define mmMMSCH_VF_VMID                                                                                0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX                                                                       0
+#define mmMMSCH_VF_CTX_ADDR_LO                                                                         0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX                                                                0
+#define mmMMSCH_VF_CTX_ADDR_HI                                                                         0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX                                                                0
+#define mmMMSCH_VF_CTX_SIZE                                                                            0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX                                                                   0
+#define mmMMSCH_VF_GPCOM_ADDR_LO                                                                       0x000f
+#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX                                                              0
+#define mmMMSCH_VF_GPCOM_ADDR_HI                                                                       0x0010
+#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX                                                              0
+#define mmMMSCH_VF_GPCOM_SIZE                                                                          0x0011
+#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX                                                                 0
+#define mmMMSCH_VF_MAILBOX_HOST                                                                        0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX                                                               0
+#define mmMMSCH_VF_MAILBOX_RESP                                                                        0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX                                                               0
+#define mmMMSCH_VF_MAILBOX_0                                                                           0x0014
+#define mmMMSCH_VF_MAILBOX_0_BASE_IDX                                                                  0
+#define mmMMSCH_VF_MAILBOX_0_RESP                                                                      0x0015
+#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX                                                             0
+#define mmMMSCH_VF_MAILBOX_1                                                                           0x0016
+#define mmMMSCH_VF_MAILBOX_1_BASE_IDX                                                                  0
+#define mmMMSCH_VF_MAILBOX_1_RESP                                                                      0x0017
+#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX                                                             0
+#define mmMMSCH_CNTL                                                                                   0x001c
+#define mmMMSCH_CNTL_BASE_IDX                                                                          0
+#define mmMMSCH_NONCACHE_OFFSET0                                                                       0x001d
+#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX                                                              0
+#define mmMMSCH_NONCACHE_SIZE0                                                                         0x001e
+#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX                                                                0
+#define mmMMSCH_NONCACHE_OFFSET1                                                                       0x001f
+#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX                                                              0
+#define mmMMSCH_NONCACHE_SIZE1                                                                         0x0020
+#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX                                                                0
+#define mmMMSCH_PDEBUG_STATUS                                                                          0x0021
+#define mmMMSCH_PDEBUG_STATUS_BASE_IDX                                                                 0
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS                                                                0x0022
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX                                                       0
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS                                                                0x0023
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX                                                       0
+#define mmMMSCH_PDEBUG_EPC                                                                             0x0024
+#define mmMMSCH_PDEBUG_EPC_BASE_IDX                                                                    0
+#define mmMMSCH_PDEBUG_EXCCAUSE                                                                        0x0025
+#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX                                                               0
+#define mmMMSCH_PROC_STATE1                                                                            0x0026
+#define mmMMSCH_PROC_STATE1_BASE_IDX                                                                   0
+#define mmMMSCH_LAST_MC_ADDR                                                                           0x0027
+#define mmMMSCH_LAST_MC_ADDR_BASE_IDX                                                                  0
+#define mmMMSCH_LAST_MEM_ACCESS_HI                                                                     0x0028
+#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX                                                            0
+#define mmMMSCH_LAST_MEM_ACCESS_LO                                                                     0x0029
+#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX                                                            0
+#define mmMMSCH_IOV_ACTIVE_FCN_ID                                                                      0x002a
+#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX                                                             0
+#define mmMMSCH_SCRATCH_0                                                                              0x002b
+#define mmMMSCH_SCRATCH_0_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_1                                                                              0x002c
+#define mmMMSCH_SCRATCH_1_BASE_IDX                                                                     0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0                                                                     0x002d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX                                                            0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0                                                                   0x002e
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_CMD_STATUS_0                                                                    0x002f
+#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX                                                           0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0                                                                0x0030
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX                                                       0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0                                                                   0x0031
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0                                                                 0x0032
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_DW6_0                                                                           0x0033
+#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW7_0                                                                           0x0034
+#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW8_0                                                                           0x0035
+#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1                                                                     0x0036
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX                                                            0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1                                                                   0x0037
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_CMD_STATUS_1                                                                    0x0038
+#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX                                                           0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1                                                                0x0039
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX                                                       0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1                                                                   0x003a
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1                                                                 0x003b
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_DW6_1                                                                           0x003c
+#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW7_1                                                                           0x003d
+#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW8_1                                                                           0x003e
+#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_CNTXT                                                                           0x003f
+#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX                                                                  0
+#define mmMMSCH_SCRATCH_2                                                                              0x0040
+#define mmMMSCH_SCRATCH_2_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_3                                                                              0x0041
+#define mmMMSCH_SCRATCH_3_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_4                                                                              0x0042
+#define mmMMSCH_SCRATCH_4_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_5                                                                              0x0043
+#define mmMMSCH_SCRATCH_5_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_6                                                                              0x0044
+#define mmMMSCH_SCRATCH_6_BASE_IDX                                                                     0
+#define mmMMSCH_SCRATCH_7                                                                              0x0045
+#define mmMMSCH_SCRATCH_7_BASE_IDX                                                                     0
+#define mmMMSCH_VFID_FIFO_HEAD_0                                                                       0x0046
+#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX                                                              0
+#define mmMMSCH_VFID_FIFO_TAIL_0                                                                       0x0047
+#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX                                                              0
+#define mmMMSCH_VFID_FIFO_HEAD_1                                                                       0x0048
+#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX                                                              0
+#define mmMMSCH_VFID_FIFO_TAIL_1                                                                       0x0049
+#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX                                                              0
+#define mmMMSCH_NACK_STATUS                                                                            0x004a
+#define mmMMSCH_NACK_STATUS_BASE_IDX                                                                   0
+#define mmMMSCH_VF_MAILBOX0_DATA                                                                       0x004b
+#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX                                                              0
+#define mmMMSCH_VF_MAILBOX1_DATA                                                                       0x004c
+#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX                                                              0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0                                                                  0x004d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX                                                         0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0                                                                 0x004e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0                                                              0x004f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX                                                     0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1                                                                  0x0050
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX                                                         0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1                                                                 0x0051
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1                                                              0x0052
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX                                                     0
+#define mmMMSCH_GPUIOV_CNTXT_IP                                                                        0x0053
+#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX                                                               0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2                                                                     0x0054
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX                                                            0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2                                                                   0x0055
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_CMD_STATUS_2                                                                    0x0056
+#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX                                                           0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2                                                                0x0057
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX                                                       0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2                                                                   0x0058
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX                                                          0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2                                                                 0x0059
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_DW6_2                                                                           0x005a
+#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW7_2                                                                           0x005b
+#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_DW8_2                                                                           0x005c
+#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX                                                                  0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2                                                                  0x005d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX                                                         0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2                                                                 0x005e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX                                                        0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2                                                              0x005f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX                                                     0
+#define mmMMSCH_VFID_FIFO_HEAD_2                                                                       0x0060
+#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX                                                              0
+#define mmMMSCH_VFID_FIFO_TAIL_2                                                                       0x0061
+#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX                                                              0
+#define mmMMSCH_VM_BUSY_STATUS_0                                                                       0x0062
+#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX                                                              0
+#define mmMMSCH_VM_BUSY_STATUS_1                                                                       0x0063
+#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX                                                              0
+#define mmMMSCH_VM_BUSY_STATUS_2                                                                       0x0064
+#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX                                                              0
+
+#define MMSCH_VERSION_MAJOR	2
+#define MMSCH_VERSION_MINOR	0
+#define MMSCH_VERSION	(MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+enum mmsch_v2_0_command_type {
+	MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+	MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+	MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+	MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+	MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v2_0_init_header {
+	uint32_t version;
+	uint32_t header_size;
+	uint32_t vcn_init_status;
+	uint32_t vcn_table_offset;
+	uint32_t vcn_table_size;
+};
+
+struct mmsch_v2_0_cmd_direct_reg_header {
+	uint32_t reg_offset   : 28;
+	uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_indirect_reg_header {
+	uint32_t reg_offset    : 20;
+	uint32_t reg_idx_space : 8;
+	uint32_t command_type  : 4;
+};
+
+struct mmsch_v2_0_cmd_direct_write {
+	struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+	uint32_t reg_value;
+};
+
+struct mmsch_v2_0_cmd_direct_read_modify_write {
+	struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+	uint32_t write_data;
+	uint32_t mask_value;
+};
+
+struct mmsch_v2_0_cmd_direct_polling {
+	struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+	uint32_t mask_value;
+	uint32_t wait_value;
+};
+
+struct mmsch_v2_0_cmd_end {
+	struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v2_0_cmd_indirect_write {
+	struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
+	uint32_t reg_value;
+};
+
+static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
+					       uint32_t *init_table,
+					       uint32_t reg_offset,
+					       uint32_t value)
+{
+	direct_wt->cmd_header.reg_offset = reg_offset;
+	direct_wt->reg_value = value;
+	memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+						      uint32_t *init_table,
+						      uint32_t reg_offset,
+						      uint32_t mask, uint32_t data)
+{
+	direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+	direct_rd_mod_wt->mask_value = mask;
+	direct_rd_mod_wt->write_data = data;
+	memcpy((void *)init_table, direct_rd_mod_wt,
+	       sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
+						 uint32_t *init_table,
+						 uint32_t reg_offset,
+						 uint32_t mask, uint32_t wait)
+{
+	direct_poll->cmd_header.reg_offset = reg_offset;
+	direct_poll->mask_value = mask;
+	direct_poll->wait_value = wait;
+	memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+	mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+					   init_table, (reg), \
+					   (mask), (data)); \
+	init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+	table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
+	mmsch_v2_0_insert_direct_wt(&direct_wt, \
+				    init_table, (reg), \
+				    (value)); \
+	init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+	table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+	mmsch_v2_0_insert_direct_poll(&direct_poll, \
+				      init_table, (reg), \
+				      (mask), (wait)); \
+	init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+	table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+}
+
+#endif
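
MMSCH_VERSION packs major.minor into one dword, so 2.0 encodes as (2 << 16) | 0 = 0x20000, which is checked against the table header. Note that the three INSERT macros expand against caller-local variables named init_table, table_size, and the matching command struct, so any table builder has to declare exactly those names. A hypothetical sketch of such a builder, with a placeholder register offset and value:

/* Hypothetical sketch: build a one-write MMSCH init table and
 * terminate it. The local names are dictated by the macros above.
 */
static uint32_t example_build_init_table(uint32_t *table_start)
{
	struct mmsch_v2_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v2_0_cmd_end end = { { 0 } };
	uint32_t *init_table = table_start;
	uint32_t table_size = 0;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	/* placeholder offset/value, for illustration only */
	MMSCH_V2_0_INSERT_DIRECT_WT(0x0100, 0x1);

	/* terminate the table */
	memcpy((void *)init_table, &end, sizeof(end));
	table_size += sizeof(end) / 4;

	return table_size;	/* dwords, as the header size fields expect */
}
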
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index cf557a42..e08245a 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -32,6 +32,7 @@
 #include "soc15_common.h"
 #include "navi10_ih.h"
 
+#define MAX_REARM_RETRY 10
 
 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
@@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
 }
 
 /**
+ * navi10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
+ */
+static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
+			       struct amdgpu_ih_ring *ih)
+{
+	uint32_t reg_rptr = 0;
+	uint32_t v = 0;
+	uint32_t i = 0;
+
+	if (ih == &adev->irq.ih)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+	else if (ih == &adev->irq.ih1)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+	else if (ih == &adev->irq.ih2)
+		reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+	else
+		return;
+
+	/* Rearm IRQ / re-write doorbell if doorbell write is lost */
+	for (i = 0; i < MAX_REARM_RETRY; i++) {
+		v = RREG32_NO_KIQ(reg_rptr);
+		if ((v < ih->ring_size) && (v != ih->rptr))
+			WDOORBELL32(ih->doorbell_index, ih->rptr);
+		else
+			break;
+	}
+}
+
+/**
  * navi10_ih_set_rptr - set the IH ring buffer rptr
  *
  * @adev: amdgpu_device pointer
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
 		/* XXX check if swapping is necessary on BE */
 		*ih->rptr_cpu = ih->rptr;
 		WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+		if (amdgpu_sriov_vf(adev))
+			navi10_ih_irq_rearm(adev, ih);
 	} else
 		WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 65eb378..149d386 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 {
 	uint32_t bif_doorbell_intr_cntl;
 	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+	struct ras_err_data err_data = {0, 0, 0, NULL};
 
 	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
 	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
 		 * clear error status after ras_controller_intr according to
 		 * hw team and count ue number for query
 		 */
-		nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+		nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+		/* log the error counts and print them for awareness */
+		obj->err_data.ue_count += err_data.ue_count;
+		obj->err_data.ce_count += err_data.ce_count;
+
+		if (err_data.ce_count)
+			DRM_INFO("%ld correctable errors detected in %s block\n",
+				obj->err_data.ce_count, adev->nbio.ras_if->name);
+
+		if (err_data.ue_count)
+			DRM_INFO("%ld uncorrectable errors detected in %s block\n",
+				obj->err_data.ue_count, adev->nbio.ras_if->name);
 
 		DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2d1bebd..033cbbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		    !amdgpu_sriov_vf(adev))
 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
-		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+		if (!amdgpu_sriov_vf(adev))
+			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 36b6579..a44fd60 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -31,6 +31,9 @@
 #define GFX_CMD_RESERVED_MASK       0x7FF00000
 #define GFX_CMD_RESPONSE_MASK       0x80000000
 
+/* USBC PD FW version retrieval command */
+#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000
+
 /* TEE Gfx Command IDs for the register interface.
 *  Command ID must be between 0x00010000 and 0x000F0000.
 */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0829188..0afd610 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -26,6 +26,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
 #include "amdgpu_ucode.h"
 #include "soc15_common.h"
 #include "psp_v11_0.h"
@@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 /* memory training timeout define */
 #define MEM_TRAIN_SEND_MSG_TIMEOUT_US	3000000
 
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
 static int psp_v11_0_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
 	struct psp_ring *ring;
 	struct amdgpu_device *adev = psp->adev;
 
-	psp_v11_0_reroute_ih(psp);
+	if (!amdgpu_sriov_vf(adev))
+		psp_v11_0_reroute_ih(psp);
 
 	ring = &psp->km_ring;
 
@@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
 	if (ret)
 		return -EINVAL;
 
+	/* If err_event_athub occurs, the error injection was successful,
+	 * but the return status from the TA is no longer reliable. */
+	if (amdgpu_ras_intr_triggered())
+		return 0;
+
 	return ras_cmd->ras_status;
 }
 
@@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
 
+static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
+{
+	struct amdgpu_device *adev = psp->adev;
+	uint32_t reg_status;
+	int ret, i = 0;
+
+	/* Write lower 32-bit address of the PD Controller FW */
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+			     0x80000000, 0x80000000, false);
+	if (ret)
+		return ret;
+
+	/* Fire up the interrupt so the PSP can pick up the lower address */
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+			     0x80000000, 0x80000000, false);
+	if (ret)
+		return ret;
+
+	reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+	if ((reg_status & 0xFFFF) != 0) {
+		DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
+				reg_status & 0xFFFF);
+		return -EIO;
+	}
+
+	/* Write upper 32-bit address of the PD Controller FW */
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
+
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+			     0x80000000, 0x80000000, false);
+	if (ret)
+		return ret;
+
+	/* Fire up the interrupt so the PSP can pick up the upper address */
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
+
+	/* The FW load can take a very long time */
+	do {
+		msleep(1000);
+		reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+		if (reg_status & 0x80000000)
+			goto done;
+
+	} while (++i < USBC_PD_POLLING_LIMIT_S);
+
+	return -ETIME;
+done:
+
+	if ((reg_status & 0xFFFF) != 0) {
+		DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = 0x%04x\n",
+				reg_status & 0xFFFF);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+	struct amdgpu_device *adev = psp->adev;
+	int ret;
+
+	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+				     0x80000000, 0x80000000, false);
+	if (!ret)
+		*fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
+
+	return ret;
+}
+
 static const struct psp_funcs psp_v11_0_funcs = {
 	.init_microcode = psp_v11_0_init_microcode,
 	.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
 	.mem_training = psp_v11_0_memory_training,
 	.ring_get_wptr = psp_v11_0_ring_get_wptr,
 	.ring_set_wptr = psp_v11_0_ring_set_wptr,
+	.load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
+	.read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
 };
 
 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
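
Callers reach these two entry points through the psp_funcs table registered above: place the PD controller image in a DMA-visible buffer, hand its bus address to load_usbc_pd_fw, then query the version with read_usbc_pd_fw. A hedged sketch of such a caller; the wrapper name and buffer handling are illustrative, not the actual amdgpu plumbing:

/* Illustrative caller only; the real driver plumbing differs. */
static int example_update_usbc_pd_fw(struct psp_context *psp,
				     const void *fw_image, size_t fw_size)
{
	struct amdgpu_device *adev = psp->adev;
	dma_addr_t dma_addr;
	uint32_t fw_ver;
	void *cpu_addr;
	int ret;

	cpu_addr = dma_alloc_coherent(adev->dev, fw_size, &dma_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	memcpy(cpu_addr, fw_image, fw_size);

	ret = psp->funcs->load_usbc_pd_fw(psp, dma_addr);
	if (!ret)
		ret = psp->funcs->read_usbc_pd_fw(psp, &fw_ver);

	dma_free_coherent(adev->dev, fw_size, cpu_addr, dma_addr);
	return ret;
}
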
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 52a858c..5f3a5ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
 	struct ras_ih_if ih_info = {
 		.cb = sdma_v4_0_process_ras_data_cb,
 	};
-	int i;
 
-	/* read back edc counter registers to clear the counters */
-	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
-		for (i = 0; i < adev->sdma.num_instances; i++)
-			RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
-	}
+	if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
+		adev->sdma.funcs->reset_ras_error_count(adev);
 
 	if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
 		return adev->sdma.funcs->ras_late_init(adev, &ih_info);
@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
 	return 0;
 };
 
+static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	int i;
+
+	/* read back edc counter registers to clear the counters */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+	}
+}
+
 static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
 	.ras_late_init = amdgpu_sdma_ras_late_init,
 	.ras_fini = amdgpu_sdma_ras_fini,
 	.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+	.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
 };
 
 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 67b9830b..ebfd2cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -746,11 +746,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 			sdma_v5_0_enable(adev, true);
 		}
 
-		r = amdgpu_ring_test_ring(ring);
-		if (r) {
-			ring->sched.ready = false;
+		r = amdgpu_ring_test_helper(ring);
+		if (r)
 			return r;
-		}
 
 		if (adev->mman.buffer_funcs_ring == ring)
 			amdgpu_ttm_set_buffer_funcs_status(adev, true);
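
amdgpu_ring_test_helper folds the sched.ready bookkeeping that was open-coded here into the test call itself; in essence (a paraphrase of the helper, not its verbatim body):

/* Paraphrased sketch: run the ring test and record the outcome in
 * sched.ready rather than open-coding that at every call site.
 */
static int example_ring_test_helper(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	ring->sched.ready = !r;		/* ready only if the test passed */
	return r;
}
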
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 4cb4c89..0860e85 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3439,7 +3439,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 
 	if (adev->asic_type == CHIP_HAINAN) {
 		if ((adev->pdev->revision == 0x81) ||
-		    (adev->pdev->revision == 0x83) ||
 		    (adev->pdev->revision == 0xC3) ||
 		    (adev->pdev->device == 0x6664) ||
 		    (adev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c902f26..9bffbab 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -46,8 +46,7 @@
 #define I2C_NO_STOP	1
 #define I2C_RESTART	2
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
 
 static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
 {
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
 
 static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!smu_v11_0_i2c_bus_lock(i2c)) {
 		DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
 
 static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!smu_v11_0_i2c_bus_unlock(i2c)) {
 		DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
 			      struct i2c_msg *msgs, int num)
 {
 	int i, ret;
-	struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+	struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+	struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
 
 	if (!control->bus_locked) {
 		DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
 	control->class = I2C_CLASS_SPD;
 	control->dev.parent = &adev->pdev->dev;
 	control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
-	snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+	snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
 	control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
 
 	res = i2c_add_adapter(control);
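
The to_amdgpu_device() rework above recovers the amdgpu_device straight from the embedded i2c_adapter at pm.smu_i2c rather than detouring through the RAS EEPROM control. A compilable userspace model of that container_of() idiom; the struct layout is an invented stand-in:

    #include <stdio.h>
    #include <stddef.h>

    /* same offsetof trick the kernel macro expands to */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct i2c_adapter { int nr; };

    struct amdgpu_device_model {
        int id;
        struct i2c_adapter smu_i2c; /* embedded, like adev->pm.smu_i2c */
    };

    int main(void)
    {
        struct amdgpu_device_model adev = { .id = 42 };
        struct i2c_adapter *i2c = &adev.smu_i2c;

        /* recover the enclosing device from the member pointer */
        struct amdgpu_device_model *back =
            container_of(i2c, struct amdgpu_device_model, smu_i2c);
        printf("recovered id=%d\n", back->id); /* prints 42 */
        return 0;
    }

This is why lock_bus()/unlock_bus() can now reach eeprom_control through adev instead of embedding the accessor in the RAS struct.
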
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d8945c3..a40499d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -852,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
 	/* change this when we implement soft reset */
 	return true;
 }
+
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
+{
+	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+		return;
+	/* read back hdp ras counter to reset it to 0 */
+	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
 				 uint64_t *count1)
 {
@@ -1019,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
 	.get_config_memsize = &soc15_get_config_memsize,
 	.flush_hdp = &soc15_flush_hdp,
 	.invalidate_hdp = &soc15_invalidate_hdp,
+	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
 	.need_full_reset = &soc15_need_full_reset,
 	.init_doorbell_index = &vega20_doorbell_index_init,
 	.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1264,6 +1274,10 @@ static int soc15_common_late_init(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_get_irq(adev);
 
+	if (adev->asic_funcs &&
+	    adev->asic_funcs->reset_hdp_ras_error_count)
+		adev->asic_funcs->reset_hdp_ras_error_count(adev);
+
 	if (adev->nbio.funcs->ras_late_init)
 		r = adev->nbio.funcs->ras_late_init(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index d0fb7a6..b03f950 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
 	u32	or_mask;
 };
 
+struct soc15_reg_rlcg {
+	u32	hwip;
+	u32	instance;
+	u32	segment;
+	u32	reg;
+};
+
 struct soc15_reg_entry {
 	uint32_t hwip;
 	uint32_t inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 19e870c..c893c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -70,10 +70,9 @@
 		}						\
 	} while (0)
 
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
 #define WREG32_RLC(reg, value) \
 	do {							\
-		if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) {    \
+		if (amdgpu_sriov_fullaccess(adev)) {    \
 			uint32_t i = 0;	\
 			uint32_t retries = 50000;	\
 			uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0;	\
@@ -98,7 +97,7 @@
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
 	do {							\
 		uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
-		if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) {    \
+		if (amdgpu_sriov_fullaccess(adev)) {    \
 			uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2;	\
 			uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3;	\
 			uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;   \
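
The RLC write macros above now key off amdgpu_sriov_fullaccess(). Going only by the AMDGPU_VIRT_SUPPORT_RLC_PRG_REG macro it replaces, the predicate presumably folds together "is a VF" and "not in runtime mode"; the sketch below models that assumption and is not read from the kernel source:

    #include <stdbool.h>
    #include <stdio.h>

    /* inferred from the replaced macro: a VF outside runtime mode has
     * full register access and must use the RLC scratch protocol */
    static bool sriov_fullaccess(bool is_vf, bool is_runtime)
    {
        return is_vf && !is_runtime;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               sriov_fullaccess(true, false),   /* 1: RLC path */
               sriov_fullaccess(true, true),    /* 0: plain write */
               sriov_fullaccess(false, false)); /* 0: bare metal */
        return 0;
    }
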
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 793bf70..14d3463 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 	if (rsmu_umc_index_state)
 		umc_v6_1_disable_umc_index_mode(adev);
 
+	if ((adev->asic_type == CHIP_ARCTURUS) &&
+		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+		DRM_WARN("Fail to disable DF-Cstate.\n");
+
 	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
 		umc_reg_offset = get_umc_6_reg_offset(adev,
 						      umc_inst,
@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
 							  &(err_data->ue_count));
 	}
 
+	if ((adev->asic_type == CHIP_ARCTURUS) &&
+		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+		DRM_WARN("Fail to enable DF-Cstate\n");
+
 	if (rsmu_umc_index_state)
 		umc_v6_1_enable_umc_index_mode(adev);
 }
@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 			SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
 	}
 
-	/* skip error address process if -ENOMEM */
+	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+	if (mc_umc_status == 0)
+		return;
+
 	if (!err_data->err_addr) {
 		/* clear umc status */
 		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 	}
 
 	err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
 	/* calculate error address if ue/ce error is detected */
 	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
 	if (rsmu_umc_index_state)
 		umc_v6_1_disable_umc_index_mode(adev);
 
+	if ((adev->asic_type == CHIP_ARCTURUS) &&
+		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+		DRM_WARN("Fail to disable DF-Cstate.\n");
+
 	LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
 		umc_reg_offset = get_umc_6_reg_offset(adev,
 						      umc_inst,
@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
 					     umc_inst);
 	}
 
+	if ((adev->asic_type == CHIP_ARCTURUS) &&
+		amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+		DRM_WARN("Fail to enable DF-Cstate\n");
+
 	if (rsmu_umc_index_state)
 		umc_v6_1_enable_umc_index_mode(adev);
 }
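
The umc_v6_1 hunks above bracket each counter and address walk with DF C-state disallow/allow calls on Arcturus, warning rather than bailing out when the DPM call fails. A standalone sketch of that bracketing pattern with invented names:

    #include <stdio.h>

    enum df_cstate { DF_CSTATE_DISALLOW, DF_CSTATE_ALLOW };

    /* stand-in for amdgpu_dpm_set_df_cstate(); 0 means success */
    static int set_df_cstate(enum df_cstate s)
    {
        (void)s;
        return 0;
    }

    static void query_umc_counters(void)
    {
        if (set_df_cstate(DF_CSTATE_DISALLOW))
            fprintf(stderr, "failed to disable DF C-state\n");

        /* ... walk the per-channel UMC error-counter registers ... */

        if (set_df_cstate(DF_CSTATE_ALLOW))
            fprintf(stderr, "failed to re-enable DF C-state\n");
    }

    int main(void)
    {
        query_umc_counters();
        return 0;
    }
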
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index b7f1734..ec8091a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
 #include "soc15d.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
 
 #include "vcn/vcn_2_0_0_offset.h"
 #include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
 				enum amd_powergating_state state);
 static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
 				int inst_idx, struct dpg_pause_state *new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
 /**
  * vcn_v2_0_early_init - set function pointers
  *
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	adev->vcn.num_vcn_inst = 1;
-	adev->vcn.num_enc_rings = 2;
+	if (amdgpu_sriov_vf(adev))
+		adev->vcn.num_enc_rings = 1;
+	else
+		adev->vcn.num_enc_rings = 2;
 
 	vcn_v2_0_set_dec_ring_funcs(adev);
 	vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		ring = &adev->vcn.inst->ring_enc[i];
 		ring->use_doorbell = true;
-		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+		if (!amdgpu_sriov_vf(adev))
+			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+		else
+			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
 		sprintf(ring->name, "vcn_enc%d", i);
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
 		if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
 
 	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
 
+	r = amdgpu_virt_alloc_mm_table(adev);
+	if (r)
+		return r;
+
 	return 0;
 }
 
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_virt_free_mm_table(adev);
+
 	r = amdgpu_vcn_suspend(adev);
 	if (r)
 		return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
 	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
 					     ring->doorbell_index, 0);
 
+	if (amdgpu_sriov_vf(adev))
+		vcn_v2_0_start_sriov(adev);
+
 	r = amdgpu_ring_test_helper(ring);
 	if (r)
 		goto done;
@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
 	uint32_t offset;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* cache window 0: fw */
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* UVD disable CGC */
 	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* enable UVD CGC */
 	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
 	uint32_t data = 0;
 	int ret;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
 	uint32_t data = 0;
 	int ret;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		/* Before power off, this indicator has to be turned on */
 		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_CG_STATE_GATE);
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	if (enable) {
 		/* wait for STATUS to clear */
 		if (!vcn_v2_0_is_idle(handle))
@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
 	unsigned i;
 	int r;
 
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 4);
 	if (r)
@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	if (amdgpu_sriov_vf(adev)) {
+		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+		return 0;
+	}
+
 	if (state == adev->vcn.cur_state)
 		return 0;
 
@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
 	return ret;
 }
 
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
+				struct amdgpu_mm_table *table)
+{
+	uint32_t data = 0, loop;
+	uint64_t addr = table->gpu_addr;
+	struct mmsch_v2_0_init_header *header;
+	uint32_t size;
+	int i;
+
+	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+	size = header->header_size + header->vcn_table_size;
+
+	/* 1, write the GPU MC address of the memory descriptor location
+	 * to the mmsch_vf_ctx_addr_lo/hi register pair
+	 */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+	/* 2, update vmid of descriptor */
+	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+	/* use domain0 for MM scheduler */
+	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+	/* 3, notify mmsch about the size of this descriptor */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+	/* 4, set resp to zero */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+	adev->vcn.inst->ring_dec.wptr = 0;
+	adev->vcn.inst->ring_dec.wptr_old = 0;
+	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
+
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		adev->vcn.inst->ring_enc[i].wptr = 0;
+		adev->vcn.inst->ring_enc[i].wptr_old = 0;
+		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
+	}
+
+	/* 5, kick off the initialization and wait until
+	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
+	 */
+	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+	loop = 1000;
+	while ((data & 0x10000002) != 0x10000002) {
+		udelay(10);
+		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+		loop--;
+		if (!loop)
+			break;
+	}
+
+	if (!loop) {
+		DRM_ERROR("failed to init MMSCH, " \
+			"mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
+{
+	int r;
+	uint32_t tmp;
+	struct amdgpu_ring *ring;
+	uint32_t offset, size;
+	uint32_t table_size = 0;
+	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
+	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+	struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
+	struct mmsch_v2_0_cmd_end end = { {0} };
+	struct mmsch_v2_0_init_header *header;
+	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+	uint8_t i = 0;
+
+	header = (struct mmsch_v2_0_init_header *)init_table;
+	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+	direct_rd_mod_wt.cmd_header.command_type =
+		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+	direct_poll.cmd_header.command_type =
+		MMSCH_COMMAND__DIRECT_REG_POLLING;
+	end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
+		header->version = MMSCH_VERSION;
+		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
+
+		header->vcn_table_offset = header->header_size;
+
+		init_table += header->vcn_table_offset;
+
+		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+			0xFFFFFFFF, 0x00000004);
+
+		/* mc resume */
+		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+			tmp = AMDGPU_UCODE_ID_VCN;
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+			offset = 0;
+		} else {
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				lower_32_bits(adev->vcn.inst->gpu_addr));
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i,
+					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				upper_32_bits(adev->vcn.inst->gpu_addr));
+			offset = size;
+		}
+
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+			0);
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+			size);
+
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+			0);
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+			AMDGPU_VCN_STACK_SIZE);
+
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
+				AMDGPU_VCN_STACK_SIZE));
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
+				AMDGPU_VCN_STACK_SIZE));
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+			0);
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+			AMDGPU_VCN_CONTEXT_SIZE);
+
+		for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+			ring = &adev->vcn.inst->ring_enc[r];
+			ring->wptr = 0;
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+				lower_32_bits(ring->gpu_addr));
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+				upper_32_bits(ring->gpu_addr));
+			MMSCH_V2_0_INSERT_DIRECT_WT(
+				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+				ring->ring_size / 4);
+		}
+
+		ring = &adev->vcn.inst->ring_dec;
+		ring->wptr = 0;
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+			lower_32_bits(ring->gpu_addr));
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+			upper_32_bits(ring->gpu_addr));
+		/* force RBC into idle state */
+		tmp = order_base_2(ring->ring_size);
+		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+		MMSCH_V2_0_INSERT_DIRECT_WT(
+			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+		/* add end packet */
+		tmp = sizeof(struct mmsch_v2_0_cmd_end);
+		memcpy((void *)init_table, &end, tmp);
+		table_size += (tmp / 4);
+		header->vcn_table_size = table_size;
+
+	}
+	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
+}
+
 static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
 	.name = "vcn_v2_0",
 	.early_init = vcn_v2_0_early_init,
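
vcn_v2_0_start_mmsch() above hands the MMSCH a descriptor table, rings the mailbox, then polls the response register with a bounded retry count and returns -EBUSY on timeout. A userspace model of that poll loop; read_resp() stands in for the RREG32_SOC15 mailbox read:

    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>

    static unsigned int read_resp(void)
    {
        return 0x10000002; /* stub: MMSCH reports success immediately */
    }

    static int wait_mmsch_done(void)
    {
        unsigned int data, loop = 1000;

        do {
            data = read_resp();
            if ((data & 0x10000002) == 0x10000002)
                return 0; /* handshake complete */
            usleep(10);
        } while (--loop);

        fprintf(stderr, "MMSCH init timed out, resp=0x%08x\n", data);
        return -EBUSY;
    }

    int main(void)
    {
        return wait_mmsch_done() ? 1 : 0;
    }
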
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 678253d..c6363f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
 static int vcn_v2_5_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_ARCTURUS) {
-		u32 harvest;
-		int i;
-
-		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-			harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
-			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
-				adev->vcn.harvest_config |= 1 << i;
-		}
-
-		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
-						 AMDGPU_VCN_HARVEST_VCN1))
-			/* both instances are harvested, disable the block */
-			return -ENOENT;
-	} else
-		adev->vcn.num_vcn_inst = 1;
 
 	if (amdgpu_sriov_vf(adev)) {
 		adev->vcn.num_vcn_inst = 2;
 		adev->vcn.harvest_config = 0;
 		adev->vcn.num_enc_rings = 1;
 	} else {
+		if (adev->asic_type == CHIP_ARCTURUS) {
+			u32 harvest;
+			int i;
+
+			adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+			for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+				harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+				if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+					adev->vcn.harvest_config |= 1 << i;
+			}
+
+			if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+						AMDGPU_VCN_HARVEST_VCN1))
+				/* both instances are harvested, disable the block */
+				return -ENOENT;
+		} else
+			adev->vcn.num_vcn_inst = 1;
+
 		adev->vcn.num_enc_rings = 2;
 	}
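
The vcn_v2_5 early_init restructure keeps Arcturus harvest detection on the bare-metal path only: each fused-off instance sets a bit in harvest_config, and when every instance is harvested the block reports -ENOENT. A standalone model with a stubbed register read and invented values:

    #include <stdio.h>
    #include <errno.h>

    #define NUM_INST 2
    #define UVD_DISABLE_MASK 0x2u

    /* stand-in for RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING) */
    static unsigned int read_harvest_reg(int i)
    {
        return i == 0 ? UVD_DISABLE_MASK : 0; /* instance 0 fused off */
    }

    static int early_init_model(void)
    {
        unsigned int harvest_config = 0;
        int i;

        for (i = 0; i < NUM_INST; i++)
            if (read_harvest_reg(i) & UVD_DISABLE_MASK)
                harvest_config |= 1u << i;

        if (harvest_config == (1u << NUM_INST) - 1)
            return -ENOENT; /* all instances harvested: disable block */

        printf("harvest_config=0x%x\n", harvest_config); /* 0x1 */
        return 0;
    }

    int main(void)
    {
        return early_init_model() ? 1 : 0;
    }
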
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 3f0300e..0ec5f25 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
 		return PTR_ERR(process);
 
 	if (kfd_is_locked()) {
+		dev_dbg(kfd_device, "kfd is locked!\n"
+				"process %d unreferenced", process->pasid);
 		kfd_unref_process(process);
 		return -EAGAIN;
 	}
@@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
 	if (!dev)
 		return -EINVAL;
 
-	dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+	amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
 
 	args->gb_addr_config = config.gb_addr_config;
 	args->num_banks = config.num_banks;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e401..d5386f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	if (kfd->kfd2kgd->get_hive_id)
 		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
 
+	if (kfd->kfd2kgd->get_unique_id)
+		kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+
 	if (kfd_interrupt_init(kfd)) {
 		dev_err(kfd_device, "Error initializing interrupts\n");
 		goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
-		kgd2kfd_suspend(kfd);
+		kgd2kfd_suspend(kfd, false);
 		device_queue_manager_uninit(kfd->dqm);
 		kfd_interrupt_exit(kfd);
 		kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 
 	kfd->dqm->ops.pre_reset(kfd->dqm);
 
-	kgd2kfd_suspend(kfd);
+	kgd2kfd_suspend(kfd, false);
 
 	kfd_signal_reset_event(kfd);
 	return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
 	return  (atomic_read(&kfd_locked) > 0);
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 	if (!kfd->init_complete)
 		return;
 
-	/* For first KFD device suspend all the KFD processes */
-	if (atomic_inc_return(&kfd_locked) == 1)
-		kfd_suspend_all_processes();
+	/* for runtime suspend, skip locking kfd */
+	if (!run_pm) {
+		/* For first KFD device suspend all the KFD processes */
+		if (atomic_inc_return(&kfd_locked) == 1)
+			kfd_suspend_all_processes();
+	}
 
 	kfd->dqm->ops.stop(kfd->dqm);
-
 	kfd_iommu_suspend(kfd);
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	int ret, count;
 
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
 	if (ret)
 		return ret;
 
-	count = atomic_dec_return(&kfd_locked);
-	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
-	if (count == 0)
-		ret = kfd_resume_all_processes();
+	/* for runtime resume, skip unlocking kfd */
+	if (!run_pm) {
+		count = atomic_dec_return(&kfd_locked);
+		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+		if (count == 0)
+			ret = kfd_resume_all_processes();
+	}
 
 	return ret;
 }
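
kgd2kfd_suspend() and kgd2kfd_resume() above gain a run_pm flag: system suspend still takes the global kfd_locked count, so the first suspender freezes all KFD processes, while runtime suspend only stops the queues. A compilable model of that gating; a plain int stands in for the kernel's atomic_t:

    #include <stdio.h>
    #include <stdbool.h>

    static int kfd_locked;

    static void suspend_model(bool run_pm)
    {
        /* runtime suspend skips the process freeze entirely */
        if (!run_pm && ++kfd_locked == 1)
            printf("suspending all KFD processes\n");
        printf("stopping device queues\n");
    }

    static void resume_model(bool run_pm)
    {
        printf("restarting device queues\n");
        if (!run_pm && --kfd_locked == 0)
            printf("resuming all KFD processes\n");
    }

    int main(void)
    {
        suspend_model(true);  /* runtime PM: processes untouched */
        resume_model(true);
        suspend_model(false); /* system suspend: processes frozen */
        resume_model(false);
        return 0;
    }
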
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf..77ea0f0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	/* queue is available for KFD usage if bit is 1 */
 	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
 		if (test_bit(pipe_offset + i,
-			      dqm->dev->shared_resources.queue_bitmap))
+			      dqm->dev->shared_resources.cp_queue_bitmap))
 			return true;
 	return false;
 }
 
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
 {
-	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
 				KGD_MAX_QUEUES);
 }
 
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
 	return dqm->dev->device_info->num_xgmi_sdma_engines;
 }
 
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+	return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
 {
 	return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 						qpd->sh_mem_bases);
 }
 
+void increment_queue_count(struct device_queue_manager *dqm,
+			enum kfd_queue_type type)
+{
+	dqm->active_queue_count++;
+	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+		dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+			enum kfd_queue_type type)
+{
+	dqm->active_queue_count--;
+	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+		dqm->active_cp_queue_count--;
+}
+
 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
 {
 	struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	struct mqd_manager *mqd_mgr;
 	int retval;
 
-	print_queue(q);
-
 	dqm_lock(dqm);
 
 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		dqm->queue_count++;
-
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 			q->properties.type)];
 
-	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 		deallocate_hqd(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	} else {
+	else {
 		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	}
 	qpd->queue_count--;
 	if (q->properties.is_active)
-		dqm->queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 	return retval;
 }
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	/*
 	 * check active state vs. the previous state and modify
 	 * counter accordingly. map_queues_cpsch uses the
-	 * dqm->queue_count to determine whether a new runlist must be
+	 * dqm->active_queue_count to determine whether a new runlist must be
 	 * uploaded.
 	 */
 	if (q->properties.is_active && !prev_active)
-		dqm->queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 	else if (!q->properties.is_active && prev_active)
-		dqm->queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
 		retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		dqm->queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 	}
 	retval = execute_queues_cpsch(dqm,
 				qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		dqm->queue_count++;
+		increment_queue_count(dqm, q->properties.type);
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
+	dqm->active_cp_queue_count = 0;
 
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
 
 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
 			if (test_bit(pipe_offset + queue,
-				     dqm->dev->shared_resources.queue_bitmap))
+				     dqm->dev->shared_resources.cp_queue_bitmap))
 				dqm->allocated_queues[pipe] |= 1 << queue;
 	}
 
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
 {
 	int i;
 
-	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
 
 	kfree(dqm->allocated_queues);
 	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
 	int bit;
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		if (dqm->sdma_bitmap == 0)
+		if (dqm->sdma_bitmap == 0) {
+			pr_err("No more SDMA queue to allocate\n");
 			return -ENOMEM;
+		}
+
 		bit = __ffs64(dqm->sdma_bitmap);
 		dqm->sdma_bitmap &= ~(1ULL << bit);
 		q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
 		q->properties.sdma_queue_id = q->sdma_id /
 				get_num_sdma_engines(dqm);
 	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		if (dqm->xgmi_sdma_bitmap == 0)
+		if (dqm->xgmi_sdma_bitmap == 0) {
+			pr_err("No more XGMI SDMA queue to allocate\n");
 			return -ENOMEM;
+		}
 		bit = __ffs64(dqm->xgmi_sdma_bitmap);
 		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
 		q->sdma_id = bit;
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
 		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
 			/ dqm->dev->shared_resources.num_pipe_per_mec;
 
-		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
 			continue;
 
 		/* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
-	dqm->queue_count = dqm->processes_count = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_queue_count = dqm->processes_count = 0;
+	dqm->active_cp_queue_count = 0;
+
 	dqm->active_runlist = false;
 	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
 	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	dqm->queue_count++;
+	increment_queue_count(dqm, kq->queue->properties.type);
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	dqm->queue_count--;
+	decrement_queue_count(dqm, kq->queue->properties.type);
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	if (q->properties.is_active) {
-		dqm->queue_count++;
+		increment_queue_count(dqm, q->properties.type);
+
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	return 0;
 }
 
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
-	int i, retval = 0;
-
-	for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
-		dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
-		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
-		if (retval)
-			return retval;
-	}
-	return retval;
-}
-
 /* dqm->lock mutex has to be locked before calling this function */
 static int map_queues_cpsch(struct device_queue_manager *dqm)
 {
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
 
 	if (!dqm->sched_running)
 		return 0;
-	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
 		return 0;
 	if (dqm->active_runlist)
 		return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	if (!dqm->active_runlist)
 		return retval;
 
-	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
-		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
-	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
-		unmap_sdma_queues(dqm);
-
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			filter, filter_param, false, 0);
 	if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	deallocate_doorbell(qpd, q);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	}
 
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		dqm->queue_count--;
+		decrement_queue_count(dqm, q->properties.type);
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		dqm->queue_count--;
+		decrement_queue_count(dqm, kq->queue->properties.type);
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 
 	/* Clear all user mode queues */
 	list_for_each_entry(q, &qpd->queues_list, list) {
-		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-			dqm->sdma_queue_count--;
+		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 			deallocate_sdma_queue(dqm, q);
-		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-			dqm->xgmi_sdma_queue_count--;
+		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
-		}
 
 		if (q->properties.is_active)
-			dqm->queue_count--;
+			decrement_queue_count(dqm, q->properties.type);
 
 		dqm->total_queue_count--;
 	}
@@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
 	struct kfd_dev *dev = dqm->dev;
 	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
 	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
-		(dev->device_info->num_sdma_engines +
-		dev->device_info->num_xgmi_sdma_engines) *
+		get_num_all_sdma_engines(dqm) *
 		dev->device_info->num_sdma_queues_per_engine +
 		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
 
 	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
 		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
-		(void *)&(mem_obj->cpu_ptr), true);
+		(void *)&(mem_obj->cpu_ptr), false);
 
 	return retval;
 }
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 
 		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
 			if (!test_bit(pipe_offset + queue,
-				      dqm->dev->shared_resources.queue_bitmap))
+				      dqm->dev->shared_resources.cp_queue_bitmap))
 				continue;
 
 			r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
 		}
 	}
 
-	for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
-			get_num_xgmi_sdma_engines(dqm); pipe++) {
+	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
 		for (queue = 0;
 		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
 		     queue++) {
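
The device queue manager above replaces the per-type SDMA counters with two totals maintained by increment_queue_count()/decrement_queue_count(): every active queue bumps active_queue_count, and only compute/DIQ queues bump active_cp_queue_count, so the runlist oversubscription check no longer has to subtract SDMA counts. A small model of the split:

    #include <stdio.h>

    enum q_type { Q_COMPUTE, Q_DIQ, Q_SDMA, Q_SDMA_XGMI };

    struct dqm_model {
        int active_queue_count;
        int active_cp_queue_count;
    };

    static void inc_queue(struct dqm_model *dqm, enum q_type t)
    {
        dqm->active_queue_count++;
        if (t == Q_COMPUTE || t == Q_DIQ)
            dqm->active_cp_queue_count++;
    }

    int main(void)
    {
        struct dqm_model dqm = { 0, 0 };

        inc_queue(&dqm, Q_COMPUTE);
        inc_queue(&dqm, Q_SDMA);
        /* only the CP count feeds the oversubscription check */
        printf("total=%d cp=%d\n", dqm.active_queue_count,
               dqm.active_cp_queue_count); /* total=2 cp=1 */
        return 0;
    }
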
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b6..50d919f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,9 +180,8 @@ struct device_queue_manager {
 	struct list_head	queues;
 	unsigned int		saved_flags;
 	unsigned int		processes_count;
-	unsigned int		queue_count;
-	unsigned int		sdma_queue_count;
-	unsigned int		xgmi_sdma_queue_count;
+	unsigned int		active_queue_count;
+	unsigned int		active_cp_queue_count;
 	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
 		struct device_queue_manager_asic_ops *asic_ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
 unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 1f83655..15476fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
 	if (p->signal_mapped_size &&
 	    p->signal_event_count == p->signal_mapped_size / 8) {
 		if (!p->signal_event_limit_reached) {
-			pr_warn("Signal event wasn't created because limit was reached\n");
+			pr_debug("Signal event wasn't created because limit was reached\n");
 			p->signal_event_limit_reached = true;
 		}
 		return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index bb77b88..78714f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
 {
 	/*
 	 * node id couldn't be 0 - the three MSB bits of
-	 * aperture shoudn't be 0
+	 * aperture shouldn't be 0
 	 */
 	pdd->lds_base = MAKE_LDS_APP_BASE_VI();
 	pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 436b7f5..48cda30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
 	int retval;
 	struct kfd_mem_obj *mqd_mem_obj = NULL;
 
-	/* From V9,  for CWSR, the control stack is located on the next page
-	 * boundary after the mqd, we will use the gtt allocation function
-	 * instead of sub-allocation function.
+	/* For V9 only, due to a HW bug, the control stack of a user mode
+	 * compute queue must be allocated just behind the page boundary
+	 * of its regular MQD buffer. So we allocate an enlarged MQD
+	 * buffer: the first page serves as the regular MQD and the
+	 * remainder holds the control stack. Although the two parts share
+	 * one buffer object, they need different memory types: the MQD
+	 * part needs UC (uncached) as usual, while the control stack
+	 * needs NC (non-coherent), unlike the UC type used when the
+	 * control stack is allocated in user space.
+	 *
+	 * Because of all this, we use the gtt allocation function instead
+	 * of the sub-allocation function for this enlarged MQD buffer.
+	 * Moreover, to get two memory types in a single buffer object, we
+	 * pass the special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to
+	 * instruct the amdgpu memory functions to do so.
 	 */
 	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
 		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
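
The comment above describes an enlarged MQD buffer whose first page holds the MQD proper and whose remainder, starting at the page boundary, is the control stack. Illustrative layout arithmetic under invented sizes:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    /* round x up to a multiple of a (a must be a power of two) */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long mqd_size = 1024;       /* hypothetical */
        unsigned long ctl_stack_size = 8192; /* hypothetical */

        /* control stack starts at the first page boundary past the MQD */
        unsigned long ctl_stack_offset = ALIGN(mqd_size, PAGE_SIZE);
        unsigned long total = ctl_stack_offset + ctl_stack_size;

        printf("alloc %lu bytes, control stack at +%lu\n",
               total, ctl_stack_offset); /* alloc 12288, stack at +4096 */
        return 0;
    }
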
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6..efdb75e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	struct kfd_dev *dev = pm->dqm->dev;
 
 	process_count = pm->dqm->processes_count;
-	queue_count = pm->dqm->queue_count;
-	compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
-				pm->dqm->xgmi_sdma_queue_count;
+	queue_count = pm->dqm->active_queue_count;
+	compute_queue_count = pm->dqm->active_cp_queue_count;
 
 	/* check if there is over subscription
 	 * Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 		max_proc_per_quantum = dev->max_proc_per_quantum;
 
 	if ((process_count > max_proc_per_quantum) ||
-	    compute_queue_count > get_queues_num(pm->dqm)) {
+	    compute_queue_count > get_cp_queues_num(pm->dqm)) {
 		*over_subscription = true;
 		pr_debug("Over subscribed runlist\n");
 	}
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	pm->ib_size_bytes = alloc_size_bytes;
 
 	pr_debug("Building runlist ib process count: %d queues count %d\n",
-		pm->dqm->processes_count, pm->dqm->queue_count);
+		pm->dqm->processes_count, pm->dqm->active_queue_count);
 
 	/* build the run list ib packet */
 	list_for_each_entry(cur, queues, list) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6af1b58..4a30498 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,6 +41,7 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_device.h>
 #include <kgd_kfd_interface.h>
+#include <linux/swap.h>
 
 #include "amd_shared.h"
 
@@ -293,6 +294,9 @@ struct kfd_dev {
 
 	/* xGMI */
 	uint64_t hive_id;
+
+	/* UUID */
+	uint64_t unique_id;
 
 	bool pci_atomic_requested;
 
@@ -502,6 +506,9 @@ struct queue {
 	struct kfd_process	*process;
 	struct kfd_dev		*device;
 	void *gws;
+
+	/* procfs */
+	struct kobject kobj;
 };
 
 /*
@@ -646,6 +653,7 @@ struct kfd_process_device {
 	 * function.
 	 */
 	bool already_dequeued;
+	bool runtime_inuse;
 
 	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
 	enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
 
 	/* Kobj for our procfs */
 	struct kobject *kobj;
+	struct kobject *kobj_queues;
 	struct attribute attr_pasid;
 };
 
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
 /* KFD's procfs */
 void kfd_procfs_init(void);
 void kfd_procfs_shutdown(void);
+int kfd_procfs_add_queue(struct queue *q);
+void kfd_procfs_del_queue(struct queue *q);
 
 /* Topology */
 int kfd_topology_init(void);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 25b90f7..fe0cd49 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/mman.h>
 #include <linux/file.h>
+#include <linux/pm_runtime.h>
 #include "amdgpu_amdkfd.h"
 #include "amdgpu.h"
 
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
 	}
 }
 
+static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
+				     struct attribute *attr, char *buffer)
+{
+	struct queue *q = container_of(kobj, struct queue, kobj);
+
+	if (!strcmp(attr->name, "size"))
+		return snprintf(buffer, PAGE_SIZE, "%llu",
+				q->properties.queue_size);
+	else if (!strcmp(attr->name, "type"))
+		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
+	else if (!strcmp(attr->name, "gpuid"))
+		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
+	else
+		pr_err("Invalid attribute");
+
+	return 0;
+}
+
+static struct attribute attr_queue_size = {
+	.name = "size",
+	.mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_type = {
+	.name = "type",
+	.mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_gpuid = {
+	.name = "gpuid",
+	.mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute *procfs_queue_attrs[] = {
+	&attr_queue_size,
+	&attr_queue_type,
+	&attr_queue_gpuid,
+	NULL
+};
+
+static const struct sysfs_ops procfs_queue_ops = {
+	.show = kfd_procfs_queue_show,
+};
+
+static struct kobj_type procfs_queue_type = {
+	.sysfs_ops = &procfs_queue_ops,
+	.default_attrs = procfs_queue_attrs,
+};
+
+int kfd_procfs_add_queue(struct queue *q)
+{
+	struct kfd_process *proc;
+	int ret;
+
+	if (!q || !q->process)
+		return -EINVAL;
+	proc = q->process;
+
+	/* Create proc/<pid>/queues/<queue id> folder */
+	if (!proc->kobj_queues)
+		return -EFAULT;
+	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
+			proc->kobj_queues, "%u", q->properties.queue_id);
+	if (ret < 0) {
+		pr_warn("Creating proc/<pid>/queues/%u failed",
+			q->properties.queue_id);
+		kobject_put(&q->kobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+void kfd_procfs_del_queue(struct queue *q)
+{
+	if (!q)
+		return;
+
+	kobject_del(&q->kobj);
+	kobject_put(&q->kobj);
+}
+
 int kfd_process_create_wq(void)
 {
 	if (!kfd_process_wq)
@@ -244,10 +327,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
 {
 	struct qcm_process_device *qpd = &pdd->qpd;
-	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
-			 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
-			 ALLOC_MEM_FLAGS_WRITABLE |
-			 ALLOC_MEM_FLAGS_EXECUTABLE;
+	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
+			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 	void *kaddr;
 	int ret;
 
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
 		if (ret)
 			pr_warn("Creating pasid for pid %d failed",
 					(int)process->lead_thread->pid);
+
+		process->kobj_queues = kobject_create_and_add("queues",
+							process->kobj);
+		if (!process->kobj_queues)
+			pr_warn("Creating KFD proc/queues folder failed");
 	}
 out:
 	if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
 		kfree(pdd->qpd.doorbell_bitmap);
 		idr_destroy(&pdd->alloc_idr);
 
+		/*
+		 * before destroying the pdd, make sure to report the
+		 * device as available again for auto suspend
+		 */
+		if (pdd->runtime_inuse) {
+			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+			pdd->runtime_inuse = false;
+		}
+
 		kfree(pdd);
 	}
 }
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
 	/* Remove the procfs files */
 	if (p->kobj) {
 		sysfs_remove_file(p->kobj, &p->attr_pasid);
+		kobject_del(p->kobj_queues);
+		kobject_put(p->kobj_queues);
+		p->kobj_queues = NULL;
 		kobject_del(p->kobj);
 		kobject_put(p->kobj);
 		p->kobj = NULL;
@@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 
 	/* Indicate to other users that MM is no longer valid */
 	p->mm = NULL;
+	/* Signal the eviction fence after user mode queues are
+	 * destroyed. This allows any BOs to be freed without
+	 * triggering pointless evictions or waiting for fences.
+	 */
+	dma_fence_signal(p->ef);
 
 	mutex_unlock(&p->mutex);
 
@@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
 {
 	struct kfd_dev *dev = pdd->dev;
 	struct qcm_process_device *qpd = &pdd->qpd;
-	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
-		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
+			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
+			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 	void *kaddr;
 	int ret;
 
@@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 	pdd->process = p;
 	pdd->bound = PDD_UNBOUND;
 	pdd->already_dequeued = false;
+	pdd->runtime_inuse = false;
 	list_add(&pdd->per_device_list, &p->per_device_data);
 
 	/* Init idr used for memory handle translation */
@@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
 		return ERR_PTR(-ENOMEM);
 	}
 
+	/*
+	 * Signal the runtime-pm system to auto resume and to prevent
+	 * further runtime suspend from the time the device pdd is
+	 * created until the pdd is destroyed.
+	 */
+	if (!pdd->runtime_inuse) {
+		err = pm_runtime_get_sync(dev->ddev->dev);
+		if (err < 0)
+			return ERR_PTR(err);
+	}
+
 	err = kfd_iommu_bind_process_to_device(pdd);
 	if (err)
-		return ERR_PTR(err);
+		goto out;
 
 	err = kfd_process_device_init_vm(pdd, NULL);
 	if (err)
-		return ERR_PTR(err);
+		goto out;
+
+	/*
+	 * make sure the runtime-pm usage counter is incremented just
+	 * once per pdd
+	 */
+	pdd->runtime_inuse = true;
 
 	return pdd;
+
+out:
+	/* balance runpm reference count and exit with error */
+	if (!pdd->runtime_inuse) {
+		pm_runtime_mark_last_busy(dev->ddev->dev);
+		pm_runtime_put_autosuspend(dev->ddev->dev);
+	}
+
+	return ERR_PTR(err);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(
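
kfd_bind_process_to_device() above takes one runtime-PM reference per device pdd, sets runtime_inuse only once init has fully succeeded, and releases the reference on the error path or when the pdd is destroyed. A userspace model of that bookkeeping; pm_get()/pm_put() stand in for pm_runtime_get_sync() and pm_runtime_put_autosuspend():

    #include <stdio.h>
    #include <stdbool.h>

    static int pm_refs;

    static int pm_get(void) { pm_refs++; return 0; }
    static void pm_put(void) { pm_refs--; }

    struct pdd_model { bool runtime_inuse; };

    static int bind_device(struct pdd_model *pdd)
    {
        /* take the reference only on the first bind of this pdd */
        if (!pdd->runtime_inuse && pm_get() < 0)
            return -1;

        /* ... iommu bind and VM init would go here; on failure the
         * reference taken above would be dropped before returning ... */

        pdd->runtime_inuse = true; /* counted exactly once per pdd */
        return 0;
    }

    static void destroy_device(struct pdd_model *pdd)
    {
        if (pdd->runtime_inuse) {
            pm_put();
            pdd->runtime_inuse = false;
        }
    }

    int main(void)
    {
        struct pdd_model pdd = { false };

        bind_device(&pdd);
        bind_device(&pdd); /* second bind takes no extra reference */
        destroy_device(&pdd);
        printf("pm refs=%d\n", pm_refs); /* 0: balanced */
        return 0;
    }
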
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 31fcd1b..084c35f5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
 	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
-			>= get_num_sdma_queues(dev->dqm)) ||
-			(type == KFD_QUEUE_TYPE_SDMA_XGMI &&
-			dev->dqm->xgmi_sdma_queue_count
-			>= get_num_xgmi_sdma_queues(dev->dqm))) {
-			pr_debug("Over-subscription is not allowed for SDMA.\n");
-			retval = -EPERM;
-			goto err_create_queue;
-		}
-
+		/* SDMA queues are always allocated statically no matter
+		 * which scheduler mode is used. We also do not need to
+		 * check whether an SDMA queue can be allocated here, because
+		 * allocate_sdma_queue() in create_queue() has the
+		 * corresponding check logic.
+		 */
 		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
 		pqn->q = q;
 		pqn->kq = NULL;
 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
-		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		if ((dev->dqm->sched_policy ==
 		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
 		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
-		(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
 			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
 			retval = -EPERM;
 			goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		pqn->q = q;
 		pqn->kq = NULL;
 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
-		pr_debug("DQM returned %d for create_queue\n", retval);
 		print_queue(q);
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	}
 
 	if (retval != 0) {
-		pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
 			pqm->process->pasid, type, retval);
 		goto err_create_queue;
 	}
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
 	if (q) {
 		pr_debug("PQM done creating queue\n");
+		kfd_procfs_add_queue(q);
 		print_queue_properties(&q->properties);
 	}
 
 	return retval;
 
 err_create_queue:
+	uninit_queue(q);
+	if (kq)
+		kernel_queue_uninit(kq, false);
 	kfree(pqn);
 err_allocate_pqn:
 	/* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	}
 
 	if (pqn->q) {
+		kfd_procfs_del_queue(pqn->q);
 		dqm = pqn->q->device->dqm;
 		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
 		if (retval) {
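
pqm_create_queue() and pqm_destroy_queue() above now register each user queue with the new per-process procfs entries via kfd_procfs_add_queue()/kfd_procfs_del_queue(); the show callback in kfd_process.c dispatches on the attribute name. A userspace model of that dispatch with invented stand-in types:

    #include <stdio.h>
    #include <string.h>

    struct queue_model {
        unsigned long long size;
        int type;
        unsigned int gpuid;
    };

    /* mirrors kfd_procfs_queue_show(): one callback, strcmp dispatch */
    static int queue_show(const struct queue_model *q, const char *attr,
                          char *buf, size_t len)
    {
        if (!strcmp(attr, "size"))
            return snprintf(buf, len, "%llu", q->size);
        if (!strcmp(attr, "type"))
            return snprintf(buf, len, "%d", q->type);
        if (!strcmp(attr, "gpuid"))
            return snprintf(buf, len, "%u", q->gpuid);
        fprintf(stderr, "invalid attribute\n");
        return 0;
    }

    int main(void)
    {
        struct queue_model q = { 4096, 2, 7 };
        char buf[64];

        queue_show(&q, "size", buf, sizeof(buf));
        printf("size=%s\n", buf); /* size=4096 */
        return 0;
    }
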
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 203c823..aa0bfa7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
 			dev->node_props.num_sdma_queues_per_engine);
 	sysfs_show_32bit_prop(buffer, "num_cp_queues",
 			dev->node_props.num_cp_queues);
+	sysfs_show_64bit_prop(buffer, "unique_id",
+			dev->node_props.unique_id);
 
 	if (dev->gpu) {
 		log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 	dev->node_props.num_gws = (hws_gws_support &&
 		dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
 		amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
-	dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+	dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
+	dev->node_props.unique_id = gpu->unique_id;
 
 	kfd_fill_mem_clk_max_info(dev);
 	kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 74e9b16..46eeecaf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,6 +54,7 @@
 
 struct kfd_node_properties {
 	uint64_t hive_id;
+	uint64_t unique_id;
 	uint32_t cpu_cores_count;
 	uint32_t simd_count;
 	uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6240259..d3674d8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 
+#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
 /* Number of bytes in PSP header for firmware. */
 #define PSP_HEADER_BYTES 0x100
 
@@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
 /* removes and deallocates the drm structures, created by the above function */
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
 
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
-
 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 				struct drm_plane *plane,
 				unsigned long possible_crtcs,
@@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	 * of pageflip completion, so last_flip_vblank is the forbidden count
 	 * for queueing new pageflips if vsync + VRR is enabled.
 	 */
-	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
-							amdgpu_crtc->crtc_id);
+	amdgpu_crtc->last_flip_vblank =
+		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
 
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 	if (acrtc) {
 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-				 amdgpu_dm_vrr_active(acrtc_state));
+		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+			      acrtc->crtc_id,
+			      amdgpu_dm_vrr_active(acrtc_state));
 
 		/* Core vblank handling is done here after end of front-porch in
 		 * vrr mode, as vblank timestamping will give valid results
@@ -458,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	if (acrtc) {
 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-				 amdgpu_dm_vrr_active(acrtc_state));
+		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+			      acrtc->crtc_id,
+			      amdgpu_dm_vrr_active(acrtc_state));
 
 		/* Core vblank handling at start of front-porch is only possible
 		 * in non-vrr mode, as only there vblank timestamping will give
@@ -522,7 +524,7 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
 			 amdgpu_dm_vrr_active(acrtc_state),
 			 acrtc_state->active_planes);
 
@@ -813,10 +815,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 
 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
 
-	memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
-	       fw_inst_const_size);
+	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
+	 * fw_inst_const part to cw0; otherwise, the firmware back-door
+	 * load is done here in dm_dmub_hw_init.
+	 */
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+				fw_inst_const_size);
+	}
+
 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
 	       fw_bss_data_size);
+
+	/* Copy firmware bios info into FB memory. */
 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
 	       adev->bios_size);
 
@@ -835,6 +847,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 	hw_params.fb_base = adev->gmc.fb_start;
 	hw_params.fb_offset = adev->gmc.aper_base;
 
+	/* backdoor load firmware and trigger dmub running */
+	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+		hw_params.load_inst_const = true;
+
 	if (dmcu)
 		hw_params.psp_version = dmcu->psp_version;
 
@@ -897,7 +913,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	init_data.asic_id.chip_family = adev->family;
 
-	init_data.asic_id.pci_revision_id = adev->rev_id;
+	init_data.asic_id.pci_revision_id = adev->pdev->revision;
 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
 
 	init_data.asic_id.vram_width = adev->gmc.vram_width;
@@ -972,7 +988,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	if (adev->asic_type >= CHIP_RAVEN) {
-		adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
 		if (!adev->dm.hdcp_workqueue)
 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -1003,11 +1019,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
-#if defined(CONFIG_DEBUG_FS)
-	if (dtn_debugfs_init(adev))
-		DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
-#endif
-
 	DRM_DEBUG_DRIVER("KMS initialized.\n");
 
 	return 0;
@@ -1091,9 +1102,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
-	case CHIP_NAVI12:
 	case CHIP_RENOIR:
 		return 0;
+	case CHIP_NAVI12:
+		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+		break;
 	case CHIP_RAVEN:
 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1204,23 +1217,22 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-		DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
-		return 0;
+	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+
+	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+			AMDGPU_UCODE_ID_DMCUB;
+		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+			adev->dm.dmub_fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+			 adev->dm.dmcub_fw_version);
 	}
 
-	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
-	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
-		AMDGPU_UCODE_ID_DMCUB;
-	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
-	adev->firmware.fw_size +=
-		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
-
 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
-	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
-		 adev->dm.dmcub_fw_version);
-
 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
 	dmub_srv = adev->dm.dmub_srv;
 
@@ -1839,8 +1851,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
 };
 
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+	u32 max_cll, min_cll, max, min, q, r;
+	struct amdgpu_dm_backlight_caps *caps;
+	struct amdgpu_display_manager *dm;
+	struct drm_connector *conn_base;
+	struct amdgpu_device *adev;
+	static const u8 pre_computed_values[] = {
+		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+
+	if (!aconnector || !aconnector->dc_link)
+		return;
+
+	conn_base = &aconnector->base;
+	adev = conn_base->dev->dev_private;
+	dm = &adev->dm;
+	caps = &dm->backlight_caps;
+	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+	caps->aux_support = false;
+	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
+
+	if (caps->ext_caps->bits.oled == 1 ||
+	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+		caps->aux_support = true;
+
+	/* From the specification (CTA-861-G), to calculate the maximum
+	 * luminance we need to use:
+	 *	Luminance = 50*2**(CV/32)
+	 * where CV is a one-byte value.
+	 * Evaluating this expression directly would require floating-point
+	 * precision; to avoid that complexity, we take advantage of the fact
+	 * that CV is divided by a constant. From Euclid's division algorithm,
+	 * we know that CV can be written as: CV = 32*q + r. Substituting CV
+	 * into the luminance expression gives 50*(2**q)*(2**(r/32)), so we
+	 * only need to pre-compute the values of 50*2**(r/32). The table was
+	 * generated with the following Ruby line:
+	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+	 * and the results can be verified against pre_computed_values.
+	 */
+	q = max_cll >> 5;
+	r = max_cll % 32;
+	max = (1 << q) * pre_computed_values[r];
+
+	// min luminance: maxLum * (CV/255)^2 / 100
+	q = DIV_ROUND_CLOSEST(min_cll, 255);
+	min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+	caps->aux_max_input_signal = max;
+	caps->aux_min_input_signal = min;
+}
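
The integer-only trick in the comment above is easy to check in isolation.
Below is a minimal standalone sketch (plain userspace C, not driver code; the
max_cll input of 90 is a made-up example) showing that
(1 << q) * pre_computed_values[r] reproduces 50*2**(CV/32):

	#include <stdio.h>

	static const unsigned char pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98
	};

	int main(void)
	{
		unsigned int max_cll = 90;	/* hypothetical CTA-861-G code value */
		unsigned int q = max_cll >> 5;	/* CV = 32*q + r */
		unsigned int r = max_cll % 32;
		unsigned int max = (1u << q) * pre_computed_values[r];

		/* CV=90: q=2, r=26 -> 4 * 88 = 352, vs. 50*2^(90/32) ~= 352.1 */
		printf("CV=%u -> max luminance ~= %u nits\n", max_cll, max);
		return 0;
	}
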
+
+void amdgpu_dm_update_connector_after_detect(
+		struct amdgpu_dm_connector *aconnector)
 {
 	struct drm_connector *connector = &aconnector->base;
 	struct drm_device *dev = connector->dev;
@@ -1953,7 +2020,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 					    aconnector->edid);
 		}
 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
-
+		update_connector_ext_caps(aconnector);
 	} else {
 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
 		amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -2169,10 +2236,10 @@ static void handle_hpd_rx_irq(void *param)
 		}
 	}
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-	    if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
-		    if (adev->dm.hdcp_workqueue)
-			    hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
-	    }
+	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+		if (adev->dm.hdcp_workqueue)
+			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
+	}
 #endif
 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
 	    (dc_link->type == dc_connection_mst_branch))
@@ -2567,6 +2634,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2581,9 +2649,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
 
 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
 	if (caps.caps_valid) {
+		dm->backlight_caps.caps_valid = true;
+		if (caps.aux_support)
+			return;
 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
-		dm->backlight_caps.caps_valid = true;
 	} else {
 		dm->backlight_caps.min_input_signal =
 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2591,40 +2661,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 	}
 #else
+	if (dm->backlight_caps.aux_support)
+		return;
+
 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
 #endif
 }
 
+static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+{
+	bool rc;
+
+	if (!link)
+		return 1;
+
+	rc = dc_link_set_backlight_level_nits(link, true, brightness,
+					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+
+	return rc ? 0 : 1;
+}
+
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+			      const uint32_t user_brightness)
+{
+	u32 min, max, conversion_pace;
+	u32 brightness = user_brightness;
+
+	if (!caps)
+		goto out;
+
+	if (!caps->aux_support) {
+		max = caps->max_input_signal;
+		min = caps->min_input_signal;
+		/*
+		 * The brightness input is in the range 0-255
+		 * It needs to be rescaled to be between the
+		 * requested min and max input signal
+		 * It also needs to be scaled up by 0x101 to
+		 * match the DC interface which has a range of
+		 * 0 to 0xffff
+		 */
+		conversion_pace = 0x101;
+		brightness =
+			user_brightness
+			* conversion_pace
+			* (max - min)
+			/ AMDGPU_MAX_BL_LEVEL
+			+ min * conversion_pace;
+	} else {
+		/* TODO
+		 * We are doing a linear interpolation here, which is OK but
+		 * does not provide the optimal result. We probably want
+		 * something close to the Perceptual Quantizer (PQ) curve.
+		 */
+		max = caps->aux_max_input_signal;
+		min = caps->aux_min_input_signal;
+
+		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+			       + user_brightness * max;
+		// Multiply the value by 1000 since we use millinits
+		brightness *= 1000;
+		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+	}
+
+out:
+	return brightness;
+}
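
For the non-AUX path above, the rescale can be sanity-checked against the
driver defaults (AMDGPU_DM_DEFAULT_MIN/MAX_BACKLIGHT, 12 and 255, defined
earlier in this file). A standalone sketch, assuming AMDGPU_MAX_BL_LEVEL is
255 (the 0-255 input range the comment describes):

	#include <stdio.h>

	#define AMDGPU_MAX_BL_LEVEL 255	/* assumed to match the 0-255 input range */

	/* non-AUX path of convert_brightness(): rescale 0..255 into
	 * [min..max], then scale by 0x101 into DC's 0..0xffff range */
	static unsigned int convert_pwm(unsigned int user, unsigned int min,
					unsigned int max)
	{
		return user * 0x101 * (max - min) / AMDGPU_MAX_BL_LEVEL
			+ min * 0x101;
	}

	int main(void)
	{
		printf("0   -> 0x%04x\n", convert_pwm(0, 12, 255));	/* 0x0c0c */
		printf("255 -> 0x%04x\n", convert_pwm(255, 12, 255));	/* 0xffff */
		return 0;
	}
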
+
 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
 	struct amdgpu_dm_backlight_caps caps;
-	uint32_t brightness = bd->props.brightness;
+	struct dc_link *link = NULL;
+	u32 brightness;
+	bool rc;
 
 	amdgpu_dm_update_backlight_caps(dm);
 	caps = dm->backlight_caps;
-	/*
-	 * The brightness input is in the range 0-255
-	 * It needs to be rescaled to be between the
-	 * requested min and max input signal
-	 *
-	 * It also needs to be scaled up by 0x101 to
-	 * match the DC interface which has a range of
-	 * 0 to 0xffff
-	 */
-	brightness =
-		brightness
-		* 0x101
-		* (caps.max_input_signal - caps.min_input_signal)
-		/ AMDGPU_MAX_BL_LEVEL
-		+ caps.min_input_signal * 0x101;
 
-	if (dc_link_set_backlight_level(dm->backlight_link,
-			brightness, 0))
-		return 0;
-	else
-		return 1;
+	link = (struct dc_link *)dm->backlight_link;
+
+	brightness = convert_brightness(&caps, bd->props.brightness);
+	// Change brightness based on AUX property
+	if (caps.aux_support)
+		return set_backlight_via_aux(link, brightness);
+
+	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+
+	return rc ? 0 : 1;
 }
 
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -2909,6 +3034,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
 
+	/* No userspace support. */
+	dm->dc->debug.disable_tri_buf = true;
+
 	return 0;
 fail:
 	kfree(aencoder);
@@ -4200,9 +4328,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
 
 			stream->psr_version = dmcu->dmcu_version.psr_version;
-			mod_build_vsc_infopacket(stream,
-					&stream->vsc_infopacket,
-					&stream->use_vsc_sdp_for_colorimetry);
+
+			//
+			// Decide whether the stream supports VSC SDP colorimetry
+			// before building the VSC info packet.
+			//
+			stream->use_vsc_sdp_for_colorimetry = false;
+			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+				stream->use_vsc_sdp_for_colorimetry =
+					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+			} else {
+				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+					stream->use_vsc_sdp_for_colorimetry = true;
+				}
+			}
+			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
 		}
 	}
 finish:
@@ -4352,8 +4493,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
 	.enable_vblank = dm_enable_vblank,
 	.disable_vblank = dm_disable_vblank,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static enum drm_connector_status
@@ -4574,6 +4717,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 	return &new_state->base;
 }
 
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+		to_amdgpu_dm_connector(connector);
+
+#if defined(CONFIG_DEBUG_FS)
+	connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+	return 0;
+}
+
 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 	.reset = amdgpu_dm_connector_funcs_reset,
 	.detect = amdgpu_dm_connector_detect,
@@ -4583,6 +4739,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+	.late_register = amdgpu_dm_connector_late_register,
 	.early_unregister = amdgpu_dm_connector_unregister
 };
 
@@ -4959,7 +5116,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
 	.disable = dm_crtc_helper_disable,
 	.atomic_check = dm_crtc_helper_atomic_check,
-	.mode_fixup = dm_crtc_helper_mode_fixup
+	.mode_fixup = dm_crtc_helper_mode_fixup,
+	.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@ -5922,13 +6080,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 	drm_connector_attach_encoder(
 		&aconnector->base, &aencoder->base);
 
-	drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
-	connector_debugfs_init(aconnector);
-	aconnector->debugfs_dpcd_address = 0;
-	aconnector->debugfs_dpcd_size = 0;
-#endif
-
 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@ -6414,7 +6565,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	uint32_t target_vblank, last_flip_vblank;
 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
 	bool pflip_present = false;
-	bool swizzle = true;
 	struct {
 		struct dc_surface_update surface_updates[MAX_SURFACES];
 		struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -6460,9 +6610,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
 		dc_plane = dm_new_plane_state->dc_state;
 
-		if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
-			swizzle = false;
-
 		bundle->surface_updates[planes_count].surface = dc_plane;
 		if (new_pcrtc_state->color_mgmt_changed) {
 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -6563,7 +6710,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * clients using the GLX_OML_sync_control extension or
 			 * DRI3/Present extension with defined target_msc.
 			 */
-			last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
+			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
 		}
 		else {
 			/* For variable refresh rate mode only:
@@ -6592,7 +6739,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 			(int)(target_vblank -
-			  amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
+			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
 			usleep_range(1000, 1100);
 		}
 
@@ -6671,8 +6818,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
 						acrtc_state->stream->link->psr_feature_enabled &&
-						!acrtc_state->stream->link->psr_allow_active &&
-						swizzle) {
+						!acrtc_state->stream->link->psr_allow_active) {
 			amdgpu_dm_psr_enable(acrtc_state->stream);
 		}
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7ea9acb..5cab3e6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -90,15 +90,41 @@ struct dm_comressor_info {
 };
 
 /**
- * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
- * @min_input_signal: minimum possible input in range 0-255
- * @max_input_signal: maximum possible input in range 0-255
- * @caps_valid: true if these values are from the ACPI interface
+ * struct amdgpu_dm_backlight_caps - Information about backlight
+ *
+ * Describes the backlight support for ACPI or eDP AUX.
  */
 struct amdgpu_dm_backlight_caps {
+	/**
+	 * @ext_caps: Data struct with all the information about the
+	 * display's support for HDR.
+	 */
+	union dpcd_sink_ext_caps *ext_caps;
+	/**
+	 * @aux_min_input_signal: Min brightness value supported by the display
+	 * in nits.
+	 */
+	u32 aux_min_input_signal;
+	/**
+	 * @aux_max_input_signal: Max brightness value supported by the display
+	 * in nits.
+	 */
+	u32 aux_max_input_signal;
+	/**
+	 * @min_input_signal: minimum possible input in range 0-255.
+	 */
 	int min_input_signal;
+	/**
+	 * @max_input_signal: maximum possible input in range 0-255.
+	 */
 	int max_input_signal;
+	/**
+	 * @caps_valid: true if these values are from the ACPI interface.
+	 */
 	bool caps_valid;
+	/**
+	 * @aux_support: Describes whether the display supports AUX backlight control.
+	 */
+	bool aux_support;
 };
 
 /**
@@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
 int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
 				      struct dc_plane_state *dc_plane_state);
 
+void amdgpu_dm_update_connector_after_detect(
+		struct amdgpu_dm_connector *aconnector);
+
 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
 
 #endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f81d343..0461fec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,6 +32,19 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_debugfs.h"
 #include "dm_helpers.h"
+#include "dmub/inc/dmub_srv.h"
+
+struct dmub_debugfs_trace_header {
+	uint32_t entry_count;
+	uint32_t reserved[3];
+};
+
+struct dmub_debugfs_trace_entry {
+	uint32_t trace_code;
+	uint32_t tick_count;
+	uint32_t param0;
+	uint32_t param1;
+};
 
 /* function description
  * get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
 	return bytes_from_user;
 }
 
+/**
+ * Returns the DMCUB tracebuffer contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+ */
+static int dmub_tracebuffer_show(struct seq_file *m, void *data)
+{
+	struct amdgpu_device *adev = m->private;
+	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+	struct dmub_debugfs_trace_entry *entries;
+	uint8_t *tbuf_base;
+	uint32_t tbuf_size, max_entries, num_entries, i;
+
+	if (!fb_info)
+		return 0;
+
+	tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
+	if (!tbuf_base)
+		return 0;
+
+	tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+	max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
+		      sizeof(struct dmub_debugfs_trace_entry);
+
+	num_entries =
+		((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+
+	num_entries = min(num_entries, max_entries);
+
+	entries = (struct dmub_debugfs_trace_entry *)
+		(tbuf_base + sizeof(struct dmub_debugfs_trace_header));
+
+	for (i = 0; i < num_entries; ++i) {
+		struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+		seq_printf(m,
+			   "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+			   entry->trace_code, entry->tick_count, entry->param0,
+			   entry->param1);
+	}
+
+	return 0;
+}
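
The header/entry structs at the top of this file fix the tracebuffer
capacity. A standalone sketch of the max_entries arithmetic (the 64 KiB
window size is a hypothetical example; the real size comes from fb_info):

	#include <stdint.h>
	#include <stdio.h>

	struct dmub_debugfs_trace_header {
		uint32_t entry_count;
		uint32_t reserved[3];
	};

	struct dmub_debugfs_trace_entry {
		uint32_t trace_code;
		uint32_t tick_count;
		uint32_t param0;
		uint32_t param1;
	};

	int main(void)
	{
		uint32_t tbuf_size = 64 * 1024;	/* hypothetical window size */
		uint32_t max_entries =
			(tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
			sizeof(struct dmub_debugfs_trace_entry);

		/* 16-byte header, 16-byte entries: (65536 - 16) / 16 = 4095 */
		printf("max_entries = %u\n", max_entries);
		return 0;
	}
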
+
+/**
+ * Returns the DMCUB firmware state contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
+ */
+static int dmub_fw_state_show(struct seq_file *m, void *data)
+{
+	struct amdgpu_device *adev = m->private;
+	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+	uint8_t *state_base;
+	uint32_t state_size;
+
+	if (!fb_info)
+		return 0;
+
+	state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
+	if (!state_base)
+		return 0;
+
+	state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
+
+	return seq_write(m, state_base, state_size);
+}
+
 /*
  * Returns the current and maximum output bpc for the connector.
  * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
 	return read_size - r;
 }
 
+DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
+DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
 DEFINE_SHOW_ATTRIBUTE(vrr_range);
 
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 	debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
 				   &force_yuv420_output_fops);
 
+	connector->debugfs_dpcd_address = 0;
+	connector->debugfs_dpcd_size = 0;
+
 }
 
 /*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
 	debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
 				   &visual_confirm_fops);
 
+	debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
+				   adev, &dmub_tracebuffer_fops);
+
+	debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
+				   adev, &dmub_fw_state_fops);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0acd340..5b70ed3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,13 @@
 #include "amdgpu_dm.h"
 #include "dm_helpers.h"
 #include <drm/drm_hdcp.h>
+#include "hdcp_psp.h"
+
+/*
+ * If the SRM version being loaded is less than or equal to the
+ * currently loaded SRM, PSP will return 0xFFFF as the version.
+ */
+#define PSP_SRM_VERSION_MAX 0xFFFF
 
 static bool
 lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
 	return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
 }
 
+static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
+{
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+
+	if (!psp->hdcp_context.hdcp_initialized) {
+		DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+		return NULL;
+	}
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+		return NULL;
+
+	*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
+	*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
+
+	return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
+}
+
+static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+{
+	struct ta_hdcp_shared_memory *hdcp_cmd;
+
+	if (!psp->hdcp_context.hdcp_initialized) {
+		DRM_WARN("Failed to set hdcp srm. HDCP TA is not initialized.");
+		return -EINVAL;
+	}
+
+	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+	memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
+	hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
+	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
+
+	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+	    hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
+		return -EINVAL;
+
+	*srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
+	return 0;
+}
+
 static void process_output(struct hdcp_workqueue *hdcp_work)
 {
 	struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
 	schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
 }
 
+static void link_lock(struct hdcp_workqueue *work, bool lock)
+{
+	int i = 0;
+
+	for (i = 0; i < work->max_link; i++) {
+		if (lock)
+			mutex_lock(&work[i].mutex);
+		else
+			mutex_unlock(&work[i].mutex);
+	}
+}
+
 void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 			 unsigned int link_index,
 			 struct amdgpu_dm_connector *aconnector,
@@ -112,6 +184,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
 		hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
 
 		if (enable_encryption) {
+			/* Explicitly set the saved SRM, as the sysfs call may come after we have
+			 * already enabled hdcp (S3 resume case)
+			 */
+			if (hdcp_work->srm_size > 0)
+				psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+					    &hdcp_work->srm_version);
+
 			display->adjust.disable = 0;
 			if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
 				hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -301,8 +380,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
 		cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
 	}
 
+	kfree(hdcp_work->srm);
+	kfree(hdcp_work->srm_temp);
 	kfree(hdcp_work);
-
 }
 
 static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -332,26 +412,170 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
 	link->dig_be = config->link_enc_inst;
 	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
 	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+	link->dp.mst_supported = config->mst_supported;
 	display->adjust.disable = 1;
 	link->adjust.auth_delay = 2;
 
 	hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
 }
 
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+
+/* NOTE: From the usermode perspective you only need to call write *ONCE*; the kernel
+ *      will automatically call srm_data_write once or twice depending on the size.
+ *
+ * Call "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode, no matter what the size is.
+ *
+ * The kernel can only send PAGE_SIZE at once, and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
+ * srm_data_write can be called multiple times.
+ *
+ * The sysfs interface doesn't tell us the total size up front, so we send partial SRMs to PSP, and
+ * on the last call we send the full SRM. PSP will fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call. Because of this limitation we
+ * cannot throw errors early, as that would stop the kernel from writing to sysfs.
+ *
+ * Example 1:
+ * 	Good SRM, size = 5096
+ * 	First call to write 4096 -> PSP fails
+ * 	Second call to write 1000 -> PSP passes -> SRM is set
+ *
+ * Example 2:
+ * 	Bad SRM, size = 4096
+ * 	First call to write 4096 -> PSP fails (this is the same as above, but we don't know if this
+ * 	is the last call)
+ *
+ * Possible solutions:
+ * 	1: Parse the SRM? -> It is signed, so we don't know where the EOF is.
+ * 	2: Add another sysfs file that passes the size before calling set. -> The simpler solution is
+ * 	below.
+ *
+ * Easy solution:
+ * Always call get after set to verify whether set was successful.
+ * +----------------------+
+ * |   Why it works:      |
+ * +----------------------+
+ * PSP will only update its SRM if the stored one is older than the one we are trying to load.
+ * Always do set first, then get:
+ * 	- if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
+ * 	version and save it
+ *
+ * 	- if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
+ * 	same (newer) version back and save it
+ *
+ * 	- if we try to "1. SET" a newer version and PSP rejects it, that means the format is
+ * 	incorrect/corrupted and we should correct our SRM by getting it from PSP
+ */
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+			      loff_t pos, size_t count)
+{
+	struct hdcp_workqueue *work;
+	uint32_t srm_version = 0;
+
+	work = container_of(bin_attr, struct hdcp_workqueue, attr);
+	link_lock(work, true);
+
+	memcpy(work->srm_temp + pos, buffer, count);
+
+	if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
+		DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
+		memcpy(work->srm, work->srm_temp, pos + count);
+		work->srm_size = pos + count;
+		work->srm_version = srm_version;
+	}
+
+	link_lock(work, false);
+
+	return count;
+}
+
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+			     loff_t pos, size_t count)
+{
+	struct hdcp_workqueue *work;
+	uint8_t *srm = NULL;
+	uint32_t srm_version;
+	uint32_t srm_size;
+	ssize_t ret = count;
+
+	work = container_of(bin_attr, struct hdcp_workqueue, attr);
+
+	link_lock(work, true);
+
+	srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
+
+	if (!srm) {
+		ret = -EINVAL;
+		goto ret;
+	}
+
+	if (pos >= srm_size) {
+		ret = 0;
+		goto ret;
+	}
+
+	if (srm_size - pos < count) {
+		memcpy(buffer, srm + pos, srm_size - pos);
+		ret = srm_size - pos;
+		goto ret;
+	}
+
+	memcpy(buffer, srm + pos, count);
+
+ret:
+	link_lock(work, false);
+	return ret;
+}
+
+/* From the HDCP spec (5. Renewability), the SRM needs to be stored in non-volatile memory.
+ *
+ * For example,
+ * 	if Application "A" sets the SRM (ver 2) and we reboot/suspend, then later Application "B"
+ * 	needs to use HDCP, the version in PSP should still be SRM (ver 2). So the SRM should be
+ * 	persistent across boots/reboots/suspend/resume/shutdown.
+ *
+ * Currently, when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
+ * we need to make the SRM persistent:
+ *
+ * - PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
+ * - The kernel cannot write to the file system.
+ * - So we need usermode to do this for us, which is why an interface for usermode is needed.
+ *
+ * Usermode can read/write to/from PSP using the sysfs interface.
+ * For example:
+ * 	to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * 	to load from storage to PSP:         cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ */
+static const struct bin_attribute data_attr = {
+	.attr = {.name = "hdcp_srm", .mode = 0664},
+	.size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
+	.write = srm_data_write,
+	.read = srm_data_read,
+};
+
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
 {
 
 	int max_caps = dc->caps.max_links;
-	struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
+	struct hdcp_workqueue *hdcp_work;
 	int i = 0;
 
+	hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
 	if (hdcp_work == NULL)
+		return NULL;
+
+	hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+
+	if (hdcp_work->srm == NULL)
+		goto fail_alloc_context;
+
+	hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+
+	if (hdcp_work->srm_temp == NULL)
 		goto fail_alloc_context;
 
 	hdcp_work->max_link = max_caps;
 
 	for (i = 0; i < max_caps; i++) {
-
 		mutex_init(&hdcp_work[i].mutex);
 
 		INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +584,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
 		INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
 		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
 
-		hdcp_work[i].hdcp.config.psp.handle =  psp_context;
+		hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
 		hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
 		hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
 		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +595,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
 	cp_psp->funcs.update_stream_config = update_config;
 	cp_psp->handle = hdcp_work;
 
+	/* File created at /sys/class/drm/card0/device/hdcp_srm */
+	hdcp_work[0].attr = data_attr;
+
+	if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+		DRM_WARN("Failed to create device file hdcp_srm");
+
 	return hdcp_work;
 
 fail_alloc_context:
+	kfree(hdcp_work->srm);
+	kfree(hdcp_work->srm_temp);
 	kfree(hdcp_work);
 
 	return NULL;
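
As a concrete illustration of the interface described above, here is a hedged
userspace sketch (not part of the patch) of the save/restore flow over the
hdcp_srm sysfs file, ending with the get-after-set verification the comments
recommend. The sysfs path and the 5120-byte MAX_SRM_FILE limit come from the
comments; error handling is intentionally minimal, and a robust reader would
loop on short reads.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define SRM_SYSFS "/sys/class/drm/card0/device/hdcp_srm"
	#define MAX_SRM_FILE 5120

	/* 1. SET: push an SRM blob from storage into PSP. The kernel may
	 * split this into two srm_data_write calls; only the last one can
	 * succeed. */
	static ssize_t load_srm(const char *file)
	{
		char buf[MAX_SRM_FILE];
		int in = open(file, O_RDONLY);
		int out = open(SRM_SYSFS, O_WRONLY);
		ssize_t n = read(in, buf, sizeof(buf));

		if (n > 0)
			n = write(out, buf, n);
		close(in);
		close(out);
		return n;
	}

	/* 2. GET: read back whatever SRM PSP actually kept, verifying the
	 * SET and persisting the newest version to storage. */
	static ssize_t save_srm(const char *file)
	{
		char buf[MAX_SRM_FILE];
		int in = open(SRM_SYSFS, O_RDONLY);
		int out = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0644);
		ssize_t n = read(in, buf, sizeof(buf));

		if (n > 0)
			n = write(out, buf, n);
		close(in);
		close(out);
		return n;
	}

	int main(void)
	{
		load_srm("srmfile");	/* set first... */
		save_srm("srmfile");	/* ...then get, per the comment above */
		return 0;
	}
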
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 6abde86..5159b3a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -30,6 +30,7 @@
 #include "hdcp.h"
 #include "dc.h"
 #include "dm_cp_psp.h"
+#include "amdgpu.h"
 
 struct mod_hdcp;
 struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
 
 	enum mod_hdcp_encryption_status encryption_status;
 	uint8_t max_link;
+
+	uint8_t *srm;
+	uint8_t *srm_temp;
+	uint32_t srm_version;
+	uint32_t srm_size;
+	struct bin_attribute attr;
 };
 
 void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
 void hdcp_destroy(struct hdcp_workqueue *work);
 
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
 
 #endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 318b474..c20fb08 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
 	struct amdgpu_dm_connector *aconnector = link->priv;
 
 	if (!aconnector) {
-			DRM_ERROR("Failed to found connector for link!");
-			return false;
+		DRM_ERROR("Failed to find connector for link!");
+		return false;
 	}
 
 	if (boot) {
@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
 	struct amdgpu_dm_connector *aconnector = link->priv;
 
 	if (!aconnector) {
-			DRM_ERROR("Failed to found connector for link!");
-			return;
+		DRM_ERROR("Failed to find connector for link!");
+		return;
 	}
 
 	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
 	struct amdgpu_dm_connector *aconnector = link->priv;
 
 	if (!aconnector) {
-		DRM_ERROR("Failed to found connector for link!");
+		DRM_ERROR("Failed to find connector for link!");
 		return false;
 	}
 
@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
 	struct amdgpu_dm_connector *aconnector = link->priv;
 
 	if (!aconnector) {
-		DRM_ERROR("Failed to found connector for link!");
+		DRM_ERROR("Failed to find connector for link!");
 		return false;
 	}
 
@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
 	bool result;
 
 	if (!aconnector) {
-		DRM_ERROR("Failed to found connector for link!");
+		DRM_ERROR("Failed to find connector for link!");
 		return false;
 	}
 
@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
 	struct amdgpu_dm_connector *aconnector = link->priv;
 
 	if (!aconnector) {
-		BUG_ON("Failed to found connector for link!");
+		BUG_ON("Failed to find connector for link!");
 		return true;
 	}
 
@@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
 		/* We don't need the original edid anymore */
 		kfree(edid);
 
+		/* drm_connector->display_info is parsed from the EDID and
+		 * populated by the call stack below:
+		 * drm_parse_ycbcr420_deep_color_info
+		 * drm_parse_hdmi_forum_vsdb
+		 * drm_parse_cea_ext
+		 * drm_add_display_info
+		 * drm_connector_update_edid_property
+		 *
+		 * drm_connector->display_info is then used by amdgpu_dm funcs
+		 * like fill_stream_properties_from_drm_display_mode
+		 */
+		amdgpu_dm_update_connector_after_detect(aconnector);
+
 		edid_status = dm_helpers_parse_edid_caps(
 						ctx,
 						&sink->dc_edid,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index da73161..e8208df 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 		to_amdgpu_dm_connector(connector);
-	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+	int r;
+
+	amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+	r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+	if (r)
+		return r;
 
 #if defined(CONFIG_DEBUG_FS)
 	connector_debugfs_init(amdgpu_dm_connector);
-	amdgpu_dm_connector->debugfs_dpcd_address = 0;
-	amdgpu_dm_connector->debugfs_dpcd_size = 0;
 #endif
 
-	return drm_dp_mst_connector_late_register(connector, port);
+	return r;
 }
 
 static void
@@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
 
 	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
 				   dsc_caps, NULL,
-				   &dc_sink->sink_dsc_caps.dsc_dec_caps))
+				   &dc_sink->dsc_caps.dsc_dec_caps))
 		return false;
 
 	return true;
@@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 			if (!validate_dsc_caps_on_connector(aconnector))
-				memset(&aconnector->dc_sink->sink_dsc_caps,
-				       0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+				memset(&aconnector->dc_sink->dsc_caps,
+				       0, sizeof(aconnector->dc_sink->dsc_caps));
 #endif
 		}
 	}
@@ -437,9 +440,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_connector *connector)
 {
-	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
-	struct drm_device *dev = master->base.dev;
-	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -455,39 +455,22 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	}
 
 	drm_connector_unregister(connector);
-	if (adev->mode_info.rfbdev)
-		drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
 	drm_connector_put(connector);
 }
 
-static void dm_dp_mst_register_connector(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct amdgpu_device *adev = dev->dev_private;
-
-	if (adev->mode_info.rfbdev)
-		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
-	else
-		DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
-
-	drm_connector_register(connector);
-}
-
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
 	.destroy_connector = dm_dp_destroy_mst_connector,
-	.register_connector = dm_dp_mst_register_connector
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector)
 {
 	aconnector->dm_dp_aux.aux.name = "dmdc";
-	aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
 	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
 	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
 
-	drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
 	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
 				      &aconnector->base);
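
Taken together with the late_register hunk earlier in this file, the two
changes move the AUX channel to DRM's usual init/register split:
drm_dp_aux_init() at connector creation, drm_dp_aux_register() only once the
connector's kernel device exists. Condensed, as a paraphrase of the driver
code rather than a literal excerpt:

	/* at connector creation: no struct device to bind to yet */
	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);

	/* at .late_register, once connector->kdev is valid: bind the AUX
	 * channel to it and register, which creates the drm_dp_auxN
	 * character device and its sysfs entries */
	aconnector->dm_dp_aux.aux.dev = connector->kdev;
	return drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
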
 
@@ -548,7 +531,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
 		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
 		if (vars[i].dsc_enabled && dc_dsc_compute_config(
 					params[i].sink->ctx->dc->res_pool->dscs[0],
-					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
+					&params[i].sink->dsc_caps.dsc_dec_caps,
 					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
 					0,
 					params[i].timing,
@@ -569,7 +552,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
 	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
 	dc_dsc_compute_config(
 			param.sink->ctx->dc->res_pool->dscs[0],
-			&param.sink->sink_dsc_caps.dsc_dec_caps,
+			&param.sink->dsc_caps.dsc_dec_caps,
 			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
 			(int) kbps, param.timing, &dsc_config);
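
The pbn-to-kbps step above inverts the DRM PBN definition: PBN is computed
(in drm_dp_calc_pbn_mode()) as kbps * 64 * 1006 / (8 * 54 * 1000 * 1000), so
the inverse multiplies by 8 * 54 / 64 and by 994, which approximates
10^6 / 1006. A standalone check with a made-up PBN value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pbn = 1000;	/* example payload bandwidth number */
		uint64_t kbps = pbn * 994 * 8 * 54 / 64;

		/* 1000 PBN -> 6709500 kbps, i.e. ~6.7 Gbit/s of payload */
		printf("pbn=%llu -> %llu kbps\n",
		       (unsigned long long)pbn, (unsigned long long)kbps);
		return 0;
	}
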
 
@@ -766,14 +749,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
 		params[count].sink = stream->sink;
 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 		params[count].port = aconnector->port;
-		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
 		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
 		if (!dc_dsc_compute_bandwidth_range(
 				stream->sink->ctx->dc->res_pool->dscs[0],
 				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
 				dsc_policy.min_target_bpp,
 				dsc_policy.max_target_bpp,
-				&stream->sink->sink_dsc_caps.dsc_dec_caps,
+				&stream->sink->dsc_caps.dsc_dec_caps,
 				&stream->timing, &params[count].bw_range))
 			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
 
@@ -855,7 +838,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (!aconnector || !aconnector->dc_sink)
 			continue;
 
-		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
 			continue;
 
 		if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2f1c958..37fa7b4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
 					&& id.enum_id == obj_id.enum_id)
 				return &bp->object_info_tbl.v1_4->display_path[i];
 		}
-		/* fall through */
+		fallthrough;
 	case OBJECT_TYPE_CONNECTOR:
 	case OBJECT_TYPE_GENERIC:
 		/* Both Generic and Connector Object ID
@@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
 					&& id.enum_id == obj_id.enum_id)
 				return &bp->object_info_tbl.v1_4->display_path[i];
 		}
-		/* fall through */
+		fallthrough;
 	default:
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index c4ba6e8..8edc250 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
 	uint8_t frev;
 	uint8_t crev;
 
-	if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
-		BREAK_TO_DEBUGGER();
+	BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+
 	switch (crev) {
 	case 6:
 		bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7388c98..204d7942 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 
 	case DCE_VERSION_11_2:
 	case DCE_VERSION_11_22:
+	case DCE_VERSION_12_0:
+	case DCE_VERSION_12_1:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	case DCN_VERSION_1_0:
 	case DCN_VERSION_1_01:
-		*h = dal_cmd_tbl_helper_dce112_get_table2();
-		return true;
-#endif
-
 	case DCN_VERSION_2_0:
-		*h = dal_cmd_tbl_helper_dce112_get_table2();
-		return true;
 	case DCN_VERSION_2_1:
 		*h = dal_cmd_tbl_helper_dce112_get_table2();
 		return true;
-	case DCE_VERSION_12_0:
-	case DCE_VERSION_12_1:
-		*h = dal_cmd_tbl_helper_dce112_get_table2();
-		return true;
+#endif
 
 	default:
 		/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 5d081c4..2c6db37 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
 				bw_fixed_to_int(bw_mul(data->
 					stutter_exit_watermark[9], bw_int_to_fixed(1000)));
 
-		calcs_output->stutter_entry_wm_ns[0].b_mark =
-			bw_fixed_to_int(bw_mul(data->
-				stutter_entry_watermark[4], bw_int_to_fixed(1000)));
-		calcs_output->stutter_entry_wm_ns[1].b_mark =
-			bw_fixed_to_int(bw_mul(data->
-				stutter_entry_watermark[5], bw_int_to_fixed(1000)));
-		calcs_output->stutter_entry_wm_ns[2].b_mark =
-			bw_fixed_to_int(bw_mul(data->
-				stutter_entry_watermark[6], bw_int_to_fixed(1000)));
-		if (ctx->dc->caps.max_slave_planes) {
-			calcs_output->stutter_entry_wm_ns[3].b_mark =
+			calcs_output->stutter_entry_wm_ns[0].b_mark =
 				bw_fixed_to_int(bw_mul(data->
-					stutter_entry_watermark[0], bw_int_to_fixed(1000)));
-			calcs_output->stutter_entry_wm_ns[4].b_mark =
+					stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[1].b_mark =
 				bw_fixed_to_int(bw_mul(data->
-					stutter_entry_watermark[1], bw_int_to_fixed(1000)));
-		} else {
-			calcs_output->stutter_entry_wm_ns[3].b_mark =
+					stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+			calcs_output->stutter_entry_wm_ns[2].b_mark =
 				bw_fixed_to_int(bw_mul(data->
-					stutter_entry_watermark[7], bw_int_to_fixed(1000)));
-			calcs_output->stutter_entry_wm_ns[4].b_mark =
+					stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+			if (ctx->dc->caps.max_slave_planes) {
+				calcs_output->stutter_entry_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+				calcs_output->stutter_entry_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+			} else {
+				calcs_output->stutter_entry_wm_ns[3].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+				calcs_output->stutter_entry_wm_ns[4].b_mark =
+					bw_fixed_to_int(bw_mul(data->
+						stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+			}
+			calcs_output->stutter_entry_wm_ns[5].b_mark =
 				bw_fixed_to_int(bw_mul(data->
-					stutter_entry_watermark[8], bw_int_to_fixed(1000)));
-		}
-		calcs_output->stutter_entry_wm_ns[5].b_mark =
-			bw_fixed_to_int(bw_mul(data->
-				stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+					stutter_entry_watermark[9], bw_int_to_fixed(1000)));
 
 			calcs_output->urgent_wm_ns[0].b_mark =
 				bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1a37550..3960a8d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
 }
 
 
-unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
 {
-	/* for dali & pollock, the highest voltage level we want is 0 */
-	if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
-		return 0;
+	/* for low power RV2 variants, the highest voltage level we want is 0 */
+	if (ASICREV_IS_RAVEN2(hw_internal_rev))
+		switch (pci_revision_id) {
+		case PRID_DALI_DE:
+		case PRID_DALI_DF:
+		case PRID_DALI_E3:
+		case PRID_DALI_E4:
+		case PRID_POLLOCK_94:
+		case PRID_POLLOCK_95:
+		case PRID_POLLOCK_E9:
+		case PRID_POLLOCK_EA:
+		case PRID_POLLOCK_EB:
+			return 0;
+		default:
+			break;
+		}
 
 	/* we are ok with all levels */
 	return 4;
@@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth(
 	PERFORMANCE_TRACE_END();
 	BW_VAL_TRACE_FINISH();
 
-	if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
+	if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
+							dc->ctx->asic_id.hw_internal_rev,
+							dc->ctx->asic_id.pci_revision_id))
 		return true;
 	else
 		return false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a78e5c7..8ec2dfe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
 	return display_count;
 }
 
+int clk_mgr_helper_get_active_plane_cnt(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i, total_plane_count;
+
+	total_plane_count = 0;
+	for (i = 0; i < context->stream_count; i++) {
+		const struct dc_stream_status stream_status = context->stream_status[i];
+
+		/*
+		 * Sum up plane_count for all streams (active and virtual).
+		 */
+		total_plane_count += stream_status.plane_count;
+	}
+
+	return total_plane_count;
+}
+
 void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
 {
 	struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	case FAMILY_RV:
-		if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
-				ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
-			/* TEMP: this check has to come before ASICREV_IS_RENOIR */
-			/* which also incorrectly returns true for Dali/Pollock*/
-			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
-			break;
-		}
 		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
 			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 			break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 49ce46b..55d09ad 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
 		dpp_inst = i;
 		dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
 
-		prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+		prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
 
-		if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
+		if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
 			clk_mgr->dccg->funcs->update_dpp_dto(
 							clk_mgr->dccg, dpp_inst, dppclk_khz);
-		}
 	}
 }
 
@@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool dpp_clock_lowered = false;
 	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
 	bool force_reset = false;
+	bool p_state_change_support;
+	int total_plane_count;
 
 	if (dc->work_arounds.skip_clock_update)
 		return;
@@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 			pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
 	}
 
-	if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
 		clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-		clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+		clk_mgr_base->clks.p_state_change_support = p_state_change_support;
 		if (pp_smu && pp_smu->set_pstate_handshake_support)
 			pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
 	}
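
Reviewer note: the hunk above changes what feeds should_update_pstate_support() — P-State switching is now also permitted whenever no planes are active, regardless of what the computed clocks request. A minimal standalone sketch of the new decision (types flattened, names illustrative, not driver code):

	/* Sketch only: p-state switching is acceptable if the new state
	 * supports it, or if nothing is on screen (zero active planes). */
	static bool want_pstate_support(bool new_support, int total_plane_count)
	{
		return new_support || total_plane_count == 0;
	}
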
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 9ef3f7b..ab267dd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -46,6 +46,7 @@
 /* Constants */
 
 #define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
 
 /* Macros */
 
@@ -405,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
 	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
 }
 
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
 {
 	int i, num_valid_sets;
 
@@ -465,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
 static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 {
 	struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
-	struct pp_smu_wm_range_sets ranges = {0};
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
 
 	if (!debug->disable_pplib_wm_range) {
-		build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+		build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
 
 		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
 		if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
-			pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+			pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
 	}
 
 }
@@ -504,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
 	.notify_wm_ranges = rn_notify_wm_ranges
 };
 
-struct clk_bw_params rn_bw_params = {
+static struct clk_bw_params rn_bw_params = {
 	.vram_type = Ddr4MemType,
 	.num_channels = 1,
 	.clk_table = {
@@ -544,7 +544,7 @@ struct clk_bw_params rn_bw_params = {
 
 };
 
-struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table = {
 	.entries = {
 		{
 			.wm_inst = WM_A,
@@ -581,7 +581,7 @@ struct wm_table ddr4_wm_table = {
 	}
 };
 
-struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table = {
 	.entries = {
 		{
 			.wm_inst = WM_A,
@@ -721,6 +721,13 @@ void rn_clk_mgr_construct(
 	} else {
 		struct clk_log_info log_info = {0};
 
+		clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+		/* SMU Version 55.51.0 and up no longer have an issue
+		 * that needs to limit minimum dispclk */
+		if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+			debug->min_disp_clk_khz = 0;
+
 		/* TODO: Check we get what we expect during bringup */
 		clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04441db..2ffb221 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -701,7 +701,7 @@ static bool dc_construct(struct dc *dc,
 		dc_ctx->created_bios = true;
 	}
 
-
+	dc->vendor_signature = init_params->vendor_signature;
 
 	/* Create GPIO service */
 	dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +761,28 @@ static bool disable_all_writeback_pipes_for_stream(
 	return true;
 }
 
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+	int i = 0;
+
+	/* Use interdependent_update_lock when available; otherwise (e.g. DCE110) fall back to per-pipe locking */
+	if (dc->hwss.interdependent_update_lock)
+		dc->hwss.interdependent_update_lock(dc, context, lock);
+	else {
+		for (i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+			// Copied conditions that were previously in dce110_apply_ctx_for_surface
+			if (stream == pipe_ctx->stream) {
+				if (!pipe_ctx->top_pipe &&
+					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+			}
+		}
+	}
+}
+
 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 {
 	int i, j;
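
apply_ctx_interdependent_lock() exists so the apply_ctx_for_surface() call sites below can all be rewritten into one bracketed sequence. The pattern, shown once (illustrative pseudocode using the diff's own hooks):

	/* lock -> program front end -> unlock -> post-unlock programming */
	apply_ctx_interdependent_lock(dc, state, stream, true);
	dc->hwss.apply_ctx_for_surface(dc, stream, plane_count, state);
	apply_ctx_interdependent_lock(dc, state, stream, false);
	dc->hwss.post_unlock_program_front_end(dc, state);
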
@@ -786,11 +808,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 		if (should_disable && old_stream) {
 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
-			if (dc->hwss.apply_ctx_for_surface)
+
+			if (dc->hwss.apply_ctx_for_surface) {
+				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
 				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+			}
+			if (dc->hwss.program_front_end_for_ctx) {
+				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+			}
 		}
-		if (dc->hwss.program_front_end_for_ctx)
-			dc->hwss.program_front_end_for_ctx(dc, dangling_context);
 	}
 
 	current_ctx = dc->current_state;
@@ -1210,16 +1241,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	/* re-program planes for existing stream, in case we need to
 	 * free up plane resource for later use
 	 */
-	if (dc->hwss.apply_ctx_for_surface)
+	if (dc->hwss.apply_ctx_for_surface) {
 		for (i = 0; i < context->stream_count; i++) {
 			if (context->streams[i]->mode_changed)
 				continue;
-
+			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
 			dc->hwss.apply_ctx_for_surface(
 				dc, context->streams[i],
 				context->stream_status[i].plane_count,
 				context); /* use new pipe config in new context */
+			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+			dc->hwss.post_unlock_program_front_end(dc, context);
 		}
+	}
 
 	/* Program hardware */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1238,19 +1272,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	}
 
 	/* Program all planes within new context*/
-	if (dc->hwss.program_front_end_for_ctx)
+	if (dc->hwss.program_front_end_for_ctx) {
+		dc->hwss.interdependent_update_lock(dc, context, true);
 		dc->hwss.program_front_end_for_ctx(dc, context);
+		dc->hwss.interdependent_update_lock(dc, context, false);
+		dc->hwss.post_unlock_program_front_end(dc, context);
+	}
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_link *link = context->streams[i]->link;
 
 		if (!context->streams[i]->mode_changed)
 			continue;
 
-		if (dc->hwss.apply_ctx_for_surface)
+		if (dc->hwss.apply_ctx_for_surface) {
+			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
 			dc->hwss.apply_ctx_for_surface(
 					dc, context->streams[i],
 					context->stream_status[i].plane_count,
 					context);
+			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+			dc->hwss.post_unlock_program_front_end(dc, context);
+		}
 
 		/*
 		 * enable stereo
@@ -1318,18 +1360,12 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 	return (result == DC_OK);
 }
 
-bool dc_is_hw_initialized(struct dc *dc)
-{
-	struct dc_bios *dcb = dc->ctx->dc_bios;
-	return dcb->funcs->is_accelerated_mode(dcb);
-}
-
 bool dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
 	struct dc_state *context = dc->current_state;
 
-	if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
+	if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
 		return true;
 
 	post_surface_trace(dc);
@@ -1341,9 +1377,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
 		}
 
-	dc->optimized_required = false;
-
 	dc->hwss.optimize_bandwidth(dc, context);
+
+	dc->optimized_required = false;
+	dc->wm_optimized_required = false;
+
 	return true;
 }
 
@@ -1734,14 +1772,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
 
 		if (stream_update->wb_update)
 			su_flags->bits.wb_update = 1;
+
+		if (stream_update->dsc_config)
+			su_flags->bits.dsc_changed = 1;
+
 		if (su_flags->raw != 0)
 			overall_type = UPDATE_TYPE_FULL;
 
 		if (stream_update->output_csc_transform || stream_update->output_color_space)
 			su_flags->bits.out_csc = 1;
-
-		if (stream_update->dsc_config)
-			overall_type = UPDATE_TYPE_FULL;
 	}
 
 	for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1815,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 
 	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
 	if (type == UPDATE_TYPE_FULL) {
-		if (stream_update)
+		if (stream_update) {
+			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
 			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+		}
 		for (i = 0; i < surface_count; i++)
 			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
 	}
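
The save/restore around the 0xFFFFFFFF write is the interesting part: dsc_changed must keep the value computed by check_update_surfaces_for_stream() rather than being force-set along with everything else. The bit-preservation idiom, reduced to a standalone sketch (the driver does this through a bitfield member rather than a mask):

	#include <stdint.h>

	/* Set every flag except the bits in keep_mask, which retain their value. */
	static uint32_t set_all_but_preserve(uint32_t raw, uint32_t keep_mask)
	{
		return (0xFFFFFFFFu & ~keep_mask) | (raw & keep_mask);
	}
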
@@ -1790,7 +1832,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 		// Else we fallback to mem compare.
 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
 			dc->optimized_required = true;
-		}
+		} else if (dc->wm_optimized_required)
+			dc->optimized_required = true;
 	}
 
 	return type;
@@ -1829,6 +1872,8 @@ static void copy_surface_update_to_plane(
 		surface->time.index++;
 		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
 			surface->time.index = 0;
+
+		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
 	}
 
 	if (srf_update->scaling_info) {
@@ -2093,18 +2138,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
 				}
 			}
 
-			if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
-				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
-				dp_update_dsc_config(pipe_ctx);
-				dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
-			}
 			/* Full fe update*/
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
 
-			if (stream_update->dpms_off) {
-				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+			if (stream_update->dsc_config)
+				dp_update_dsc_config(pipe_ctx);
 
+			if (stream_update->dpms_off) {
 				if (*stream_update->dpms_off) {
 					core_link_disable_stream(pipe_ctx);
 					/* for dpms, keep acquired resources*/
@@ -2118,8 +2159,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
 
 					core_link_enable_stream(dc->current_state, pipe_ctx);
 				}
-
-				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
 			}
 
 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2175,6 +2214,32 @@ static void commit_planes_for_stream(struct dc *dc,
 		context_clock_trace(dc, context);
 	}
 
+	for (j = 0; j < dc->res_pool->pipe_count; j++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+		if (!pipe_ctx->top_pipe &&
+			!pipe_ctx->prev_odm_pipe &&
+			pipe_ctx->stream &&
+			pipe_ctx->stream == stream) {
+			top_pipe_to_program = pipe_ctx;
+		}
+	}
+
+	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
+			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+					top_pipe_to_program->stream_res.tg);
+
+	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+		dc->hwss.interdependent_update_lock(dc, context, true);
+	else
+		/* Lock the top pipe while updating plane addrs, since freesync requires
+		 *  plane addr update event triggers to be synchronized.
+		 *  top_pipe_to_program is expected to never be NULL
+		 */
+		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
 	// Stream updates
 	if (stream_update)
 		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2254,12 @@ static void commit_planes_for_stream(struct dc *dc,
 		if (dc->hwss.program_front_end_for_ctx)
 			dc->hwss.program_front_end_for_ctx(dc, context);
 
+		if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+			dc->hwss.interdependent_update_lock(dc, context, false);
+		else
+			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+		dc->hwss.post_unlock_program_front_end(dc, context);
 		return;
 	}
 
@@ -2224,8 +2295,6 @@ static void commit_planes_for_stream(struct dc *dc,
 			pipe_ctx->stream == stream) {
 			struct dc_stream_status *stream_status = NULL;
 
-			top_pipe_to_program = pipe_ctx;
-
 			if (!pipe_ctx->plane_state)
 				continue;
 
@@ -2270,12 +2339,6 @@ static void commit_planes_for_stream(struct dc *dc,
 
 	// Update Type FAST, Surface updates
 	if (update_type == UPDATE_TYPE_FAST) {
-		/* Lock the top pipe while updating plane addrs, since freesync requires
-		 *  plane addr update event triggers to be synchronized.
-		 *  top_pipe_to_program is expected to never be NULL
-		 */
-		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
 		if (dc->hwss.set_flip_control_gsl)
 			for (i = 0; i < surface_count; i++) {
 				struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,10 +2380,31 @@ static void commit_planes_for_stream(struct dc *dc,
 					dc->hwss.update_plane_addr(dc, pipe_ctx);
 			}
 		}
-
-		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
 	}
 
+	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+		dc->hwss.interdependent_update_lock(dc, context, false);
+	else
+		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+					top_pipe_to_program->stream_res.tg,
+					CRTC_STATE_VACTIVE);
+			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+					top_pipe_to_program->stream_res.tg,
+					CRTC_STATE_VBLANK);
+			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+					top_pipe_to_program->stream_res.tg,
+					CRTC_STATE_VACTIVE);
+			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+					top_pipe_to_program->stream_res.tg);
+		}
+
+	if (update_type != UPDATE_TYPE_FAST)
+		dc->hwss.post_unlock_program_front_end(dc, context);
+
 	// Fire manual trigger only when bottom plane is flipped
 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
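
Two locking regimes now coexist in commit_planes_for_stream(): full updates take the context-wide interdependent lock when the hw sequencer provides it, while fast updates keep the old single top-pipe lock (now hoisted above the stream updates). Compressed to its control flow (illustrative only):

	if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

	/* ... stream and plane programming ... */

	if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

The VACTIVE -> VBLANK -> VACTIVE waits after the unlock ensure the double-buffered DSC registers latch across a full frame before double buffering is disabled again.
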
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a09119c..67cfff1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,7 +45,7 @@
 #include "dpcd_defs.h"
 #include "dmcu.h"
 #include "hw/clk_mgr.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
 		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
 }
 
-static bool detect_dp(
-	struct dc_link *link,
-	struct display_sink_capability *sink_caps,
-	bool *converter_disable_audio,
-	struct audio_support *audio_support,
-	enum dc_detect_reason reason)
+static bool detect_dp(struct dc_link *link,
+		      struct display_sink_capability *sink_caps,
+		      bool *converter_disable_audio,
+		      struct audio_support *audio_support,
+		      enum dc_detect_reason reason)
 {
 	bool boot = false;
+
 	sink_caps->signal = link_detect_sink(link, reason);
 	sink_caps->transaction_type =
 		get_ddc_transaction_type(sink_caps->signal);
 
 	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
 		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+		dpcd_set_source_specific_data(link);
+
 		if (!detect_dp_sink_caps(link))
 			return false;
 
@@ -606,9 +609,8 @@ static bool detect_dp(
 			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
 			link->type = dc_connection_mst_branch;
 
-			dal_ddc_service_set_transaction_type(
-							link->ddc,
-							sink_caps->transaction_type);
+			dal_ddc_service_set_transaction_type(link->ddc,
+							     sink_caps->transaction_type);
 
 			/*
 			 * This call will initiate MST topology discovery. Which
@@ -637,13 +639,10 @@ static bool detect_dp(
 			if (reason == DETECT_REASON_BOOT)
 				boot = true;
 
-			dm_helpers_dp_update_branch_info(
-				link->ctx,
-				link);
+			dm_helpers_dp_update_branch_info(link->ctx, link);
 
-			if (!dm_helpers_dp_mst_start_top_mgr(
-				link->ctx,
-				link, boot)) {
+			if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
+							     link, boot)) {
 				/* MST not supported */
 				link->type = dc_connection_single;
 				sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
@@ -651,7 +650,7 @@ static bool detect_dp(
 		}
 
 		if (link->type != dc_connection_mst_branch &&
-			is_dp_active_dongle(link)) {
+		    is_dp_active_dongle(link)) {
 			/* DP active dongles */
 			link->type = dc_connection_active_dongle;
 			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
@@ -662,14 +661,15 @@ static bool detect_dp(
 				return true;
 			}
 
-			if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+			if (link->dpcd_caps.dongle_type !=
+			    DISPLAY_DONGLE_DP_HDMI_CONVERTER)
 				*converter_disable_audio = true;
 		}
 	} else {
 		/* DP passive dongles */
 		sink_caps->signal = dp_passive_dongle_detection(link->ddc,
-				sink_caps,
-				audio_support);
+								sink_caps,
+								audio_support);
 	}
 
 	return true;
@@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
 	if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
 			link->connector_signal == SIGNAL_TYPE_EDP) &&
-			link->local_sink)
+			link->local_sink) {
+
+		// need to re-write OUI and brightness in resume case
+		if (link->connector_signal == SIGNAL_TYPE_EDP) {
+			dpcd_set_source_specific_data(link);
+			dc_link_set_default_brightness_aux(link); //TODO: use cached
+		}
+
 		return true;
+	}
 
 	if (false == dc_link_detect_sink(link, &new_connection_type)) {
 		BREAK_TO_DEBUGGER();
@@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
 		}
 
 		case SIGNAL_TYPE_EDP: {
+			read_current_link_settings_on_detect(link);
+
+			dpcd_set_source_specific_data(link);
+
 			detect_edp_sink_caps(link);
 			read_current_link_settings_on_detect(link);
 			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			break;
 		}
 
+		if (link->local_sink->edid_caps.panel_patch.disable_fec)
+			link->ctx->dc->debug.disable_fec = true;
+
 		// Check if edid is the same
 		if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
 			same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
 
+		if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+			link->ctx->dc->debug.hdmi20_disable = true;
+
 		if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
 			sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
 			/*
@@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
 	}
 }
 
-static enum dc_status enable_link_dp(
-		struct dc_state *state,
-		struct pipe_ctx *pipe_ctx)
+static enum dc_status enable_link_dp(struct dc_state *state,
+				     struct pipe_ctx *pipe_ctx)
 {
 	struct dc_stream_state *stream = pipe_ctx->stream;
 	enum dc_status status;
@@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp(
 	bool fec_enable;
 	int i;
 	bool apply_seamless_boot_optimization = false;
+	uint32_t bl_oled_enable_delay = 50; // in ms
 
 	// check for seamless boot
 	for (i = 0; i < state->stream_count; i++) {
@@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp(
 	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
 			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
 	if (state->clk_mgr && !apply_seamless_boot_optimization)
-		state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
+		state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+						     state, false);
+
+	// during mode switch we do DP_SET_POWER off then on, and OUI is lost
+	dpcd_set_source_specific_data(link);
 
 	skip_video_pattern = true;
 
 	if (link_settings.link_rate == LINK_RATE_LOW)
-			skip_video_pattern = false;
+		skip_video_pattern = false;
 
-	if (perform_link_training_with_retries(
-			&link_settings,
-			skip_video_pattern,
-			LINK_TRAINING_ATTEMPTS,
-			pipe_ctx,
-			pipe_ctx->stream->signal)) {
+	if (perform_link_training_with_retries(&link_settings,
+					       skip_video_pattern,
+					       LINK_TRAINING_ATTEMPTS,
+					       pipe_ctx,
+					       pipe_ctx->stream->signal)) {
 		link->cur_link_settings = link_settings;
 		status = DC_OK;
-	}
-	else
+	} else {
 		status = DC_FAIL_DP_LINK_TRAINING;
+	}
 
-	if (link->preferred_training_settings.fec_enable != NULL)
+	if (link->preferred_training_settings.fec_enable)
 		fec_enable = *link->preferred_training_settings.fec_enable;
 	else
 		fec_enable = true;
 
 	dp_set_fec_enable(link, fec_enable);
+
+	// during mode set we do DP_SET_POWER off then on, aux writes are lost
+	if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+		dc_link_set_default_brightness_aux(link); // TODO: use cached if known
+		if (link->dpcd_sink_ext_caps.bits.oled == 1)
+			msleep(bl_oled_enable_delay);
+		dc_link_backlight_enable_aux(link, true);
+	}
+
 	return status;
 }
 
@@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting(
 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 			if (!i2c_success)
-				/* Write failure */
-				ASSERT(i2c_success);
+				goto i2c_write_fail;
 
 			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
 			 * needs to be set to 1 on every 0xA-0xC write.
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting(
 						pipe_ctx->stream->link->ddc,
 						slave_address, &offset, 1, &value, 1);
 					if (!i2c_success)
-						/* Write failure */
-						ASSERT(i2c_success);
+						goto i2c_write_fail;
 				}
 
 				buffer[0] = offset;
@@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting(
 					offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
 				if (!i2c_success)
-					/* Write failure */
-					ASSERT(i2c_success);
+					goto i2c_write_fail;
 			}
 		}
 	}
@@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting(
 					slave_address, buffer[0], buffer[1], i2c_success?1:0);
 
 				if (!i2c_success)
-					/* Write failure */
-					ASSERT(i2c_success);
+					goto i2c_write_fail;
 
 				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
 				 * needs to be set to 1 on every 0xA-0xC write.
@@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting(
 								pipe_ctx->stream->link->ddc,
 								slave_address, &offset, 1, &value, 1);
 						if (!i2c_success)
-							/* Write failure */
-							ASSERT(i2c_success);
+							goto i2c_write_fail;
 					}
 
 					buffer[0] = offset;
@@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting(
 						offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 						slave_address, buffer[0], buffer[1], i2c_success?1:0);
 					if (!i2c_success)
-						/* Write failure */
-						ASSERT(i2c_success);
+						goto i2c_write_fail;
 				}
 			}
 		}
@@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting(
 				offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 				slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0x00 to 0x23 */
 		buffer[0] = 0x00;
@@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0xff to 0x00 */
 		buffer[0] = 0xff;
@@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 	}
+
+	return;
+
+i2c_write_fail:
+	DC_LOG_DEBUG("Set retimer failed");
 }
 
 static void write_i2c_default_retimer_setting(
@@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0B to 0xDA or 0xD8 */
 	buffer[0] = 0x0B;
@@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0C to 0x1D or 0x91 */
 	buffer[0] = 0x0C;
@@ -1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 	/* Write offset 0x0A to 0x17 */
 	buffer[0] = 0x0A;
@@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
 		offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 		slave_address, buffer[0], buffer[1], i2c_success?1:0);
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		goto i2c_write_fail;
 
 
 	if (is_vga_mode) {
@@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0x00 to 0x23 */
 		buffer[0] = 0x00;
@@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 
 		/* Write offset 0xff to 0x00 */
 		buffer[0] = 0xff;
@@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
 			offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
 			slave_address, buffer[0], buffer[1], i2c_success?1:0);
 		if (!i2c_success)
-			/* Write failure */
-			ASSERT(i2c_success);
+			goto i2c_write_fail;
 	}
+
+	return;
+
+i2c_write_fail:
+	DC_LOG_DEBUG("Set default retimer failed");
 }
 
 static void write_i2c_redriver_setting(
@@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting(
 		slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
 
 	if (!i2c_success)
-		/* Write failure */
-		ASSERT(i2c_success);
+		DC_LOG_DEBUG("Set redriver failed");
 }
 
 static void disable_link(struct dc_link *link, enum signal_type signal)
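
Replacing ASSERT(i2c_success) with goto i2c_write_fail turns a debug-only trap into a real bail-out: on the first failed write the function stops issuing further I2C transactions and logs once. The shape of the refactor (sketch; write_reg() is a stand-in for the driver's I2C write helper):

	static void write_settings(void)
	{
		if (!write_reg(0x0A, 0x17))
			goto fail;	/* previously: ASSERT(ok) and fall through */
		if (!write_reg(0x0B, 0xDA))
			goto fail;

		return;

	fail:
		DC_LOG_DEBUG("Set retimer failed");
	}
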
@@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
 	struct dmcu *dmcu = dc->res_pool->dmcu;
 	struct dmub_psr *psr = dc->res_pool->psr;
 
-	if ((psr != NULL) && link->psr_feature_enabled)
-		psr->funcs->set_psr_enable(psr, allow_active);
+	if (psr != NULL && link->psr_feature_enabled)
+		psr->funcs->psr_enable(psr, allow_active);
 	else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
 		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
 
@@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
 	struct dmub_psr *psr = dc->res_pool->psr;
 
 	if (psr != NULL && link->psr_feature_enabled)
-		psr->funcs->get_psr_state(psr_state);
+		psr->funcs->psr_get_state(psr, psr_state);
 	else if (dmcu != NULL && link->psr_feature_enabled)
 		dmcu->funcs->get_psr_state(dmcu, psr_state);
 
@@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 	psr_context->frame_delay = 0;
 
 	if (psr)
-		link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+		link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
 	else
 		link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
 
@@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 		memset(&config, 0, sizeof(config));
 
 		config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
-		config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+		/* use the stream encoder's hw instance, not its object id */
+		config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
 		config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
 		config.dpms_off = dpms_off;
 		config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+		config.mst_supported = (pipe_ctx->stream->signal ==
+				SIGNAL_TYPE_DISPLAY_PORT_MST);
 		cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
 	}
 }
@@ -3061,6 +3087,9 @@ void core_link_enable_stream(
 		dc->hwss.unblank_stream(pipe_ctx,
 			&pipe_ctx->stream->link->cur_link_settings);
 
+		if (stream->sink_patches.delay_ignore_msa > 0)
+			msleep(stream->sink_patches.delay_ignore_msa);
+
 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
 			enable_stream_features(pipe_ctx);
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
@@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps(
 	link_bw_kbps *= 8;   /* 8 bits per byte*/
 	link_bw_kbps *= link_setting->lane_count;
 
-	if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+	if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
 		/* Account for FEC overhead.
 		 * We have to do it based on caps,
 		 * and not based on FEC being set ready,
@@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap(
 	dp_overwrite_extended_receiver_cap(link);
 }
 
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+	return (dc_is_dp_signal(link->connector_signal) &&
+			link->link_enc->features.fec_supported &&
+			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+			!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index a49c10d..256889e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -126,22 +126,16 @@ struct aux_payloads {
 	struct vector payloads;
 };
 
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+		struct dc_context *ctx,
+		struct i2c_payloads *payloads,
+		uint32_t count)
 {
-	struct i2c_payloads *payloads;
-
-	payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
-	if (!payloads)
-		return NULL;
-
 	if (dal_vector_construct(
 		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
-		return payloads;
+		return true;
 
-	kfree(payloads);
-	return NULL;
-
+	return false;
 }
 
 static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
 	return p->payloads.count;
 }
 
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
 {
-	if (!p || !*p)
+	if (!p)
 		return;
-	dal_vector_destruct(&(*p)->payloads);
-	kfree(*p);
-	*p = NULL;
 
+	dal_vector_destruct(&p->payloads);
 }
 
 #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
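
Moving struct i2c_payloads from kzalloc() to the caller's stack removes both an allocation-failure path and the matching kfree(); create/destroy now only construct and destruct the embedded vector. The resulting call pattern (names from this diff, flow abridged):

	struct i2c_payloads payloads;

	if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
		return false;	/* no heap wrapper to free on failure */

	/* ... add payloads, submit the i2c_command ... */

	dal_ddc_i2c_payloads_destroy(&payloads);	/* destructs the vector only */
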
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
 
 	uint32_t payloads_num = write_payloads + read_payloads;
 
 	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
 		return false;
 
+	if (!payloads_num)
+		return false;
+
 	/*TODO: len of payload data for i2c and aux is uint8!!!!,
 	 *  but we want to read 256 over i2c!!!!*/
 	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
 			ret = dal_ddc_submit_aux_command(ddc, &payload);
 		}
 	} else {
-		struct i2c_payloads *payloads =
-			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+		struct i2c_command command = {0};
+		struct i2c_payloads payloads;
 
-		struct i2c_command command = {
-			.payloads = dal_ddc_i2c_payloads_get(payloads),
-			.number_of_payloads = 0,
-			.engine = DDC_I2C_COMMAND_ENGINE,
-			.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+		if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+			return false;
+
+		command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+		command.number_of_payloads = 0;
+		command.engine = DDC_I2C_COMMAND_ENGINE;
+		command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
 
 		dal_ddc_i2c_payloads_add(
-			payloads, address, write_size, write_buf, true);
+			&payloads, address, write_size, write_buf, true);
 
 		dal_ddc_i2c_payloads_add(
-			payloads, address, read_size, read_buf, false);
+			&payloads, address, read_size, read_buf, false);
 
 		command.number_of_payloads =
-			dal_ddc_i2c_payloads_get_count(payloads);
+			dal_ddc_i2c_payloads_get_count(&payloads);
 
 		ret = dm_helpers_submit_i2c(
 				ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
 	uint8_t write_buffer[2] = {0};
 	/*Lower than 340 Scramble bit from SCDC caps*/
 
+	if (ddc_service->link->local_sink &&
+		ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+		return;
+
 	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
 			sizeof(offset), &sink_version, sizeof(sink_version));
 	if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
 	uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
 	uint8_t tmds_config = 0;
 
+	if (ddc_service->link->local_sink &&
+		ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+		return;
+
 	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
 			sizeof(offset), &tmds_config, sizeof(tmds_config));
 	if (tmds_config & 0x1) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index fd9e696..7cbb1ef 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -945,6 +945,17 @@ static enum link_training_result perform_channel_equalization_sequence(
 }
 #define TRAINING_AUX_RD_INTERVAL 100 //us
 
+static void start_clock_recovery_pattern_early(struct dc_link *link,
+		struct link_training_settings *lt_settings,
+		uint32_t offset)
+{
+	DC_LOG_HW_LINK_TRAINING("%s: GPU sends TPS1. Wait 400us.\n",
+			__func__);
+	dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+	dp_set_hw_lane_settings(link, lt_settings, offset);
+	udelay(400);
+}
+
 static enum link_training_result perform_clock_recovery_sequence(
 	struct dc_link *link,
 	struct link_training_settings *lt_settings,
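
start_clock_recovery_pattern_early() is a workaround hook: with work_arounds.lt_early_cr_pattern set, TPS1 and the lane settings go out on the wire before the normal training sequence begins, and perform_clock_recovery_sequence() later skips its own pattern set (see the next hunk). In outline (illustrative ordering only):

	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);

	dpcd_set_link_settings(link, &lt_settings);	/* 1. rate/lanes/spread */
	perform_clock_recovery_sequence(link, &lt_settings, DPRX);
	/* the CR sequence skips dp_set_hw_training_pattern() when TPS1 was sent early */
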
@@ -962,7 +973,8 @@ static enum link_training_result perform_clock_recovery_sequence(
 	retries_cr = 0;
 	retry_count = 0;
 
-	dp_set_hw_training_pattern(link, tr_pattern, offset);
+	if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+		dp_set_hw_training_pattern(link, tr_pattern, offset);
 
 	/* najeeb - The synaptics MST hub can put the LT in
 	* infinite loop by switching the VS
@@ -1434,6 +1446,13 @@ enum link_training_result dc_link_dp_perform_link_training(
 			&link->preferred_training_settings,
 			&lt_settings);
 
+	/* Configure lttpr mode */
+	if (!link->is_lttpr_mode_transparent)
+		configure_lttpr_mode(link);
+
+	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+
 	/* 1. set link rate, lane count and spread. */
 	dpcd_set_link_settings(link, &lt_settings);
 
@@ -1445,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training(
 	dp_set_fec_ready(link, fec_enable);
 
 	if (!link->is_lttpr_mode_transparent) {
-		/* Configure lttpr mode */
-		configure_lttpr_mode(link);
 
 		/* 2. perform link training (set link training done
 		 *  to false is done as well)
@@ -1654,6 +1671,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
 	dp_set_panel_mode(link, panel_mode);
 
 	/* Attempt to train with given link training settings */
+	if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+		start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
 
 	/* Set link rate, lane count and spread. */
 	dpcd_set_link_settings(link, &lt_settings);
@@ -1892,6 +1911,16 @@ bool dp_verify_link_cap(
 	/* disable PHY done possible by BIOS, will be done by driver itself */
 	dp_disable_link_phy(link, link->connector_signal);
 
+	dp_cs_id = get_clock_source_id(link);
+
+	/* link training starts with the maximum common settings
+	 * supported by both sink and ASIC.
+	 */
+	initial_link_settings = get_common_supported_link_settings(
+			*known_limit_link_setting,
+			max_link_cap);
+	cur_link_setting = initial_link_settings;
+
 	/* Temporary Renoir-specific workaround for SWDEV-215184;
 	 * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
 	 * so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1931,6 @@ bool dp_verify_link_cap(
 		dp_disable_link_phy(link, link->connector_signal);
 	}
 
-	dp_cs_id = get_clock_source_id(link);
-
-	/* link training starts with the maximum common settings
-	 * supported by both sink and ASIC.
-	 */
-	initial_link_settings = get_common_supported_link_settings(
-			*known_limit_link_setting,
-			max_link_cap);
-	cur_link_setting = initial_link_settings;
 	do {
 		skip_video_pattern = true;
 
@@ -2654,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
 	break;
 	}
 
-	test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
-			DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
-			DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+	if (dpcd_test_params.bits.CLR_FORMAT == 0)
+		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+	else
+		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
 
 	dc_link_dp_set_test_pattern(
 			link,
@@ -3165,6 +3188,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
 		link->wa_flags.dp_keep_receiver_powered = false;
 }
 
+/* Read additional sink caps defined in source specific DPCD area
+ * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+	uint8_t dpcd_data;
+
+	if (!link)
+		return false;
+
+	if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+		return false;
+
+	link->dpcd_sink_ext_caps.raw = dpcd_data;
+	return true;
+}
+
 static bool retrieve_link_cap(struct dc_link *link)
 {
 	/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3448,6 +3488,9 @@ static bool retrieve_link_cap(struct dc_link *link)
 				sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
 	}
 
+	if (!dpcd_read_sink_ext_caps(link))
+		link->dpcd_sink_ext_caps.raw = 0;
+
 	/* Connectivity log: detection */
 	CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
 
@@ -3600,6 +3643,8 @@ void detect_edp_sink_caps(struct dc_link *link)
 		}
 	}
 	link->verified_link_cap = link->reported_link_cap;
+
+	dc_link_set_default_brightness_aux(link);
 }
 
 void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3691,7 +3736,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
 			struct pipe_ctx *odm_pipe;
 			enum controller_dp_color_space controller_color_space;
 			int opp_cnt = 1;
-			int count;
+			int offset = 0;
+			int dpg_width = width;
 
 			switch (test_pattern_color_space) {
 			case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3713,33 +3759,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
 
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 				opp_cnt++;
+			dpg_width = width / opp_cnt;
+			offset = dpg_width;
 
-			width /= opp_cnt;
+			opp->funcs->opp_set_disp_pattern_generator(opp,
+				controller_test_pattern,
+				controller_color_space,
+				color_depth,
+				NULL,
+				dpg_width,
+				height,
+				0);
 
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
 				odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
 				odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
 					controller_test_pattern,
 					controller_color_space,
 					color_depth,
 					NULL,
-					width,
-					height);
-			}
-			opp->funcs->opp_set_disp_pattern_generator(opp,
-				controller_test_pattern,
-				controller_color_space,
-				color_depth,
-				NULL,
-				width,
-				height);
-			/* wait for dpg to blank pixel data with test pattern */
-			for (count = 0; count < 1000; count++) {
-				if (opp->funcs->dpg_is_blanked(opp))
-					break;
-				udelay(100);
+					dpg_width,
+					height,
+					offset);
+				offset += dpg_width;	/* next ODM slice */
 			}
 		}
 	}
@@ -3757,11 +3800,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
 		else if (opp->funcs->opp_set_disp_pattern_generator) {
 			struct pipe_ctx *odm_pipe;
 			int opp_cnt = 1;
+			int dpg_width = width;
 
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
 				opp_cnt++;
 
-			width /= opp_cnt;
+			dpg_width = width / opp_cnt;
 			for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 				struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
 
@@ -3771,16 +3815,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
 					CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 					color_depth,
 					NULL,
-					width,
-					height);
+					dpg_width,
+					height,
+					0);
 			}
 			opp->funcs->opp_set_disp_pattern_generator(opp,
 				CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
 				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
 				color_depth,
 				NULL,
-				width,
-				height);
+				dpg_width,
+				height,
+				0);
 		}
 	}
 	break;
@@ -3958,6 +4004,11 @@ bool dc_link_dp_set_test_pattern(
 		default:
 			break;
 		}
+
+		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+			pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+					pipe_ctx->stream_res.tg);
+		pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
 		/* update MSA to requested color space */
 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
 				&pipe_ctx->stream->timing,
@@ -3965,9 +4016,27 @@ bool dc_link_dp_set_test_pattern(
 				pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
 				link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
 
+		if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+			if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+				pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+			else
+				pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+			resource_build_info_frame(pipe_ctx);
+			link->dc->hwss.update_info_frame(pipe_ctx);
+		}
+
 		/* CRTC Patterns */
 		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
-
+		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VACTIVE);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VBLANK);
+		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+				CRTC_STATE_VACTIVE);
+		if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+			pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+					pipe_ctx->stream_res.tg);
 		/* Set Test Pattern state */
 		link->test_pattern_enabled = true;
 	}
@@ -4097,8 +4166,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
 	struct link_encoder *link_enc = link->link_enc;
 	uint8_t fec_config = 0;
 
-	if (link->dc->debug.disable_fec ||
-			IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;
 
 	if (link_enc->funcs->fec_set_ready &&
@@ -4133,8 +4201,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
 	struct link_encoder *link_enc = link->link_enc;
 
-	if (link->dc->debug.disable_fec ||
-			IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+	if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
 		return;
 
 	if (link_enc->funcs->fec_set_enable &&
@@ -4157,3 +4224,148 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 	}
 }
 
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+	const uint32_t post_oui_delay = 30; // 30ms
+
+	if (!link->dc->vendor_signature.is_valid) {
+		struct dpcd_amd_signature amd_signature;
+		amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+		amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+		amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+		amd_signature.device_id_byte1 =
+				(uint8_t)(link->ctx->asic_id.chip_id);
+		amd_signature.device_id_byte2 =
+				(uint8_t)(link->ctx->asic_id.chip_id >> 8);
+		memset(&amd_signature.zero, 0, 4);
+		amd_signature.dce_version =
+				(uint8_t)(link->ctx->dce_version);
+		amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
+		amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+
+		core_link_write_dpcd(link, DP_SOURCE_OUI,
+				(uint8_t *)(&amd_signature),
+				sizeof(amd_signature));
+
+	} else {
+		core_link_write_dpcd(link, DP_SOURCE_OUI,
+				link->dc->vendor_signature.data.raw,
+				sizeof(link->dc->vendor_signature.data.raw));
+	}
+
+	// Sink may need to configure internals based on vendor, so allow some
+	// time before proceeding with possibly vendor specific transactions
+	msleep(post_oui_delay);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+		bool isHDR,
+		uint32_t backlight_millinits,
+		uint32_t transition_time_in_ms)
+{
+	struct dpcd_source_backlight_set dpcd_backlight_set;
+	uint8_t backlight_control = isHDR ? 1 : 0;
+
+	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+		return false;
+
+	// OLEDs have no PWM, they can only use AUX
+	if (link->dpcd_sink_ext_caps.bits.oled == 1)
+		backlight_control = 1;
+
+	*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+	*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+			(uint8_t *)(&dpcd_backlight_set),
+			sizeof(dpcd_backlight_set)) != DC_OK)
+		return false;
+
+	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+			&backlight_control, 1) != DC_OK)
+		return false;
+
+	return true;
+}
+
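
Units matter here: the interface is in millinits, so a caller asking for 250 nits passes 250000. A hypothetical call site (the 500 ms transition is an arbitrary example value):

	/* 'link' is an eDP/DP dc_link advertising OLED or AUX backlight caps */
	if (!dc_link_set_backlight_level_nits(link,
					      true,	/* isHDR */
					      250000,	/* 250 nits */
					      500))	/* 500 ms ramp */
		DC_LOG_DEBUG("AUX backlight set failed");
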
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+		uint32_t *backlight_millinits_avg,
+		uint32_t *backlight_millinits_peak)
+{
+	union dpcd_source_backlight_get dpcd_backlight_get;
+
+	memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+			link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+		return false;
+
+	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+			dpcd_backlight_get.raw,
+			sizeof(union dpcd_source_backlight_get)))
+		return false;
+
+	*backlight_millinits_avg =
+		dpcd_backlight_get.bytes.backlight_millinits_avg;
+	*backlight_millinits_peak =
+		dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+	/* On non-supported panels dpcd_read usually succeeds with 0 returned */
+	if (*backlight_millinits_avg == 0 ||
+			*backlight_millinits_avg > *backlight_millinits_peak)
+		return false;
+
+	return true;
+}
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+	uint8_t backlight_enable = enable ? 1 : 0;
+
+	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+		return false;
+
+	if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+		&backlight_enable, 1) != DC_OK)
+		return false;
+
+	return true;
+}
+
+// Read the default from 0x320 (DP_SOURCE_BACKLIGHT_LEVEL), where the BIOS is expected to have written it;
+// the regular get_backlight_nits path reads the panel-reported value at 0x326 instead.
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+	if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+		link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+		return false;
+
+	if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+		(uint8_t *) backlight_millinits,
+		sizeof(uint32_t)))
+		return false;
+
+	return true;
+}
+
+bool dc_link_set_default_brightness_aux(struct dc_link *link)
+{
+	uint32_t default_backlight;
+
+	if (link &&
+		(link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+		if (!dc_link_read_default_bl_aux(link, &default_backlight))
+			default_backlight = 150000;
+		// below 5 nits or above 5000 nits is likely a bad readback
+		if (default_backlight < 5000 || default_backlight > 5000000)
+			default_backlight = 150000; // fall back to 150 nits
+
+		return dc_link_set_backlight_level_nits(link, true,
+				default_backlight, 0);
+	}
+	return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index ddb8550..51e0ee6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link)
 	unsigned char edpRev = 0;
 	enum dc_status result = DC_OK;
 	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-	if (edpRev < DP_EDP_12)
-		return true;
-	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
-	do {
-		sinkstatus = 1;
-		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
-		if (sinkstatus == 0)
-			break;
-		if (result != DC_OK)
-			break;
-		udelay(100); //MAx T9
-	} while (++tries < 50);
+
+	/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+	if (result == DC_OK && edpRev >= DP_EDP_12) {
+		do {
+			sinkstatus = 1;
+			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+			if (sinkstatus == 0)
+				break;
+			if (result != DC_OK)
+				break;
+			udelay(100); //Max T9
+		} while (++tries < 50);
+	}
 
 	if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
 		udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link)
 	unsigned long long time_taken_in_ns = 0;
 
 	result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-	if (result == DC_OK && edpRev < DP_EDP_12)
-		return true;
-	/* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
-	enter_timestamp = dm_get_timestamp(link->ctx);
-	do {
-		sinkstatus = 0;
-		result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
-		if (sinkstatus == 1)
-			break;
-		if (result != DC_OK)
-			break;
-		udelay(25);
-		finish_timestamp = dm_get_timestamp(link->ctx);
-		time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
-	} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+
+	if (result == DC_OK && edpRev >= DP_EDP_12) {
+		/* start from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+		enter_timestamp = dm_get_timestamp(link->ctx);
+		do {
+			sinkstatus = 0;
+			result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+			if (sinkstatus == 1)
+				break;
+			if (result != DC_OK)
+				break;
+			udelay(25);
+			finish_timestamp = dm_get_timestamp(link->ctx);
+			time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+		} while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms
+	}
 
 	if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
 		udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
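
Both T7 and T9 now share the same shape: poll the sink only when the DP_EDP_DPCD_REV read succeeded and reported eDP 1.2+, rather than returning early and skipping the panel-patch delays at the bottom of each function. The bounded-poll idiom, extracted (sketch; poll_sink_ready() stands in for the DP_SINK_STATUS read):

	unsigned long long start = dm_get_timestamp(ctx);

	do {
		if (poll_sink_ready())	/* ready, or the DPCD read failed */
			break;
		udelay(25);
	} while (dm_get_elapse_time_in_ns(ctx, dm_get_timestamp(ctx), start)
		 < 50 * 1000000ULL);	/* max T7 is 50 ms */
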
@@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 
 		DC_LOG_DSC(" ");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a0eb9e5..75c7ce4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,12 @@
 #include "dce100/dce100_resource.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
+#include "dce120/dce120_resource.h"
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/dcn10_resource.h"
-#endif
 #include "dcn20/dcn20_resource.h"
 #include "dcn21/dcn21_resource.h"
-#include "dce120/dce120_resource.h"
+#endif
 
 #define DC_LOGGER_INIT(logger)
 
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
 		*flip_horz_scan_dir = !*flip_horz_scan_dir;
 }
 
+int get_num_odm_splits(struct pipe_ctx *pipe)
+{
+	int odm_split_count = 0;
+	struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+	while (next_pipe) {
+		odm_split_count++;
+		next_pipe = next_pipe->next_odm_pipe;
+	}
+	pipe = pipe->prev_odm_pipe;
+	while (pipe) {
+		odm_split_count++;
+		pipe = pipe->prev_odm_pipe;
+	}
+	return odm_split_count;
+}
+
+static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+{
+	*split_count = get_num_odm_splits(pipe_ctx);
+	*split_idx = 0;
+	if (*split_count == 0) {
+		/*Check for mpc split*/
+		struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+
+		while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+			(*split_idx)++;
+			(*split_count)++;
+			split_pipe = split_pipe->top_pipe;
+		}
+		split_pipe = pipe_ctx->bottom_pipe;
+		while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+			(*split_count)++;
+			split_pipe = split_pipe->bottom_pipe;
+		}
+	} else {
+		/*Get odm split index*/
+		struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+
+		while (split_pipe) {
+			(*split_idx)++;
+			split_pipe = split_pipe->prev_odm_pipe;
+		}
+	}
+}
+
 static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	struct rect clip, dest;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
-	bool pri_split = pipe_ctx->bottom_pipe &&
-			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
-	bool sec_split = pipe_ctx->top_pipe &&
-			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+	int split_count = 0;
+	int split_idx = 0;
 	bool orthogonal_rotation, flip_y_start, flip_x_start;
 
+	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
 	if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
 		stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-		pri_split = false;
-		sec_split = false;
+		split_count = 0;
+		split_idx = 0;
 	}
 
 	/* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
 	data->viewport.height = clip.height * surf_src.height / dest.height;
 
 	/* Handle split */
-	if (pri_split || sec_split) {
+	if (split_count) {
+		/* Extra pixels from the division remainder go to the pipes past
+		 * the "extra pixel index minus one" (epimo), defined here as:
+		 */
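+		/* For example, a 101-pixel wide viewport split across three
+		 * pipes (split_count = 2) leaves 101 % 3 = 2 remainder pixels
+		 * and epimo = 0, so pipes 1 and 2 each take one extra pixel:
+		 * widths 33, 34, 34 at offsets 0, 33 and 67.
+		 */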
+		int epimo = 0;
+
 		if (orthogonal_rotation) {
-			if (flip_y_start != pri_split)
-				data->viewport.height /= 2;
-			else {
-				data->viewport.y +=  data->viewport.height / 2;
-				/* Ceil offset pipe */
-				data->viewport.height = (data->viewport.height + 1) / 2;
-			}
+			if (flip_y_start)
+				split_idx = split_count - split_idx;
+
+			epimo = split_count - data->viewport.height % (split_count + 1);
+
+			data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+			if (split_idx > epimo)
+				data->viewport.y += split_idx - epimo - 1;
+			data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
 		} else {
-			if (flip_x_start != pri_split)
-				data->viewport.width /= 2;
-			else {
-				data->viewport.x +=  data->viewport.width / 2;
-				/* Ceil offset pipe */
-				data->viewport.width = (data->viewport.width + 1) / 2;
-			}
+			if (flip_x_start)
+				split_idx = split_count - split_idx;
+
+			epimo = split_count - data->viewport.width % (split_count + 1);
+
+			data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+			if (split_idx > epimo)
+				data->viewport.x += split_idx - epimo - 1;
+			data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
 		}
 	}
 
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect surf_clip = plane_state->clip_rect;
-	bool pri_split = pipe_ctx->bottom_pipe &&
-			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
-	bool sec_split = pipe_ctx->top_pipe &&
-			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
-	bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+	bool pri_split_tb = pipe_ctx->bottom_pipe &&
+			pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+			stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+	bool sec_split_tb = pipe_ctx->top_pipe &&
+			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+			stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+	int split_count = 0;
+	int split_idx = 0;
 
-	pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+	calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
+	data->recout.x = stream->dst.x;
 	if (stream->src.x < surf_clip.x)
-		pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
-			- stream->src.x) * stream->dst.width
+		data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
 						/ stream->src.width;
 
-	pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
-			stream->dst.width / stream->src.width;
-	if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
-			stream->dst.x + stream->dst.width)
-		pipe_ctx->plane_res.scl_data.recout.width =
-			stream->dst.x + stream->dst.width
-						- pipe_ctx->plane_res.scl_data.recout.x;
+	data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
+	if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
+		data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
 
-	pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+	data->recout.y = stream->dst.y;
 	if (stream->src.y < surf_clip.y)
-		pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
-			- stream->src.y) * stream->dst.height
+		data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
 						/ stream->src.height;
 
-	pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
-			stream->dst.height / stream->src.height;
-	if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
-			stream->dst.y + stream->dst.height)
-		pipe_ctx->plane_res.scl_data.recout.height =
-			stream->dst.y + stream->dst.height
-						- pipe_ctx->plane_res.scl_data.recout.y;
+	data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
+	if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+		data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
 
 	/* Handle h & v split, handle rotation using viewport */
-	if (sec_split && top_bottom_split) {
-		pipe_ctx->plane_res.scl_data.recout.y +=
-				pipe_ctx->plane_res.scl_data.recout.height / 2;
+	if (sec_split_tb) {
+		data->recout.y += data->recout.height / 2;
 		/* Floor primary pipe, ceil 2ndary pipe */
-		pipe_ctx->plane_res.scl_data.recout.height =
-				(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
-	} else if (pri_split && top_bottom_split)
-		pipe_ctx->plane_res.scl_data.recout.height /= 2;
-	else if (sec_split) {
-		pipe_ctx->plane_res.scl_data.recout.x +=
-				pipe_ctx->plane_res.scl_data.recout.width / 2;
-		/* Ceil offset pipe */
-		pipe_ctx->plane_res.scl_data.recout.width =
-				(pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
-	} else if (pri_split)
-		pipe_ctx->plane_res.scl_data.recout.width /= 2;
+		data->recout.height = (data->recout.height + 1) / 2;
+	} else if (pri_split_tb)
+		data->recout.height /= 2;
+	else if (split_count) {
+		/* Extra pixels from the division remainder go to the pipes past
+		 * the "extra pixel index minus one" (epimo), defined here as:
+		 */
+		int epimo = split_count - data->recout.width % (split_count + 1);
+
+		/* No recout offset due to ODM */
+		if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+			data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+			if (split_idx > epimo)
+				data->recout.x += split_idx - epimo - 1;
+		}
+		data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+	}
 }
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 {
 	const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	const struct dc_stream_state *stream = pipe_ctx->stream;
+	struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
 	struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
 	struct rect src = pipe_ctx->plane_state->src_rect;
 	int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
 	int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
 			|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
 	bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+	int odm_idx = 0;
 
 	/*
 	 * Need to calculate the scan direction for viewport to make adjustments
@@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
 					* stream->dst.width / stream->src.width -
 					src.x * plane_state->dst_rect.width / src.width
 					* stream->dst.width / stream->src.width);
+	/* Modified recout_skip_h calculation: ODM pipes carry no recout
+	 * offset, so add it back here.
+	 */
+	while (odm_pipe) {
+		odm_idx++;
+		odm_pipe = odm_pipe->prev_odm_pipe;
+	}
+	if (odm_idx)
+		recout_skip_h += odm_idx * data->recout.width;
+
 	recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
 					* stream->dst.height / stream->src.height -
 					src.y * plane_state->dst_rect.height / src.height
@@ -1021,6 +1085,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 		store_h_border_left + timing->h_border_right;
 	pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
 		timing->v_border_top + timing->v_border_bottom;
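+	/* Each ODM pipe scans out an equal horizontal slice of the active area. */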
+	if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+		pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
 
 	/* Taps calculations */
 	if (pipe_ctx->plane_res.xfm != NULL)
@@ -2034,7 +2100,7 @@ enum dc_status resource_map_pool_resources(
 	for (i = 0; i < context->stream_count; i++)
 		if (context->streams[i] == stream) {
 			context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
-			context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+			context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
 			context->stream_status[i].audio_inst =
 				pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
 
@@ -2108,10 +2174,10 @@ enum dc_status dc_validate_global_state(
 			if (pipe_ctx->stream != stream)
 				continue;
 
-			if (dc->res_pool->funcs->get_default_swizzle_mode &&
+			if (dc->res_pool->funcs->patch_unknown_plane_state &&
 					pipe_ctx->plane_state &&
 					pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
-				result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+				result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
 				if (result != DC_OK)
 					return result;
 			}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index a96d8de..64cf24a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
 		 */
 		memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
 		dc->vm_pa_config.valid = true;
+
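+		/* Drop the NV12 iflip VM workaround when host VM (HVM) is disabled. */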
+		if (pa_config->is_hvm_enabled == 0)
+			dc->debug.nv12_iflip_vm_wa = false;
 	}
 
 	return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
 	int i;
 	int in_use = 0;
 
-	for (i = 0; i < dc->vm_helper->num_vmid; i++)
+	for (i = 0; i < MAX_HUBP; i++)
 		in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
 			| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
 	return in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8ff25b5..d3ceb39 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.69"
+#define DC_VER "3.2.76"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
 	bool no_connect_phy_config;
 	bool dedcn20_305_wa;
 	bool skip_clock_update;
+	bool lt_early_cr_pattern;
 };
 
 struct dc_dcc_surface_param {
@@ -229,6 +230,7 @@ struct dc_config {
 	bool forced_clocks;
 	bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
 	bool multi_mon_pp_mclk_switch;
+	bool psr_on_dmub;
 };
 
 enum visual_confirm {
@@ -388,6 +390,7 @@ struct dc_debug_options {
 	int always_scale;
 	bool disable_pplib_clock_request;
 	bool disable_clock_gate;
+	bool disable_mem_low_power;
 	bool disable_dmcu;
 	bool disable_psr;
 	bool force_abm_enable;
@@ -453,6 +456,7 @@ struct dc_phy_addr_space_config {
 	} gart_config;
 
 	bool valid;
+	bool is_hvm_enabled;
 	uint64_t page_table_default_page_addr;
 };
 
@@ -518,6 +522,7 @@ struct dc {
 
 	/* Require to optimize clocks and bandwidth for added/removed planes */
 	bool optimized_required;
+	bool wm_optimized_required;
 
 	/* Require to maintain clocks and bandwidth for UEFI enabled HW */
 	int optimize_seamless_boot_streams;
@@ -526,6 +531,7 @@ struct dc {
 	struct compressor *fbc_compressor;
 
 	struct dc_debug_data debug_data;
+	struct dpcd_vendor_signature vendor_signature;
 
 	const char *build_id;
 	struct vm_helper *vm_helper;
@@ -565,12 +571,14 @@ struct dc_init_data {
 	struct dc_reg_helper_state *dmub_offload;
 
 	struct dc_config flags;
-	uint32_t log_mask;
+	uint64_t log_mask;
+
 	/**
 	 * gpu_info FW provided soc bounding box struct or 0 if not
 	 * available in FW
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+	struct dpcd_vendor_signature vendor_signature;
 };
 
 struct dc_callback_init {
@@ -682,7 +690,6 @@ struct dc_3dlut {
 	struct kref refcount;
 	struct tetrahedral_params lut_3d;
 	struct fixed31_32 hdr_multiplier;
-	bool initialized; /*remove after diag fix*/
 	union dc_3dlut_state state;
 	struct dc_context *ctx;
 };
@@ -865,6 +872,7 @@ struct dc_flip_addrs {
 	unsigned int flip_timestamp_in_us;
 	bool flip_immediate;
 	/* TODO: add flip duration for FreeSync */
+	bool triplebuffer_flips;
 };
 
 bool dc_post_update_surfaces_to_stream(
@@ -979,6 +987,20 @@ struct dpcd_caps {
 
 };
 
+union dpcd_sink_ext_caps {
+	struct {
+		/* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+		 * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+		 */
+		uint8_t sdr_aux_backlight_control : 1;
+		uint8_t hdr_aux_backlight_control : 1;
+		uint8_t reserved_1 : 2;
+		uint8_t oled : 1;
+		uint8_t reserved : 3;
+	} bits;
+	uint8_t raw;
+};
+
 #include "dc_link.h"
 
 /*******************************************************************************
@@ -1004,6 +1026,11 @@ struct dc_sink_dsc_caps {
 	struct dsc_dec_dpcd_caps dsc_dec_caps;
 };
 
+struct dc_sink_fec_caps {
+	bool is_rx_fec_supported;
+	bool is_topology_fec_supported;
+};
+
 /*
  * The sink structure contains EDID and other display device properties
  */
@@ -1017,7 +1044,10 @@ struct dc_sink {
 	struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
 	bool converter_disable_audio;
 
-	struct dc_sink_dsc_caps sink_dsc_caps;
+	struct dc_sink_dsc_caps dsc_caps;
+	struct dc_sink_fec_caps fec_caps;
+
+	bool is_vsc_sdp_colorimetry_supported;
 
 	/* private to DC core */
 	struct dc_link *link;
@@ -1075,7 +1105,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
 unsigned int dc_get_target_backlight_pwm(struct dc *dc);
 
 bool dc_is_dmcu_initialized(struct dc *dc);
-bool dc_is_hw_initialized(struct dc *dc);
 
 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfe4472..bb2730e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
 	uint8_t ieee_fw_rev[2];
 };
 
+struct dpcd_vendor_signature {
+	bool is_valid;
+
+	union dpcd_ieee_vendor_signature {
+		struct {
+			uint8_t ieee_oui[3]; /* 24-bit IEEE OUI */
+			uint8_t ieee_device_id[6]; /* usually 6-byte ASCII name */
+			uint8_t ieee_hw_rev;
+			uint8_t ieee_fw_rev[2];
+		};
+		uint8_t raw[12];
+	} data;
+};
+
+struct dpcd_amd_signature {
+	uint8_t AMD_IEEE_TxSignature_byte1;
+	uint8_t AMD_IEEE_TxSignature_byte2;
+	uint8_t AMD_IEEE_TxSignature_byte3;
+	uint8_t device_id_byte1;
+	uint8_t device_id_byte2;
+	uint8_t zero[4];
+	uint8_t dce_version;
+	uint8_t dal_version_byte1;
+	uint8_t dal_version_byte2;
+};
+
+struct dpcd_source_backlight_set {
+	struct  {
+		uint8_t byte0;
+		uint8_t byte1;
+		uint8_t byte2;
+		uint8_t byte3;
+	} backlight_level_millinits;
+
+	struct  {
+		uint8_t byte0;
+		uint8_t byte1;
+	} backlight_transition_time_ms;
+};
+
+union dpcd_source_backlight_get {
+	struct {
+		uint32_t backlight_millinits_peak; /* 326h */
+		uint32_t backlight_millinits_avg; /* 32Ah */
+	} bytes;
+	uint8_t raw[8];
+};
+
 /*DPCD register of DP receiver capability field bits-*/
 union edp_configuration_cap {
 	struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d256031..00ff5e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -26,6 +26,7 @@
 #ifndef DC_LINK_H_
 #define DC_LINK_H_
 
+#include "dc.h"
 #include "dc_types.h"
 #include "grph_object_defs.h"
 
@@ -128,6 +129,7 @@ struct dc_link {
 	enum edp_revision edp_revision;
 	bool psr_feature_enabled;
 	bool psr_allow_active;
+	union dpcd_sink_ext_caps dpcd_sink_ext_caps;
 
 	/* MST record stream using this link */
 	struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
 		uint32_t backlight_pwm_u16_16,
 		uint32_t frame_ramp);
 
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+		bool isHDR,
+		uint32_t backlight_millinits,
+		uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+		uint32_t *backlight_millinits,
+		uint32_t *backlight_millinits_peak);
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
+
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
+bool dc_link_set_default_brightness_aux(struct dc_link *link);
+
 int dc_link_get_backlight_level(const struct dc_link *dc_link);
 
 bool dc_link_set_abm_disable(const struct dc_link *dc_link);
@@ -316,4 +333,7 @@ bool dc_submit_i2c_oem(
 
 uint32_t dc_bandwidth_in_kbps_from_timing(
 	const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
 #endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 92096de..a5c7ef4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -118,6 +118,7 @@ union stream_update_flags {
 		uint32_t dpms_off:1;
 		uint32_t gamut_remap:1;
 		uint32_t wb_update:1;
+		uint32_t dsc_changed : 1;
 	} bits;
 
 	uint32_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e59532d..0d21010 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -229,7 +229,9 @@ struct dc_panel_patch {
 	unsigned int extra_t12_ms;
 	unsigned int extra_delay_backlight_off;
 	unsigned int extra_t7_ms;
-	unsigned int manage_secondary_link;
+	unsigned int skip_scdc_overwrite;
+	unsigned int delay_ignore_msa;
+	unsigned int disable_fec;
 };
 
 struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fdf3d8f..fbfcff7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
 dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 68c4049..743042d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 			case AUX_TRANSACTION_REPLY_AUX_DEFER:
 			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
 				retry_on_defer = true;
-				/* fall through */
+				fallthrough;
 			case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
 				if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
 					goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 30d953a..f0cebe7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
 	struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
 	const struct dc_config *config = &dmcu->ctx->dc->config;
 	bool status = false;
+	struct dc_context *ctx = dmcu->ctx;
+	unsigned int i;
+	//  5 4 3 2 1 0
+	//  F E D C B A - bit 0 is A, bit 5 is F
+	unsigned int tx_interrupt_mask = 0;
 
 	PERF_TRACE();
 	/*  Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
 	 */
 	dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
 
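+	/* Build a bitmask of USB-C capable transmitters (UNIPHY A-F) to
+	 * pass to the DMCU firmware via MASTER_COMM_DATA_REG3 below.
+	 */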
+	for (i = 0; i < ctx->dc->link_count; i++) {
+		if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
+			if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
+					ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
+				tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
+			}
+		}
+	}
+
 	switch (dmcu->dmcu_state) {
 	case DMCU_UNLOADED:
 		status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
 		/* Set backlight ramping stepsize */
 		REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
 
+		REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
+
 		/* Set command to initialize microcontroller */
 		REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
 			MCP_INIT_DMCU);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 066188b..24adec4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -267,6 +267,9 @@ static void set_speed(
 	uint32_t xtal_ref_div = 0;
 	uint32_t prescale = 0;
 
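+	/* Bail out early: speed is the divisor in the prescale calculation below. */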
+	if (speed == 0)
+		return;
+
 	REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
 
 	if (xtal_ref_div == 0)
@@ -274,17 +277,15 @@ static void set_speed(
 
 	prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
 
-	if (speed) {
-		if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
-			REG_UPDATE_N(SPEED, 3,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
-		else
-			REG_UPDATE_N(SPEED, 2,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
-				     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
-	}
+	if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+		REG_UPDATE_N(SPEED, 3,
+			     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+			     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+			     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+	else
+		REG_UPDATE_N(SPEED, 2,
+			     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+			     FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
 }
 
 static bool setup_engine(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 8aa937f..51481e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
 		sign = 1;
 		floating = 1;
-		/* fall through */
+		fallthrough;
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
 		grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 48862be..7311f31 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -22,1004 +22,1330 @@
  * Authors: AMD
  *
  */
-
 #include "transform.h"
 
+//=========================================
+// <num_taps>    = 2
+// <num_phases>  = 16
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = s1.10
+// <CoefOut>     = s1.12
+//=========================================
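+// Note: coefficients are s1.12 two's-complement fixed point, so values
+// near 0x4000 (e.g. 0x3FDC) encode small negative taps.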
 static const uint16_t filter_2tap_16p[18] = {
-	4096, 0,
-	3840, 256,
-	3584, 512,
-	3328, 768,
-	3072, 1024,
-	2816, 1280,
-	2560, 1536,
-	2304, 1792,
-	2048, 2048
+		0x1000, 0x0000,
+		0x0FF0, 0x0010,
+		0x0FB0, 0x0050,
+		0x0F34, 0x00CC,
+		0x0E68, 0x0198,
+		0x0D44, 0x02BC,
+		0x0BC4, 0x043C,
+		0x09FC, 0x0604,
+		0x0800, 0x0800
 };
 
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_3tap_16p_upscale[27] = {
-	2048, 2048, 0,
-	1708, 2424, 16348,
-	1372, 2796, 16308,
-	1056, 3148, 16272,
-	768, 3464, 16244,
-	512, 3728, 16236,
-	296, 3928, 16252,
-	124, 4052, 16296,
-	0, 4096, 0
+		0x0804, 0x07FC, 0x0000,
+		0x06AC, 0x0978, 0x3FDC,
+		0x055C, 0x0AF0, 0x3FB4,
+		0x0420, 0x0C50, 0x3F90,
+		0x0300, 0x0D88, 0x3F78,
+		0x0200, 0x0E90, 0x3F70,
+		0x0128, 0x0F5C, 0x3F7C,
+		0x007C, 0x0FD8, 0x3FAC,
+		0x0000, 0x1000, 0x0000
 };
 
-static const uint16_t filter_3tap_16p_117[27] = {
-	2048, 2048, 0,
-	1824, 2276, 16376,
-	1600, 2496, 16380,
-	1376, 2700, 16,
-	1156, 2880, 52,
-	948, 3032, 108,
-	756, 3144, 192,
-	580, 3212, 296,
-	428, 3236, 428
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_116[27] = {
+		0x0804, 0x07FC, 0x0000,
+		0x0700, 0x0914, 0x3FEC,
+		0x0604, 0x0A1C, 0x3FE0,
+		0x050C, 0x0B14, 0x3FE0,
+		0x041C, 0x0BF4, 0x3FF0,
+		0x0340, 0x0CB0, 0x0010,
+		0x0274, 0x0D3C, 0x0050,
+		0x01C0, 0x0D94, 0x00AC,
+		0x0128, 0x0DB4, 0x0124
 };
 
-static const uint16_t filter_3tap_16p_150[27] = {
-	2048, 2048, 0,
-	1872, 2184, 36,
-	1692, 2308, 88,
-	1516, 2420, 156,
-	1340, 2516, 236,
-	1168, 2592, 328,
-	1004, 2648, 440,
-	844, 2684, 560,
-	696, 2696, 696
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_149[27] = {
+		0x0804, 0x07FC, 0x0000,
+		0x0730, 0x08CC, 0x0004,
+		0x0660, 0x098C, 0x0014,
+		0x0590, 0x0A3C, 0x0034,
+		0x04C4, 0x0AD4, 0x0068,
+		0x0400, 0x0B54, 0x00AC,
+		0x0348, 0x0BB0, 0x0108,
+		0x029C, 0x0BEC, 0x0178,
+		0x0200, 0x0C00, 0x0200
 };
 
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_3tap_16p_183[27] = {
-	2048, 2048, 0,
-	1892, 2104, 92,
-	1744, 2152, 196,
-	1592, 2196, 300,
-	1448, 2232, 412,
-	1304, 2256, 528,
-	1168, 2276, 648,
-	1032, 2288, 772,
-	900, 2292, 900
+		0x0804, 0x07FC, 0x0000,
+		0x0754, 0x0880, 0x002C,
+		0x06A8, 0x08F0, 0x0068,
+		0x05FC, 0x0954, 0x00B0,
+		0x0550, 0x09AC, 0x0104,
+		0x04A8, 0x09F0, 0x0168,
+		0x0408, 0x0A20, 0x01D8,
+		0x036C, 0x0A40, 0x0254,
+		0x02DC, 0x0A48, 0x02DC
 };
 
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_4tap_16p_upscale[36] = {
-	0, 4096, 0, 0,
-	16240, 4056, 180, 16380,
-	16136, 3952, 404, 16364,
-	16072, 3780, 664, 16344,
-	16040, 3556, 952, 16312,
-	16036, 3284, 1268, 16272,
-	16052, 2980, 1604, 16224,
-	16084, 2648, 1952, 16176,
-	16128, 2304, 2304, 16128
+		0x0000, 0x1000, 0x0000, 0x0000,
+		0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+		0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+		0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+		0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+		0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+		0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+		0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+		0x3F00, 0x08FC, 0x0900, 0x3F04
 };
 
-static const uint16_t filter_4tap_16p_117[36] = {
-	428, 3236, 428, 0,
-	276, 3232, 604, 16364,
-	148, 3184, 800, 16340,
-	44, 3104, 1016, 16312,
-	16344, 2984, 1244, 16284,
-	16284, 2832, 1488, 16256,
-	16244, 2648, 1732, 16236,
-	16220, 2440, 1976, 16220,
-	16212, 2216, 2216, 16212
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_116[36] = {
+		0x01A8, 0x0CB4, 0x01A4, 0x0000,
+		0x0110, 0x0CB0, 0x0254, 0x3FEC,
+		0x0090, 0x0C80, 0x031C, 0x3FD4,
+		0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+		0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+		0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+		0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+		0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+		0x3F54, 0x08AC, 0x08AC, 0x3F54
 };
 
-static const uint16_t filter_4tap_16p_150[36] = {
-	696, 2700, 696, 0,
-	560, 2700, 848, 16364,
-	436, 2676, 1008, 16348,
-	328, 2628, 1180, 16336,
-	232, 2556, 1356, 16328,
-	152, 2460, 1536, 16328,
-	84, 2344, 1716, 16332,
-	28, 2208, 1888, 16348,
-	16376, 2052, 2052, 16376
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_149[36] = {
+		0x02B8, 0x0A90, 0x02B8, 0x0000,
+		0x0230, 0x0A90, 0x0350, 0x3FF0,
+		0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+		0x0148, 0x0A48, 0x049C, 0x3FD4,
+		0x00E8, 0x0A00, 0x054C, 0x3FCC,
+		0x0098, 0x09A0, 0x0600, 0x3FC8,
+		0x0054, 0x0928, 0x06B4, 0x3FD0,
+		0x001C, 0x08A4, 0x0760, 0x3FE0,
+		0x3FFC, 0x0804, 0x0804, 0x3FFC
 };
 
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_4tap_16p_183[36] = {
-	940, 2208, 940, 0,
-	832, 2200, 1052, 4,
-	728, 2180, 1164, 16,
-	628, 2148, 1280, 36,
-	536, 2100, 1392, 60,
-	448, 2044, 1504, 92,
-	368, 1976, 1612, 132,
-	296, 1900, 1716, 176,
-	232, 1812, 1812, 232
+		0x03B0, 0x08A0, 0x03B0, 0x0000,
+		0x0348, 0x0898, 0x041C, 0x0004,
+		0x02DC, 0x0884, 0x0490, 0x0010,
+		0x0278, 0x0864, 0x0500, 0x0024,
+		0x021C, 0x0838, 0x0570, 0x003C,
+		0x01C8, 0x07FC, 0x05E0, 0x005C,
+		0x0178, 0x07B8, 0x064C, 0x0084,
+		0x0130, 0x076C, 0x06B0, 0x00B4,
+		0x00F0, 0x0714, 0x0710, 0x00EC
 };
 
+//=========================================
+// <num_taps>    = 2
+// <num_phases>  = 64
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = s1.10
+// <CoefOut>     = s1.12
+//=========================================
 static const uint16_t filter_2tap_64p[66] = {
-	4096, 0,
-	4032, 64,
-	3968, 128,
-	3904, 192,
-	3840, 256,
-	3776, 320,
-	3712, 384,
-	3648, 448,
-	3584, 512,
-	3520, 576,
-	3456, 640,
-	3392, 704,
-	3328, 768,
-	3264, 832,
-	3200, 896,
-	3136, 960,
-	3072, 1024,
-	3008, 1088,
-	2944, 1152,
-	2880, 1216,
-	2816, 1280,
-	2752, 1344,
-	2688, 1408,
-	2624, 1472,
-	2560, 1536,
-	2496, 1600,
-	2432, 1664,
-	2368, 1728,
-	2304, 1792,
-	2240, 1856,
-	2176, 1920,
-	2112, 1984,
-	2048, 2048 };
+		0x1000, 0x0000,
+		0x1000, 0x0000,
+		0x0FFC, 0x0004,
+		0x0FF8, 0x0008,
+		0x0FF0, 0x0010,
+		0x0FE4, 0x001C,
+		0x0FD8, 0x0028,
+		0x0FC4, 0x003C,
+		0x0FB0, 0x0050,
+		0x0F98, 0x0068,
+		0x0F7C, 0x0084,
+		0x0F58, 0x00A8,
+		0x0F34, 0x00CC,
+		0x0F08, 0x00F8,
+		0x0ED8, 0x0128,
+		0x0EA4, 0x015C,
+		0x0E68, 0x0198,
+		0x0E28, 0x01D8,
+		0x0DE4, 0x021C,
+		0x0D98, 0x0268,
+		0x0D44, 0x02BC,
+		0x0CEC, 0x0314,
+		0x0C90, 0x0370,
+		0x0C2C, 0x03D4,
+		0x0BC4, 0x043C,
+		0x0B58, 0x04A8,
+		0x0AE8, 0x0518,
+		0x0A74, 0x058C,
+		0x09FC, 0x0604,
+		0x0980, 0x0680,
+		0x0900, 0x0700,
+		0x0880, 0x0780,
+		0x0800, 0x0800
+};
 
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_3tap_64p_upscale[99] = {
-	2048, 2048, 0,
-	1960, 2140, 16376,
-	1876, 2236, 16364,
-	1792, 2328, 16356,
-	1708, 2424, 16348,
-	1620, 2516, 16336,
-	1540, 2612, 16328,
-	1456, 2704, 16316,
-	1372, 2796, 16308,
-	1292, 2884, 16296,
-	1212, 2976, 16288,
-	1136, 3060, 16280,
-	1056, 3148, 16272,
-	984, 3228, 16264,
-	908, 3312, 16256,
-	836, 3388, 16248,
-	768, 3464, 16244,
-	700, 3536, 16240,
-	636, 3604, 16236,
-	572, 3668, 16236,
-	512, 3728, 16236,
-	456, 3784, 16236,
-	400, 3836, 16240,
-	348, 3884, 16244,
-	296, 3928, 16252,
-	252, 3964, 16260,
-	204, 4000, 16268,
-	164, 4028, 16284,
-	124, 4052, 16296,
-	88, 4072, 16316,
-	56, 4084, 16336,
-	24, 4092, 16356,
-	0, 4096, 0
+		0x0804, 0x07FC, 0x0000,
+		0x07A8, 0x0860, 0x3FF8,
+		0x0754, 0x08BC, 0x3FF0,
+		0x0700, 0x0918, 0x3FE8,
+		0x06AC, 0x0978, 0x3FDC,
+		0x0654, 0x09D8, 0x3FD4,
+		0x0604, 0x0A34, 0x3FC8,
+		0x05B0, 0x0A90, 0x3FC0,
+		0x055C, 0x0AF0, 0x3FB4,
+		0x050C, 0x0B48, 0x3FAC,
+		0x04BC, 0x0BA0, 0x3FA4,
+		0x0470, 0x0BF4, 0x3F9C,
+		0x0420, 0x0C50, 0x3F90,
+		0x03D8, 0x0C9C, 0x3F8C,
+		0x038C, 0x0CF0, 0x3F84,
+		0x0344, 0x0D40, 0x3F7C,
+		0x0300, 0x0D88, 0x3F78,
+		0x02BC, 0x0DD0, 0x3F74,
+		0x027C, 0x0E14, 0x3F70,
+		0x023C, 0x0E54, 0x3F70,
+		0x0200, 0x0E90, 0x3F70,
+		0x01C8, 0x0EC8, 0x3F70,
+		0x0190, 0x0EFC, 0x3F74,
+		0x015C, 0x0F2C, 0x3F78,
+		0x0128, 0x0F5C, 0x3F7C,
+		0x00FC, 0x0F7C, 0x3F88,
+		0x00CC, 0x0FA4, 0x3F90,
+		0x00A4, 0x0FC0, 0x3F9C,
+		0x007C, 0x0FD8, 0x3FAC,
+		0x0058, 0x0FE8, 0x3FC0,
+		0x0038, 0x0FF4, 0x3FD4,
+		0x0018, 0x1000, 0x3FE8,
+		0x0000, 0x1000, 0x0000
 };
 
-static const uint16_t filter_3tap_64p_117[99] = {
-	2048, 2048, 0,
-	1992, 2104, 16380,
-	1936, 2160, 16380,
-	1880, 2220, 16376,
-	1824, 2276, 16376,
-	1768, 2332, 16376,
-	1712, 2388, 16376,
-	1656, 2444, 16376,
-	1600, 2496, 16380,
-	1544, 2548, 0,
-	1488, 2600, 4,
-	1432, 2652, 8,
-	1376, 2700, 16,
-	1320, 2748, 20,
-	1264, 2796, 32,
-	1212, 2840, 40,
-	1156, 2880, 52,
-	1104, 2920, 64,
-	1052, 2960, 80,
-	1000, 2996, 92,
-	948, 3032, 108,
-	900, 3060, 128,
-	852, 3092, 148,
-	804, 3120, 168,
-	756, 3144, 192,
-	712, 3164, 216,
-	668, 3184, 240,
-	624, 3200, 268,
-	580, 3212, 296,
-	540, 3220, 328,
-	500, 3228, 360,
-	464, 3232, 392,
-	428, 3236, 428
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_116[99] = {
+		0x0804, 0x07FC, 0x0000,
+		0x07C0, 0x0844, 0x3FFC,
+		0x0780, 0x0888, 0x3FF8,
+		0x0740, 0x08D0, 0x3FF0,
+		0x0700, 0x0914, 0x3FEC,
+		0x06C0, 0x0958, 0x3FE8,
+		0x0684, 0x0998, 0x3FE4,
+		0x0644, 0x09DC, 0x3FE0,
+		0x0604, 0x0A1C, 0x3FE0,
+		0x05C4, 0x0A5C, 0x3FE0,
+		0x0588, 0x0A9C, 0x3FDC,
+		0x0548, 0x0ADC, 0x3FDC,
+		0x050C, 0x0B14, 0x3FE0,
+		0x04CC, 0x0B54, 0x3FE0,
+		0x0490, 0x0B8C, 0x3FE4,
+		0x0458, 0x0BC0, 0x3FE8,
+		0x041C, 0x0BF4, 0x3FF0,
+		0x03E0, 0x0C28, 0x3FF8,
+		0x03A8, 0x0C58, 0x0000,
+		0x0374, 0x0C88, 0x0004,
+		0x0340, 0x0CB0, 0x0010,
+		0x0308, 0x0CD8, 0x0020,
+		0x02D8, 0x0CFC, 0x002C,
+		0x02A0, 0x0D20, 0x0040,
+		0x0274, 0x0D3C, 0x0050,
+		0x0244, 0x0D58, 0x0064,
+		0x0214, 0x0D70, 0x007C,
+		0x01E8, 0x0D84, 0x0094,
+		0x01C0, 0x0D94, 0x00AC,
+		0x0198, 0x0DA0, 0x00C8,
+		0x0170, 0x0DAC, 0x00E4,
+		0x014C, 0x0DB0, 0x0104,
+		0x0128, 0x0DB4, 0x0124
 };
 
-static const uint16_t filter_3tap_64p_150[99] = {
-	2048, 2048, 0,
-	2004, 2080, 8,
-	1960, 2116, 16,
-	1916, 2148, 28,
-	1872, 2184, 36,
-	1824, 2216, 48,
-	1780, 2248, 60,
-	1736, 2280, 76,
-	1692, 2308, 88,
-	1648, 2336, 104,
-	1604, 2368, 120,
-	1560, 2392, 136,
-	1516, 2420, 156,
-	1472, 2444, 172,
-	1428, 2472, 192,
-	1384, 2492, 212,
-	1340, 2516, 236,
-	1296, 2536, 256,
-	1252, 2556, 280,
-	1212, 2576, 304,
-	1168, 2592, 328,
-	1124, 2608, 356,
-	1084, 2624, 384,
-	1044, 2636, 412,
-	1004, 2648, 440,
-	964, 2660, 468,
-	924, 2668, 500,
-	884, 2676, 528,
-	844, 2684, 560,
-	808, 2688, 596,
-	768, 2692, 628,
-	732, 2696, 664,
-	696, 2696, 696
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_149[99] = {
+		0x0804, 0x07FC, 0x0000,
+		0x07CC, 0x0834, 0x0000,
+		0x0798, 0x0868, 0x0000,
+		0x0764, 0x089C, 0x0000,
+		0x0730, 0x08CC, 0x0004,
+		0x0700, 0x08FC, 0x0004,
+		0x06CC, 0x092C, 0x0008,
+		0x0698, 0x095C, 0x000C,
+		0x0660, 0x098C, 0x0014,
+		0x062C, 0x09B8, 0x001C,
+		0x05FC, 0x09E4, 0x0020,
+		0x05C4, 0x0A10, 0x002C,
+		0x0590, 0x0A3C, 0x0034,
+		0x055C, 0x0A64, 0x0040,
+		0x0528, 0x0A8C, 0x004C,
+		0x04F8, 0x0AB0, 0x0058,
+		0x04C4, 0x0AD4, 0x0068,
+		0x0490, 0x0AF8, 0x0078,
+		0x0460, 0x0B18, 0x0088,
+		0x0430, 0x0B38, 0x0098,
+		0x0400, 0x0B54, 0x00AC,
+		0x03D0, 0x0B6C, 0x00C4,
+		0x03A0, 0x0B88, 0x00D8,
+		0x0374, 0x0B9C, 0x00F0,
+		0x0348, 0x0BB0, 0x0108,
+		0x0318, 0x0BC4, 0x0124,
+		0x02EC, 0x0BD4, 0x0140,
+		0x02C4, 0x0BE0, 0x015C,
+		0x029C, 0x0BEC, 0x0178,
+		0x0274, 0x0BF4, 0x0198,
+		0x024C, 0x0BFC, 0x01B8,
+		0x0228, 0x0BFC, 0x01DC,
+		0x0200, 0x0C00, 0x0200
 };
 
+//=========================================
+// <num_taps>    = 3
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_3tap_64p_183[99] = {
-	2048, 2048, 0,
-	2008, 2060, 20,
-	1968, 2076, 44,
-	1932, 2088, 68,
-	1892, 2104, 92,
-	1856, 2116, 120,
-	1816, 2128, 144,
-	1780, 2140, 168,
-	1744, 2152, 196,
-	1704, 2164, 220,
-	1668, 2176, 248,
-	1632, 2188, 272,
-	1592, 2196, 300,
-	1556, 2204, 328,
-	1520, 2216, 356,
-	1484, 2224, 384,
-	1448, 2232, 412,
-	1412, 2240, 440,
-	1376, 2244, 468,
-	1340, 2252, 496,
-	1304, 2256, 528,
-	1272, 2264, 556,
-	1236, 2268, 584,
-	1200, 2272, 616,
-	1168, 2276, 648,
-	1132, 2280, 676,
-	1100, 2284, 708,
-	1064, 2288, 740,
-	1032, 2288, 772,
-	996, 2292, 800,
-	964, 2292, 832,
-	932, 2292, 868,
-	900, 2292, 900
+		0x0804, 0x07FC, 0x0000,
+		0x07D4, 0x0824, 0x0008,
+		0x07AC, 0x0840, 0x0014,
+		0x0780, 0x0860, 0x0020,
+		0x0754, 0x0880, 0x002C,
+		0x0728, 0x089C, 0x003C,
+		0x0700, 0x08B8, 0x0048,
+		0x06D4, 0x08D4, 0x0058,
+		0x06A8, 0x08F0, 0x0068,
+		0x067C, 0x090C, 0x0078,
+		0x0650, 0x0924, 0x008C,
+		0x0628, 0x093C, 0x009C,
+		0x05FC, 0x0954, 0x00B0,
+		0x05D0, 0x096C, 0x00C4,
+		0x05A8, 0x0980, 0x00D8,
+		0x0578, 0x0998, 0x00F0,
+		0x0550, 0x09AC, 0x0104,
+		0x0528, 0x09BC, 0x011C,
+		0x04FC, 0x09D0, 0x0134,
+		0x04D4, 0x09E0, 0x014C,
+		0x04A8, 0x09F0, 0x0168,
+		0x0480, 0x09FC, 0x0184,
+		0x045C, 0x0A08, 0x019C,
+		0x0434, 0x0A14, 0x01B8,
+		0x0408, 0x0A20, 0x01D8,
+		0x03E0, 0x0A2C, 0x01F4,
+		0x03B8, 0x0A34, 0x0214,
+		0x0394, 0x0A38, 0x0234,
+		0x036C, 0x0A40, 0x0254,
+		0x0348, 0x0A44, 0x0274,
+		0x0324, 0x0A48, 0x0294,
+		0x0300, 0x0A48, 0x02B8,
+		0x02DC, 0x0A48, 0x02DC
 };
 
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_4tap_64p_upscale[132] = {
-	0, 4096, 0, 0,
-	16344, 4092, 40, 0,
-	16308, 4084, 84, 16380,
-	16272, 4072, 132, 16380,
-	16240, 4056, 180, 16380,
-	16212, 4036, 232, 16376,
-	16184, 4012, 288, 16372,
-	16160, 3984, 344, 16368,
-	16136, 3952, 404, 16364,
-	16116, 3916, 464, 16360,
-	16100, 3872, 528, 16356,
-	16084, 3828, 596, 16348,
-	16072, 3780, 664, 16344,
-	16060, 3728, 732, 16336,
-	16052, 3676, 804, 16328,
-	16044, 3616, 876, 16320,
-	16040, 3556, 952, 16312,
-	16036, 3492, 1028, 16300,
-	16032, 3424, 1108, 16292,
-	16032, 3356, 1188, 16280,
-	16036, 3284, 1268, 16272,
-	16036, 3212, 1352, 16260,
-	16040, 3136, 1436, 16248,
-	16044, 3056, 1520, 16236,
-	16052, 2980, 1604, 16224,
-	16060, 2896, 1688, 16212,
-	16064, 2816, 1776, 16200,
-	16076, 2732, 1864, 16188,
-	16084, 2648, 1952, 16176,
-	16092, 2564, 2040, 16164,
-	16104, 2476, 2128, 16152,
-	16116, 2388, 2216, 16140,
-	16128, 2304, 2304, 16128 };
+		0x0000, 0x1000, 0x0000, 0x0000,
+		0x3FDC, 0x0FFC, 0x0028, 0x0000,
+		0x3FB4, 0x0FF8, 0x0054, 0x0000,
+		0x3F94, 0x0FE8, 0x0084, 0x0000,
+		0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+		0x3F58, 0x0FC4, 0x00E8, 0x3FFC,
+		0x3F3C, 0x0FAC, 0x0120, 0x3FF8,
+		0x3F24, 0x0F90, 0x0158, 0x3FF4,
+		0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+		0x3EF8, 0x0F4C, 0x01D0, 0x3FEC,
+		0x3EE8, 0x0F20, 0x0210, 0x3FE8,
+		0x3ED8, 0x0EF4, 0x0254, 0x3FE0,
+		0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+		0x3EC0, 0x0E90, 0x02DC, 0x3FD4,
+		0x3EB8, 0x0E58, 0x0324, 0x3FCC,
+		0x3EB0, 0x0E20, 0x036C, 0x3FC4,
+		0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+		0x3EA8, 0x0DA4, 0x0404, 0x3FB0,
+		0x3EA4, 0x0D60, 0x0454, 0x3FA8,
+		0x3EA4, 0x0D1C, 0x04A4, 0x3F9C,
+		0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+		0x3EA8, 0x0C88, 0x0548, 0x3F88,
+		0x3EAC, 0x0C3C, 0x059C, 0x3F7C,
+		0x3EB0, 0x0BF0, 0x05F0, 0x3F70,
+		0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+		0x3EBC, 0x0B54, 0x0698, 0x3F58,
+		0x3EC4, 0x0B00, 0x06F0, 0x3F4C,
+		0x3ECC, 0x0AAC, 0x0748, 0x3F40,
+		0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+		0x3EE0, 0x0A04, 0x07F8, 0x3F24,
+		0x3EEC, 0x09AC, 0x0850, 0x3F18,
+		0x3EF8, 0x0954, 0x08A8, 0x3F0C,
+		0x3F00, 0x08FC, 0x0900, 0x3F04
+};
 
-static const uint16_t filter_4tap_64p_117[132] = {
-	420, 3248, 420, 0,
-	380, 3248, 464, 16380,
-	344, 3248, 508, 16372,
-	308, 3248, 552, 16368,
-	272, 3240, 596, 16364,
-	236, 3236, 644, 16356,
-	204, 3224, 692, 16352,
-	172, 3212, 744, 16344,
-	144, 3196, 796, 16340,
-	116, 3180, 848, 16332,
-	88, 3160, 900, 16324,
-	60, 3136, 956, 16320,
-	36, 3112, 1012, 16312,
-	16, 3084, 1068, 16304,
-	16380, 3056, 1124, 16296,
-	16360, 3024, 1184, 16292,
-	16340, 2992, 1244, 16284,
-	16324, 2956, 1304, 16276,
-	16308, 2920, 1364, 16268,
-	16292, 2880, 1424, 16264,
-	16280, 2836, 1484, 16256,
-	16268, 2792, 1548, 16252,
-	16256, 2748, 1608, 16244,
-	16248, 2700, 1668, 16240,
-	16240, 2652, 1732, 16232,
-	16232, 2604, 1792, 16228,
-	16228, 2552, 1856, 16224,
-	16220, 2500, 1916, 16220,
-	16216, 2444, 1980, 16216,
-	16216, 2388, 2040, 16216,
-	16212, 2332, 2100, 16212,
-	16212, 2276, 2160, 16212,
-	16212, 2220, 2220, 16212 };
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_116[132] = {
+		0x01A8, 0x0CB4, 0x01A4, 0x0000,
+		0x017C, 0x0CB8, 0x01D0, 0x3FFC,
+		0x0158, 0x0CB8, 0x01F8, 0x3FF8,
+		0x0130, 0x0CB4, 0x0228, 0x3FF4,
+		0x0110, 0x0CB0, 0x0254, 0x3FEC,
+		0x00EC, 0x0CA8, 0x0284, 0x3FE8,
+		0x00CC, 0x0C9C, 0x02B4, 0x3FE4,
+		0x00AC, 0x0C90, 0x02E8, 0x3FDC,
+		0x0090, 0x0C80, 0x031C, 0x3FD4,
+		0x0070, 0x0C70, 0x0350, 0x3FD0,
+		0x0058, 0x0C5C, 0x0384, 0x3FC8,
+		0x003C, 0x0C48, 0x03BC, 0x3FC0,
+		0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+		0x0010, 0x0C10, 0x042C, 0x3FB4,
+		0x3FFC, 0x0BF4, 0x0464, 0x3FAC,
+		0x3FE8, 0x0BD4, 0x04A0, 0x3FA4,
+		0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+		0x3FC4, 0x0B8C, 0x0518, 0x3F98,
+		0x3FB4, 0x0B68, 0x0554, 0x3F90,
+		0x3FA8, 0x0B40, 0x0590, 0x3F88,
+		0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+		0x3F90, 0x0AEC, 0x0608, 0x3F7C,
+		0x3F84, 0x0ABC, 0x0648, 0x3F78,
+		0x3F7C, 0x0A90, 0x0684, 0x3F70,
+		0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+		0x3F6C, 0x0A2C, 0x0700, 0x3F68,
+		0x3F64, 0x09F8, 0x0740, 0x3F64,
+		0x3F60, 0x09C4, 0x077C, 0x3F60,
+		0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+		0x3F58, 0x0958, 0x07F8, 0x3F58,
+		0x3F58, 0x091C, 0x0834, 0x3F58,
+		0x3F54, 0x08E4, 0x0870, 0x3F58,
+		0x3F54, 0x08AC, 0x08AC, 0x3F54
+};
 
-static const uint16_t filter_4tap_64p_150[132] = {
-	696, 2700, 696, 0,
-	660, 2704, 732, 16380,
-	628, 2704, 768, 16376,
-	596, 2704, 804, 16372,
-	564, 2700, 844, 16364,
-	532, 2696, 884, 16360,
-	500, 2692, 924, 16356,
-	472, 2684, 964, 16352,
-	440, 2676, 1004, 16352,
-	412, 2668, 1044, 16348,
-	384, 2656, 1088, 16344,
-	360, 2644, 1128, 16340,
-	332, 2632, 1172, 16336,
-	308, 2616, 1216, 16336,
-	284, 2600, 1260, 16332,
-	260, 2580, 1304, 16332,
-	236, 2560, 1348, 16328,
-	216, 2540, 1392, 16328,
-	196, 2516, 1436, 16328,
-	176, 2492, 1480, 16324,
-	156, 2468, 1524, 16324,
-	136, 2440, 1568, 16328,
-	120, 2412, 1612, 16328,
-	104, 2384, 1656, 16328,
-	88, 2352, 1700, 16332,
-	72, 2324, 1744, 16332,
-	60, 2288, 1788, 16336,
-	48, 2256, 1828, 16340,
-	36, 2220, 1872, 16344,
-	24, 2184, 1912, 16352,
-	12, 2148, 1952, 16356,
-	4, 2112, 1996, 16364,
-	16380, 2072, 2036, 16372 };
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_149[132] = {
+		0x02B8, 0x0A90, 0x02B8, 0x0000,
+		0x0294, 0x0A94, 0x02DC, 0x3FFC,
+		0x0274, 0x0A94, 0x0300, 0x3FF8,
+		0x0250, 0x0A94, 0x0328, 0x3FF4,
+		0x0230, 0x0A90, 0x0350, 0x3FF0,
+		0x0214, 0x0A8C, 0x0374, 0x3FEC,
+		0x01F0, 0x0A88, 0x03A0, 0x3FE8,
+		0x01D4, 0x0A80, 0x03C8, 0x3FE4,
+		0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+		0x0198, 0x0A70, 0x041C, 0x3FDC,
+		0x0180, 0x0A64, 0x0444, 0x3FD8,
+		0x0164, 0x0A54, 0x0470, 0x3FD8,
+		0x0148, 0x0A48, 0x049C, 0x3FD4,
+		0x0130, 0x0A38, 0x04C8, 0x3FD0,
+		0x0118, 0x0A24, 0x04F4, 0x3FD0,
+		0x0100, 0x0A14, 0x0520, 0x3FCC,
+		0x00E8, 0x0A00, 0x054C, 0x3FCC,
+		0x00D4, 0x09E8, 0x057C, 0x3FC8,
+		0x00C0, 0x09D0, 0x05A8, 0x3FC8,
+		0x00AC, 0x09B8, 0x05D4, 0x3FC8,
+		0x0098, 0x09A0, 0x0600, 0x3FC8,
+		0x0084, 0x0984, 0x0630, 0x3FC8,
+		0x0074, 0x0964, 0x065C, 0x3FCC,
+		0x0064, 0x0948, 0x0688, 0x3FCC,
+		0x0054, 0x0928, 0x06B4, 0x3FD0,
+		0x0044, 0x0908, 0x06E0, 0x3FD4,
+		0x0038, 0x08E8, 0x070C, 0x3FD4,
+		0x002C, 0x08C4, 0x0738, 0x3FD8,
+		0x001C, 0x08A4, 0x0760, 0x3FE0,
+		0x0014, 0x087C, 0x078C, 0x3FE4,
+		0x0008, 0x0858, 0x07B4, 0x3FEC,
+		0x0000, 0x0830, 0x07DC, 0x3FF4,
+		0x3FFC, 0x0804, 0x0804, 0x3FFC
+};
 
+//=========================================
+// <num_taps>    = 4
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_4tap_64p_183[132] = {
-	944, 2204, 944, 0,
-	916, 2204, 972, 0,
-	888, 2200, 996, 0,
-	860, 2200, 1024, 4,
-	832, 2196, 1052, 4,
-	808, 2192, 1080, 8,
-	780, 2188, 1108, 12,
-	756, 2180, 1140, 12,
-	728, 2176, 1168, 16,
-	704, 2168, 1196, 20,
-	680, 2160, 1224, 24,
-	656, 2152, 1252, 28,
-	632, 2144, 1280, 36,
-	608, 2132, 1308, 40,
-	584, 2120, 1336, 48,
-	560, 2112, 1364, 52,
-	536, 2096, 1392, 60,
-	516, 2084, 1420, 68,
-	492, 2072, 1448, 76,
-	472, 2056, 1476, 84,
-	452, 2040, 1504, 92,
-	428, 2024, 1532, 100,
-	408, 2008, 1560, 112,
-	392, 1992, 1584, 120,
-	372, 1972, 1612, 132,
-	352, 1956, 1636, 144,
-	336, 1936, 1664, 156,
-	316, 1916, 1688, 168,
-	300, 1896, 1712, 180,
-	284, 1876, 1736, 192,
-	268, 1852, 1760, 208,
-	252, 1832, 1784, 220,
-	236, 1808, 1808, 236 };
+		0x03B0, 0x08A0, 0x03B0, 0x0000,
+		0x0394, 0x08A0, 0x03CC, 0x0000,
+		0x037C, 0x089C, 0x03E8, 0x0000,
+		0x0360, 0x089C, 0x0400, 0x0004,
+		0x0348, 0x0898, 0x041C, 0x0004,
+		0x032C, 0x0894, 0x0438, 0x0008,
+		0x0310, 0x0890, 0x0454, 0x000C,
+		0x02F8, 0x0888, 0x0474, 0x000C,
+		0x02DC, 0x0884, 0x0490, 0x0010,
+		0x02C4, 0x087C, 0x04AC, 0x0014,
+		0x02AC, 0x0874, 0x04C8, 0x0018,
+		0x0290, 0x086C, 0x04E4, 0x0020,
+		0x0278, 0x0864, 0x0500, 0x0024,
+		0x0264, 0x0858, 0x051C, 0x0028,
+		0x024C, 0x084C, 0x0538, 0x0030,
+		0x0234, 0x0844, 0x0554, 0x0034,
+		0x021C, 0x0838, 0x0570, 0x003C,
+		0x0208, 0x0828, 0x058C, 0x0044,
+		0x01F0, 0x081C, 0x05A8, 0x004C,
+		0x01DC, 0x080C, 0x05C4, 0x0054,
+		0x01C8, 0x07FC, 0x05E0, 0x005C,
+		0x01B4, 0x07EC, 0x05FC, 0x0064,
+		0x019C, 0x07DC, 0x0618, 0x0070,
+		0x018C, 0x07CC, 0x0630, 0x0078,
+		0x0178, 0x07B8, 0x064C, 0x0084,
+		0x0164, 0x07A8, 0x0664, 0x0090,
+		0x0150, 0x0794, 0x0680, 0x009C,
+		0x0140, 0x0780, 0x0698, 0x00A8,
+		0x0130, 0x076C, 0x06B0, 0x00B4,
+		0x0120, 0x0758, 0x06C8, 0x00C0,
+		0x0110, 0x0740, 0x06E0, 0x00D0,
+		0x0100, 0x072C, 0x06F8, 0x00DC,
+		0x00F0, 0x0714, 0x0710, 0x00EC
+};
 
+//=========================================
+// <num_taps>    = 5
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_5tap_64p_upscale[165] = {
-	15936, 2496, 2496, 15936, 0,
-	15948, 2404, 2580, 15924, 0,
-	15960, 2312, 2664, 15912, 4,
-	15976, 2220, 2748, 15904, 8,
-	15992, 2128, 2832, 15896, 12,
-	16004, 2036, 2912, 15888, 16,
-	16020, 1944, 2992, 15880, 20,
-	16036, 1852, 3068, 15876, 20,
-	16056, 1760, 3140, 15876, 24,
-	16072, 1668, 3216, 15872, 28,
-	16088, 1580, 3284, 15872, 32,
-	16104, 1492, 3352, 15876, 32,
-	16120, 1404, 3420, 15876, 36,
-	16140, 1316, 3480, 15884, 40,
-	16156, 1228, 3540, 15892, 40,
-	16172, 1144, 3600, 15900, 40,
-	16188, 1060, 3652, 15908, 44,
-	16204, 980, 3704, 15924, 44,
-	16220, 900, 3756, 15936, 44,
-	16236, 824, 3800, 15956, 44,
-	16248, 744, 3844, 15972, 44,
-	16264, 672, 3884, 15996, 44,
-	16276, 600, 3920, 16020, 44,
-	16292, 528, 3952, 16044, 40,
-	16304, 460, 3980, 16072, 40,
-	16316, 396, 4008, 16104, 36,
-	16328, 332, 4032, 16136, 32,
-	16336, 272, 4048, 16172, 28,
-	16348, 212, 4064, 16208, 24,
-	16356, 156, 4080, 16248, 16,
-	16368, 100, 4088, 16292, 12,
-	16376, 48, 4092, 16336, 4,
-	0, 0, 4096, 0, 0 };
+		0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000,
+		0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000,
+		0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004,
+		0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008,
+		0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C,
+		0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010,
+		0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014,
+		0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014,
+		0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018,
+		0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C,
+		0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020,
+		0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020,
+		0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024,
+		0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028,
+		0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028,
+		0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028,
+		0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C,
+		0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C,
+		0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C,
+		0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C,
+		0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C,
+		0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C,
+		0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C,
+		0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028,
+		0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028,
+		0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024,
+		0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020,
+		0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C,
+		0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018,
+		0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010,
+		0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C,
+		0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004,
+		0x0000, 0x0000, 0x1000, 0x0000, 0x0000
+};
 
-static const uint16_t filter_5tap_64p_117[165] = {
-	16056, 2372, 2372, 16056, 0,
-	16052, 2312, 2432, 16060, 0,
-	16052, 2252, 2488, 16064, 0,
-	16052, 2188, 2548, 16072, 0,
-	16052, 2124, 2600, 16076, 0,
-	16052, 2064, 2656, 16088, 0,
-	16052, 2000, 2708, 16096, 0,
-	16056, 1932, 2760, 16108, 0,
-	16060, 1868, 2808, 16120, 0,
-	16064, 1804, 2856, 16132, 0,
-	16068, 1740, 2904, 16148, 16380,
-	16076, 1676, 2948, 16164, 16380,
-	16080, 1612, 2992, 16180, 16376,
-	16088, 1544, 3032, 16200, 16372,
-	16096, 1480, 3072, 16220, 16372,
-	16104, 1420, 3108, 16244, 16368,
-	16112, 1356, 3144, 16268, 16364,
-	16120, 1292, 3180, 16292, 16360,
-	16128, 1232, 3212, 16320, 16356,
-	16136, 1168, 3240, 16344, 16352,
-	16144, 1108, 3268, 16376, 16344,
-	16156, 1048, 3292, 20, 16340,
-	16164, 988, 3316, 52, 16332,
-	16172, 932, 3336, 88, 16328,
-	16184, 872, 3356, 124, 16320,
-	16192, 816, 3372, 160, 16316,
-	16204, 760, 3388, 196, 16308,
-	16212, 708, 3400, 236, 16300,
-	16220, 656, 3412, 276, 16292,
-	16232, 604, 3420, 320, 16284,
-	16240, 552, 3424, 364, 16276,
-	16248, 504, 3428, 408, 16268,
-	16256, 456, 3428, 456, 16256 };
+//=========================================
+// <num_taps>    = 5
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_116[165] = {
+		0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000,
+		0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000,
+		0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000,
+		0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000,
+		0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000,
+		0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000,
+		0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000,
+		0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000,
+		0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC,
+		0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC,
+		0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8,
+		0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8,
+		0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4,
+		0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0,
+		0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC,
+		0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8,
+		0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4,
+		0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0,
+		0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC,
+		0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8,
+		0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0,
+		0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC,
+		0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4,
+		0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0,
+		0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8,
+		0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0,
+		0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC,
+		0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4,
+		0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C,
+		0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94,
+		0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C,
+		0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84,
+		0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C
+};
 
-static const uint16_t filter_5tap_64p_150[165] = {
-	16368, 2064, 2064, 16368, 0,
-	16352, 2028, 2100, 16380, 16380,
-	16340, 1996, 2132, 12, 16376,
-	16328, 1960, 2168, 24, 16376,
-	16316, 1924, 2204, 44, 16372,
-	16308, 1888, 2236, 60, 16368,
-	16296, 1848, 2268, 76, 16364,
-	16288, 1812, 2300, 96, 16360,
-	16280, 1772, 2328, 116, 16356,
-	16272, 1736, 2360, 136, 16352,
-	16268, 1696, 2388, 160, 16348,
-	16260, 1656, 2416, 180, 16344,
-	16256, 1616, 2440, 204, 16340,
-	16248, 1576, 2464, 228, 16336,
-	16244, 1536, 2492, 252, 16332,
-	16240, 1496, 2512, 276, 16324,
-	16240, 1456, 2536, 304, 16320,
-	16236, 1416, 2556, 332, 16316,
-	16232, 1376, 2576, 360, 16312,
-	16232, 1336, 2592, 388, 16308,
-	16232, 1296, 2612, 416, 16300,
-	16232, 1256, 2628, 448, 16296,
-	16232, 1216, 2640, 480, 16292,
-	16232, 1172, 2652, 512, 16288,
-	16232, 1132, 2664, 544, 16284,
-	16232, 1092, 2676, 576, 16280,
-	16236, 1056, 2684, 608, 16272,
-	16236, 1016, 2692, 644, 16268,
-	16240, 976, 2700, 680, 16264,
-	16240, 936, 2704, 712, 16260,
-	16244, 900, 2708, 748, 16256,
-	16248, 860, 2708, 788, 16252,
-	16248, 824, 2708, 824, 16248 };
+//=========================================
+// <num_taps>    = 5
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_149[165] = {
+		0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000,
+		0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000,
+		0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC,
+		0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8,
+		0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4,
+		0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4,
+		0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0,
+		0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC,
+		0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8,
+		0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4,
+		0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0,
+		0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC,
+		0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8,
+		0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4,
+		0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC,
+		0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8,
+		0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4,
+		0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0,
+		0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC,
+		0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4,
+		0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0,
+		0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC,
+		0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8,
+		0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4,
+		0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0,
+		0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98,
+		0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94,
+		0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90,
+		0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C,
+		0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88,
+		0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84,
+		0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80,
+		0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C
+};
 
+//=========================================
+// <num_taps>    = 5
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_5tap_64p_183[165] = {
-	228, 1816, 1816, 228, 0,
-	216, 1792, 1836, 248, 16380,
-	200, 1772, 1860, 264, 16376,
-	184, 1748, 1884, 280, 16376,
-	168, 1728, 1904, 300, 16372,
-	156, 1704, 1928, 316, 16368,
-	144, 1680, 1948, 336, 16364,
-	128, 1656, 1968, 356, 16364,
-	116, 1632, 1988, 376, 16360,
-	104, 1604, 2008, 396, 16356,
-	96, 1580, 2024, 416, 16356,
-	84, 1556, 2044, 440, 16352,
-	72, 1528, 2060, 460, 16348,
-	64, 1504, 2076, 484, 16348,
-	52, 1476, 2092, 504, 16344,
-	44, 1448, 2104, 528, 16344,
-	36, 1424, 2120, 552, 16340,
-	28, 1396, 2132, 576, 16340,
-	20, 1368, 2144, 600, 16340,
-	12, 1340, 2156, 624, 16336,
-	4, 1312, 2168, 652, 16336,
-	0, 1284, 2180, 676, 16336,
-	16376, 1256, 2188, 700, 16332,
-	16372, 1228, 2196, 728, 16332,
-	16368, 1200, 2204, 752, 16332,
-	16364, 1172, 2212, 780, 16332,
-	16356, 1144, 2216, 808, 16332,
-	16352, 1116, 2220, 836, 16332,
-	16352, 1084, 2224, 860, 16332,
-	16348, 1056, 2228, 888, 16336,
-	16344, 1028, 2232, 916, 16336,
-	16340, 1000, 2232, 944, 16336,
-	16340, 972, 2232, 972, 16340 };
+		0x0168, 0x069C, 0x0698, 0x0164, 0x0000,
+		0x0154, 0x068C, 0x06AC, 0x0174, 0x0000,
+		0x0144, 0x0674, 0x06C0, 0x0188, 0x0000,
+		0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC,
+		0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC,
+		0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC,
+		0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8,
+		0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8,
+		0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8,
+		0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8,
+		0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4,
+		0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4,
+		0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4,
+		0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4,
+		0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4,
+		0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4,
+		0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4,
+		0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8,
+		0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8,
+		0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8,
+		0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8,
+		0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC,
+		0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC,
+		0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000,
+		0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000,
+		0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000,
+		0x003C, 0x0488, 0x07C8, 0x0370, 0x0004,
+		0x0038, 0x0470, 0x07CC, 0x0384, 0x0008,
+		0x0030, 0x045C, 0x07D0, 0x039C, 0x0008,
+		0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C,
+		0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010,
+		0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018,
+		0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C
+};
 
+//=========================================
+// <num_taps>    = 6
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_6tap_64p_upscale[198] = {
-	0, 0, 4092, 0, 0, 0,
-	12, 16332, 4092, 52, 16368, 0,
-	24, 16280, 4088, 108, 16356, 0,
-	36, 16236, 4080, 168, 16340, 0,
-	44, 16188, 4064, 228, 16324, 0,
-	56, 16148, 4052, 292, 16308, 0,
-	64, 16108, 4032, 356, 16292, 4,
-	72, 16072, 4008, 424, 16276, 4,
-	80, 16036, 3980, 492, 16256, 4,
-	88, 16004, 3952, 564, 16240, 8,
-	96, 15972, 3920, 636, 16220, 8,
-	100, 15944, 3884, 712, 16204, 12,
-	108, 15916, 3844, 788, 16184, 16,
-	112, 15896, 3800, 864, 16164, 20,
-	116, 15872, 3756, 944, 16144, 20,
-	120, 15852, 3708, 1024, 16124, 24,
-	120, 15836, 3656, 1108, 16104, 28,
-	124, 15824, 3600, 1192, 16084, 32,
-	124, 15808, 3544, 1276, 16064, 36,
-	124, 15800, 3484, 1360, 16044, 40,
-	128, 15792, 3420, 1448, 16024, 44,
-	128, 15784, 3352, 1536, 16004, 48,
-	124, 15780, 3288, 1624, 15988, 52,
-	124, 15776, 3216, 1712, 15968, 56,
-	124, 15776, 3144, 1800, 15948, 64,
-	120, 15776, 3068, 1888, 15932, 68,
-	120, 15780, 2992, 1976, 15912, 72,
-	116, 15784, 2916, 2064, 15896, 76,
-	112, 15792, 2836, 2152, 15880, 80,
-	108, 15796, 2752, 2244, 15868, 84,
-	104, 15804, 2672, 2328, 15852, 88,
-	104, 15816, 2588, 2416, 15840, 92,
-	100, 15828, 2504, 2504, 15828, 100 };
+		0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+		0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000,
+		0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000,
+		0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000,
+		0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000,
+		0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000,
+		0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004,
+		0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004,
+		0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004,
+		0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008,
+		0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008,
+		0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C,
+		0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010,
+		0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014,
+		0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014,
+		0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018,
+		0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C,
+		0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020,
+		0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024,
+		0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028,
+		0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C,
+		0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030,
+		0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034,
+		0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038,
+		0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040,
+		0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044,
+		0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048,
+		0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C,
+		0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050,
+		0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054,
+		0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058,
+		0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C,
+		0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064
+};
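
The array sizes are also telling: 165 = 33 * 5, 198 = 33 * 6, 231 = 33 * 7 and 264 = 33 * 8, so only 33 of the 64 phases are stored per table. The usual reason is that a symmetric kernel lets the remaining phases be derived by mirroring the stored rows; that is an inference from the sizes and the near-symmetric final rows, not something this diff states. One plausible reconstruction under that assumption:

	#include <stdint.h>

	/* Hypothetical expansion of a 33-row half table into all 64
	 * phases, assuming phase p (p > 32) reuses the reversed row of
	 * phase 64 - p. 'taps' is 5..8; 'half' holds (64/2 + 1) * taps
	 * entries, as in the tables above; 'full' holds 64 * taps. */
	static void expand_phases(const uint16_t *half, uint16_t *full, int taps)
	{
		int p, t;

		for (p = 0; p < 64; p++) {
			const uint16_t *src = &half[(p <= 32 ? p : 64 - p) * taps];

			for (t = 0; t < taps; t++)
				full[p * taps + t] =
					(p <= 32) ? src[t] : src[taps - 1 - t];
		}
	}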
 
-static const uint16_t filter_6tap_64p_117[198] = {
-	16168, 476, 3568, 476, 16168, 0,
-	16180, 428, 3564, 528, 16156, 0,
-	16192, 376, 3556, 584, 16144, 4,
-	16204, 328, 3548, 636, 16128, 4,
-	16216, 280, 3540, 692, 16116, 8,
-	16228, 232, 3524, 748, 16104, 12,
-	16240, 188, 3512, 808, 16092, 12,
-	16252, 148, 3492, 864, 16080, 16,
-	16264, 104, 3472, 924, 16068, 16,
-	16276, 64, 3452, 984, 16056, 20,
-	16284, 28, 3428, 1044, 16048, 24,
-	16296, 16376, 3400, 1108, 16036, 24,
-	16304, 16340, 3372, 1168, 16024, 28,
-	16316, 16304, 3340, 1232, 16016, 32,
-	16324, 16272, 3308, 1296, 16004, 32,
-	16332, 16244, 3272, 1360, 15996, 36,
-	16344, 16212, 3236, 1424, 15988, 36,
-	16352, 16188, 3200, 1488, 15980, 40,
-	16360, 16160, 3160, 1552, 15972, 40,
-	16368, 16136, 3116, 1616, 15964, 40,
-	16372, 16112, 3072, 1680, 15956, 44,
-	16380, 16092, 3028, 1744, 15952, 44,
-	0, 16072, 2980, 1808, 15948, 44,
-	8, 16052, 2932, 1872, 15944, 48,
-	12, 16036, 2880, 1936, 15940, 48,
-	16, 16020, 2828, 2000, 15936, 48,
-	20, 16008, 2776, 2064, 15936, 48,
-	24, 15996, 2724, 2128, 15936, 48,
-	28, 15984, 2668, 2192, 15936, 48,
-	32, 15972, 2612, 2252, 15940, 44,
-	36, 15964, 2552, 2316, 15940, 44,
-	40, 15956, 2496, 2376, 15944, 44,
-	40, 15952, 2436, 2436, 15952, 40 };
+//=========================================
+// <num_taps>    = 6
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_116[198] = {
+		0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000,
+		0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000,
+		0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004,
+		0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004,
+		0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008,
+		0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008,
+		0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C,
+		0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010,
+		0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010,
+		0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014,
+		0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014,
+		0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018,
+		0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018,
+		0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018,
+		0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C,
+		0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C,
+		0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C,
+		0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020,
+		0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020,
+		0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020,
+		0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020,
+		0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020,
+		0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020,
+		0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020,
+		0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020,
+		0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020,
+		0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C,
+		0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C,
+		0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C,
+		0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018,
+		0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014,
+		0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014,
+		0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010
+};
 
-static const uint16_t filter_6tap_64p_150[198] = {
-	16148, 920, 2724, 920, 16148, 0,
-	16152, 880, 2724, 956, 16148, 0,
-	16152, 844, 2720, 996, 16144, 0,
-	16156, 804, 2716, 1032, 16144, 0,
-	16156, 768, 2712, 1072, 16144, 0,
-	16160, 732, 2708, 1112, 16144, 16380,
-	16164, 696, 2700, 1152, 16144, 16380,
-	16168, 660, 2692, 1192, 16148, 16380,
-	16172, 628, 2684, 1232, 16148, 16380,
-	16176, 592, 2672, 1272, 16152, 16376,
-	16180, 560, 2660, 1312, 16152, 16376,
-	16184, 524, 2648, 1348, 16156, 16376,
-	16192, 492, 2632, 1388, 16160, 16372,
-	16196, 460, 2616, 1428, 16164, 16372,
-	16200, 432, 2600, 1468, 16168, 16368,
-	16204, 400, 2584, 1508, 16176, 16364,
-	16212, 368, 2564, 1548, 16180, 16364,
-	16216, 340, 2544, 1588, 16188, 16360,
-	16220, 312, 2524, 1628, 16196, 16356,
-	16228, 284, 2504, 1668, 16204, 16356,
-	16232, 256, 2480, 1704, 16212, 16352,
-	16240, 232, 2456, 1744, 16224, 16348,
-	16244, 204, 2432, 1780, 16232, 16344,
-	16248, 180, 2408, 1820, 16244, 16340,
-	16256, 156, 2380, 1856, 16256, 16336,
-	16260, 132, 2352, 1896, 16268, 16332,
-	16268, 108, 2324, 1932, 16280, 16328,
-	16272, 88, 2296, 1968, 16292, 16324,
-	16276, 64, 2268, 2004, 16308, 16320,
-	16284, 44, 2236, 2036, 16324, 16312,
-	16288, 24, 2204, 2072, 16340, 16308,
-	16292, 8, 2172, 2108, 16356, 16304,
-	16300, 16372, 2140, 2140, 16372, 16300 };
+//=========================================
+// <num_taps>    = 6
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_149[198] = {
+		0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000,
+		0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000,
+		0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000,
+		0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000,
+		0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000,
+		0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000,
+		0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000,
+		0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000,
+		0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000,
+		0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC,
+		0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC,
+		0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8,
+		0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8,
+		0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4,
+		0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4,
+		0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0,
+		0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0,
+		0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC,
+		0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8,
+		0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8,
+		0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4,
+		0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0,
+		0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC,
+		0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8,
+		0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4,
+		0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0,
+		0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC,
+		0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8,
+		0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4,
+		0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0,
+		0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8,
+		0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4,
+		0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0
+};
 
+//=========================================
+// <num_taps>    = 6
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_6tap_64p_183[198] = {
-	16296, 1032, 2196, 1032, 16296, 0,
-	16292, 1004, 2200, 1060, 16304, 16380,
-	16288, 976, 2200, 1088, 16308, 16380,
-	16284, 952, 2196, 1116, 16312, 16376,
-	16284, 924, 2196, 1144, 16320, 16376,
-	16280, 900, 2192, 1172, 16324, 16372,
-	16276, 872, 2192, 1200, 16332, 16368,
-	16276, 848, 2188, 1228, 16340, 16368,
-	16272, 820, 2180, 1256, 16348, 16364,
-	16272, 796, 2176, 1280, 16356, 16360,
-	16268, 768, 2168, 1308, 16364, 16360,
-	16268, 744, 2164, 1336, 16372, 16356,
-	16268, 716, 2156, 1364, 16380, 16352,
-	16264, 692, 2148, 1392, 4, 16352,
-	16264, 668, 2136, 1420, 16, 16348,
-	16264, 644, 2128, 1448, 28, 16344,
-	16264, 620, 2116, 1472, 36, 16340,
-	16264, 596, 2108, 1500, 48, 16340,
-	16268, 572, 2096, 1524, 60, 16336,
-	16268, 548, 2080, 1552, 72, 16332,
-	16268, 524, 2068, 1576, 88, 16328,
-	16268, 504, 2056, 1604, 100, 16324,
-	16272, 480, 2040, 1628, 112, 16324,
-	16272, 456, 2024, 1652, 128, 16320,
-	16272, 436, 2008, 1680, 144, 16316,
-	16276, 416, 1992, 1704, 156, 16312,
-	16276, 392, 1976, 1724, 172, 16308,
-	16280, 372, 1956, 1748, 188, 16308,
-	16280, 352, 1940, 1772, 204, 16304,
-	16284, 332, 1920, 1796, 224, 16300,
-	16288, 312, 1900, 1816, 240, 16296,
-	16288, 296, 1880, 1840, 256, 16296,
-	16292, 276, 1860, 1860, 276, 16292 };
+		0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000,
+		0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000,
+		0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC,
+		0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC,
+		0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8,
+		0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8,
+		0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4,
+		0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4,
+		0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0,
+		0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0,
+		0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC,
+		0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC,
+		0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8,
+		0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8,
+		0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4,
+		0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4,
+		0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0,
+		0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0,
+		0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC,
+		0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC,
+		0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8,
+		0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8,
+		0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8,
+		0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4,
+		0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4,
+		0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4,
+		0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0,
+		0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0,
+		0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0,
+		0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0,
+		0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0,
+		0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0,
+		0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC
+};
 
+//=========================================
+// <num_taps>    = 7
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_7tap_64p_upscale[231] = {
-	176, 15760, 2488, 2488, 15760, 176, 0,
-	172, 15772, 2404, 2572, 15752, 180, 16380,
-	168, 15784, 2324, 2656, 15740, 184, 16380,
-	164, 15800, 2240, 2736, 15732, 188, 16376,
-	160, 15812, 2152, 2816, 15728, 192, 16376,
-	152, 15828, 2068, 2896, 15724, 192, 16376,
-	148, 15848, 1984, 2972, 15720, 196, 16372,
-	140, 15864, 1896, 3048, 15720, 196, 16372,
-	136, 15884, 1812, 3124, 15720, 196, 16368,
-	128, 15900, 1724, 3196, 15720, 196, 16368,
-	120, 15920, 1640, 3268, 15724, 196, 16368,
-	116, 15940, 1552, 3336, 15732, 196, 16364,
-	108, 15964, 1468, 3400, 15740, 196, 16364,
-	104, 15984, 1384, 3464, 15748, 192, 16364,
-	96, 16004, 1300, 3524, 15760, 188, 16364,
-	88, 16028, 1216, 3584, 15776, 184, 16364,
-	84, 16048, 1132, 3640, 15792, 180, 16360,
-	76, 16072, 1048, 3692, 15812, 176, 16360,
-	68, 16092, 968, 3744, 15832, 168, 16360,
-	64, 16116, 888, 3788, 15856, 160, 16360,
-	56, 16140, 812, 3832, 15884, 152, 16360,
-	52, 16160, 732, 3876, 15912, 144, 16360,
-	44, 16184, 656, 3912, 15944, 136, 16364,
-	40, 16204, 584, 3944, 15976, 124, 16364,
-	32, 16228, 512, 3976, 16012, 116, 16364,
-	28, 16248, 440, 4004, 16048, 104, 16364,
-	24, 16268, 372, 4028, 16092, 88, 16368,
-	20, 16288, 304, 4048, 16132, 76, 16368,
-	12, 16308, 240, 4064, 16180, 60, 16372,
-	8, 16328, 176, 4076, 16228, 48, 16372,
-	4, 16348, 112, 4088, 16276, 32, 16376,
-	0, 16364, 56, 4092, 16328, 16, 16380,
-	0, 0, 0, 4096, 0, 0, 0 };
+		0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000,
+		0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000,
+		0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000,
+		0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC,
+		0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC,
+		0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8,
+		0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8,
+		0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8,
+		0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4,
+		0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4,
+		0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4,
+		0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0,
+		0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0,
+		0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0,
+		0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC,
+		0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC,
+		0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC,
+		0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC,
+		0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC,
+		0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC,
+		0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC,
+		0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC,
+		0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC,
+		0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0,
+		0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0,
+		0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0,
+		0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4,
+		0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4,
+		0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8,
+		0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8,
+		0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC,
+		0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000,
+		0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000
+};
 
-static const uint16_t filter_7tap_64p_117[231] = {
-	92, 15868, 2464, 2464, 15868, 92, 0,
-	96, 15864, 2404, 2528, 15876, 88, 0,
-	100, 15860, 2344, 2584, 15884, 84, 0,
-	104, 15856, 2280, 2644, 15892, 76, 0,
-	108, 15852, 2216, 2700, 15904, 72, 0,
-	108, 15852, 2152, 2756, 15916, 64, 0,
-	112, 15852, 2088, 2812, 15932, 60, 0,
-	112, 15852, 2024, 2864, 15948, 52, 0,
-	112, 15856, 1960, 2916, 15964, 44, 0,
-	116, 15860, 1892, 2964, 15984, 36, 0,
-	116, 15864, 1828, 3016, 16004, 24, 4,
-	116, 15868, 1760, 3060, 16024, 16, 4,
-	116, 15876, 1696, 3108, 16048, 8, 8,
-	116, 15884, 1628, 3152, 16072, 16380, 8,
-	112, 15892, 1564, 3192, 16100, 16372, 8,
-	112, 15900, 1496, 3232, 16124, 16360, 12,
-	112, 15908, 1428, 3268, 16156, 16348, 12,
-	108, 15920, 1364, 3304, 16188, 16336, 16,
-	108, 15928, 1300, 3340, 16220, 16324, 20,
-	104, 15940, 1232, 3372, 16252, 16312, 20,
-	104, 15952, 1168, 3400, 16288, 16300, 24,
-	100, 15964, 1104, 3428, 16328, 16284, 28,
-	96, 15980, 1040, 3452, 16364, 16272, 28,
-	96, 15992, 976, 3476, 20, 16256, 32,
-	92, 16004, 916, 3496, 64, 16244, 36,
-	88, 16020, 856, 3516, 108, 16228, 40,
-	84, 16032, 792, 3532, 152, 16216, 44,
-	80, 16048, 732, 3544, 200, 16200, 48,
-	80, 16064, 676, 3556, 248, 16184, 48,
-	76, 16080, 616, 3564, 296, 16168, 52,
-	72, 16092, 560, 3568, 344, 16156, 56,
-	68, 16108, 504, 3572, 396, 16140, 60,
-	64, 16124, 452, 3576, 452, 16124, 64 };
+//=========================================
+// <num_taps>    = 7
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_116[231] = {
+		0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000,
+		0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000,
+		0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000,
+		0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000,
+		0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000,
+		0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004,
+		0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004,
+		0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004,
+		0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008,
+		0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008,
+		0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C,
+		0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C,
+		0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010,
+		0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010,
+		0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014,
+		0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014,
+		0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018,
+		0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C,
+		0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C,
+		0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020,
+		0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024,
+		0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024,
+		0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028,
+		0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C,
+		0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030,
+		0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030,
+		0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034,
+		0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038,
+		0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038,
+		0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C,
+		0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040,
+		0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040,
+		0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044
+};
 
-static const uint16_t filter_7tap_64p_150[231] = {
-	16224, 16380, 2208, 2208, 16380, 16224, 0,
-	16232, 16360, 2172, 2236, 16, 16216, 0,
-	16236, 16340, 2140, 2268, 40, 16212, 0,
-	16244, 16324, 2104, 2296, 60, 16204, 4,
-	16252, 16304, 2072, 2324, 84, 16196, 4,
-	16256, 16288, 2036, 2352, 108, 16192, 4,
-	16264, 16268, 2000, 2380, 132, 16184, 8,
-	16272, 16252, 1960, 2408, 160, 16176, 8,
-	16276, 16240, 1924, 2432, 184, 16172, 8,
-	16284, 16224, 1888, 2456, 212, 16164, 8,
-	16288, 16212, 1848, 2480, 240, 16160, 12,
-	16296, 16196, 1812, 2500, 268, 16152, 12,
-	16300, 16184, 1772, 2524, 296, 16144, 12,
-	16308, 16172, 1736, 2544, 324, 16140, 12,
-	16312, 16164, 1696, 2564, 356, 16136, 12,
-	16320, 16152, 1656, 2584, 388, 16128, 12,
-	16324, 16144, 1616, 2600, 416, 16124, 12,
-	16328, 16136, 1576, 2616, 448, 16116, 12,
-	16332, 16128, 1536, 2632, 480, 16112, 12,
-	16340, 16120, 1496, 2648, 516, 16108, 12,
-	16344, 16112, 1456, 2660, 548, 16104, 12,
-	16348, 16104, 1416, 2672, 580, 16100, 12,
-	16352, 16100, 1376, 2684, 616, 16096, 12,
-	16356, 16096, 1336, 2696, 652, 16092, 12,
-	16360, 16092, 1296, 2704, 688, 16088, 12,
-	16364, 16088, 1256, 2712, 720, 16084, 12,
-	16368, 16084, 1220, 2720, 760, 16084, 8,
-	16368, 16080, 1180, 2724, 796, 16080, 8,
-	16372, 16080, 1140, 2732, 832, 16080, 8,
-	16376, 16076, 1100, 2732, 868, 16076, 4,
-	16380, 16076, 1060, 2736, 908, 16076, 4,
-	16380, 16076, 1020, 2740, 944, 16076, 0,
-	0, 16076, 984, 2740, 984, 16076, 0 };
+//=========================================
+// <num_taps>    = 7
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_149[231] = {
+		0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000,
+		0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000,
+		0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000,
+		0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004,
+		0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004,
+		0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004,
+		0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008,
+		0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008,
+		0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008,
+		0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008,
+		0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C,
+		0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C,
+		0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C,
+		0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C,
+		0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010,
+		0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010,
+		0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010,
+		0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010,
+		0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010,
+		0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010,
+		0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010,
+		0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010,
+		0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010,
+		0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010,
+		0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C,
+		0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C,
+		0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C,
+		0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C,
+		0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008,
+		0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008,
+		0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008,
+		0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004,
+		0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004
+};
 
+//=========================================
+// <num_taps>    = 7
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_7tap_64p_183[231] = {
-	16216, 324, 1884, 1884, 324, 16216, 0,
-	16220, 304, 1864, 1904, 344, 16216, 0,
-	16224, 284, 1844, 1924, 364, 16216, 0,
-	16224, 264, 1824, 1944, 384, 16212, 16380,
-	16228, 248, 1804, 1960, 408, 16212, 16380,
-	16228, 228, 1784, 1976, 428, 16208, 16380,
-	16232, 212, 1760, 1996, 452, 16208, 16380,
-	16236, 192, 1740, 2012, 472, 16208, 16376,
-	16240, 176, 1716, 2028, 496, 16208, 16376,
-	16240, 160, 1696, 2040, 516, 16208, 16376,
-	16244, 144, 1672, 2056, 540, 16208, 16376,
-	16248, 128, 1648, 2068, 564, 16208, 16372,
-	16252, 112, 1624, 2084, 588, 16208, 16372,
-	16256, 96, 1600, 2096, 612, 16208, 16368,
-	16256, 84, 1576, 2108, 636, 16208, 16368,
-	16260, 68, 1552, 2120, 660, 16208, 16368,
-	16264, 56, 1524, 2132, 684, 16212, 16364,
-	16268, 40, 1500, 2140, 712, 16212, 16364,
-	16272, 28, 1476, 2152, 736, 16216, 16360,
-	16276, 16, 1448, 2160, 760, 16216, 16356,
-	16280, 4, 1424, 2168, 788, 16220, 16356,
-	16284, 16376, 1396, 2176, 812, 16224, 16352,
-	16288, 16368, 1372, 2184, 840, 16224, 16352,
-	16292, 16356, 1344, 2188, 864, 16228, 16348,
-	16292, 16344, 1320, 2196, 892, 16232, 16344,
-	16296, 16336, 1292, 2200, 916, 16236, 16344,
-	16300, 16324, 1264, 2204, 944, 16240, 16340,
-	16304, 16316, 1240, 2208, 972, 16248, 16336,
-	16308, 16308, 1212, 2212, 996, 16252, 16332,
-	16312, 16300, 1184, 2216, 1024, 16256, 16332,
-	16316, 16292, 1160, 2216, 1052, 16264, 16328,
-	16316, 16284, 1132, 2216, 1076, 16268, 16324,
-	16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+		0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000,
+		0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000,
+		0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000,
+		0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC,
+		0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC,
+		0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC,
+		0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8,
+		0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8,
+		0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4,
+		0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4,
+		0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0,
+		0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0,
+		0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC,
+		0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC,
+		0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8,
+		0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8,
+		0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4,
+		0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4,
+		0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0,
+		0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC,
+		0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC,
+		0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8,
+		0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8,
+		0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4,
+		0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4,
+		0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0,
+		0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC,
+		0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC,
+		0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8,
+		0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8,
+		0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4,
+		0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4,
+		0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0
+};
 
+//=========================================
+// <num_taps>    = 8
+// <num_phases>  = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_8tap_64p_upscale[264] = {
-	0, 0, 0, 4096, 0, 0, 0, 0,
-	16376, 20, 16328, 4092, 56, 16364, 4, 0,
-	16372, 36, 16272, 4088, 116, 16340, 12, 0,
-	16364, 56, 16220, 4080, 180, 16320, 20, 0,
-	16360, 76, 16172, 4064, 244, 16296, 24, 16380,
-	16356, 92, 16124, 4048, 312, 16276, 32, 16380,
-	16352, 108, 16080, 4032, 380, 16252, 40, 16380,
-	16344, 124, 16036, 4008, 452, 16228, 48, 16380,
-	16340, 136, 15996, 3980, 524, 16204, 56, 16380,
-	16340, 152, 15956, 3952, 600, 16180, 64, 16376,
-	16336, 164, 15920, 3920, 672, 16156, 76, 16376,
-	16332, 176, 15888, 3884, 752, 16132, 84, 16376,
-	16328, 188, 15860, 3844, 828, 16104, 92, 16372,
-	16328, 200, 15828, 3800, 908, 16080, 100, 16372,
-	16324, 208, 15804, 3756, 992, 16056, 108, 16372,
-	16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
-	16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
-	16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
-	16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
-	16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
-	16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
-	16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
-	16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
-	16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
-	16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
-	16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
-	16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
-	16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
-	16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
-	16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
-	16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
-	16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
-	16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+		0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000,
+		0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000,
+		0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000,
+		0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000,
+		0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000,
+		0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000,
+		0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000,
+		0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000,
+		0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC,
+		0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC,
+		0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC,
+		0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC,
+		0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8,
+		0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8,
+		0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4,
+		0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4,
+		0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0,
+		0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0,
+		0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC,
+		0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC,
+		0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8,
+		0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8,
+		0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4,
+		0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4,
+		0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0,
+		0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC,
+		0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC,
+		0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8,
+		0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8,
+		0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4,
+		0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4,
+		0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0,
+		0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0
+};
 
-static const uint16_t filter_8tap_64p_117[264] = {
-	116, 16100, 428, 3564, 428, 16100, 116, 0,
-	112, 16116, 376, 3564, 484, 16084, 120, 16380,
-	104, 16136, 324, 3560, 540, 16064, 124, 16380,
-	100, 16152, 272, 3556, 600, 16048, 128, 16380,
-	96, 16168, 220, 3548, 656, 16032, 136, 16376,
-	88, 16188, 172, 3540, 716, 16016, 140, 16376,
-	84, 16204, 124, 3528, 780, 16000, 144, 16376,
-	80, 16220, 76, 3512, 840, 15984, 148, 16372,
-	76, 16236, 32, 3496, 904, 15968, 152, 16372,
-	68, 16252, 16376, 3480, 968, 15952, 156, 16372,
-	64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
-	60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
-	56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
-	48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
-	44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
-	40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
-	36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
-	32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
-	28, 4, 16052, 3208, 1564, 15836, 176, 16364,
-	24, 16, 16028, 3168, 1632, 15828, 176, 16364,
-	20, 28, 16000, 3124, 1700, 15820, 176, 16364,
-	16, 40, 15976, 3080, 1768, 15812, 176, 16364,
-	12, 52, 15952, 3036, 1836, 15808, 176, 16364,
-	8, 64, 15932, 2988, 1904, 15800, 176, 16364,
-	4, 76, 15912, 2940, 1972, 15800, 172, 16364,
-	4, 84, 15892, 2888, 2040, 15796, 172, 16364,
-	0, 96, 15876, 2836, 2104, 15792, 168, 16364,
-	16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
-	16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
-	16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
-	16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
-	16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
-	16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+//=========================================
+// <num_taps>    = 8
+// <num_phases>  = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_116[264] = {
+		0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000,
+		0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000,
+		0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000,
+		0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000,
+		0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000,
+		0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC,
+		0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC,
+		0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC,
+		0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC,
+		0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC,
+		0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC,
+		0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC,
+		0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC,
+		0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC,
+		0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC,
+		0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC,
+		0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC,
+		0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC,
+		0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC,
+		0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC,
+		0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000,
+		0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000,
+		0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000,
+		0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000,
+		0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000,
+		0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000,
+		0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004,
+		0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004,
+		0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008,
+		0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008,
+		0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C,
+		0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010,
+		0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010
+};
 
-static const uint16_t filter_8tap_64p_150[264] = {
-	16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
-	0, 16020, 992, 2756, 1068, 16024, 16376, 0,
-	4, 16020, 952, 2752, 1108, 16024, 16372, 0,
-	8, 16020, 916, 2748, 1148, 16028, 16368, 0,
-	12, 16020, 876, 2744, 1184, 16032, 16364, 4,
-	16, 16020, 840, 2740, 1224, 16036, 16356, 4,
-	20, 16024, 800, 2732, 1264, 16040, 16352, 4,
-	20, 16024, 764, 2724, 1304, 16044, 16348, 8,
-	24, 16028, 728, 2716, 1344, 16052, 16340, 8,
-	28, 16028, 692, 2704, 1380, 16056, 16336, 12,
-	28, 16032, 656, 2696, 1420, 16064, 16328, 12,
-	32, 16036, 620, 2684, 1460, 16072, 16324, 12,
-	36, 16040, 584, 2668, 1500, 16080, 16316, 16,
-	36, 16044, 548, 2656, 1536, 16088, 16308, 16,
-	36, 16048, 516, 2640, 1576, 16096, 16304, 20,
-	40, 16052, 480, 2624, 1612, 16108, 16296, 20,
-	40, 16060, 448, 2608, 1652, 16120, 16288, 20,
-	44, 16064, 416, 2588, 1692, 16132, 16280, 24,
-	44, 16068, 384, 2568, 1728, 16144, 16276, 24,
-	44, 16076, 352, 2548, 1764, 16156, 16268, 28,
-	44, 16080, 320, 2528, 1804, 16168, 16260, 28,
-	44, 16088, 292, 2508, 1840, 16184, 16252, 28,
-	44, 16096, 264, 2484, 1876, 16200, 16244, 32,
-	48, 16100, 232, 2460, 1912, 16216, 16236, 32,
-	48, 16108, 204, 2436, 1948, 16232, 16228, 32,
-	48, 16116, 176, 2412, 1980, 16248, 16220, 36,
-	48, 16124, 152, 2384, 2016, 16264, 16216, 36,
-	44, 16128, 124, 2356, 2052, 16284, 16208, 36,
-	44, 16136, 100, 2328, 2084, 16304, 16200, 40,
-	44, 16144, 72, 2300, 2116, 16324, 16192, 40,
-	44, 16152, 48, 2272, 2148, 16344, 16184, 40,
-	44, 16160, 24, 2244, 2180, 16364, 16176, 40,
-	44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+//=========================================
+// <num_taps>    = 8
+// <num_phases>  = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_149[264] = {
+		0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000,
+		0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000,
+		0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000,
+		0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000,
+		0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004,
+		0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004,
+		0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004,
+		0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008,
+		0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008,
+		0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008,
+		0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C,
+		0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C,
+		0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010,
+		0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010,
+		0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010,
+		0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014,
+		0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014,
+		0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018,
+		0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018,
+		0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C,
+		0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C,
+		0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C,
+		0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020,
+		0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020,
+		0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024,
+		0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024,
+		0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024,
+		0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028,
+		0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028,
+		0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028,
+		0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C,
+		0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C,
+		0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C
+};
 
+//=========================================
+// <num_taps>    = 8
+// <num_phases>  = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness>   = 0
+// <CoefType>    = ModifiedLanczos
+// <CoefQuant>   = 1.10
+// <CoefOut>     = 1.12
+//=========================================
 static const uint16_t filter_8tap_64p_183[264] = {
-	16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
-	16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
-	16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
-	16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
-	16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
-	16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
-	16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
-	16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
-	16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
-	16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
-	16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
-	16308, 16188, 868, 2200, 1448, 0, 16220, 4,
-	16312, 16184, 844, 2192, 1472, 12, 16216, 4,
-	16316, 16180, 816, 2184, 1500, 28, 16212, 4,
-	16320, 16176, 792, 2172, 1524, 40, 16208, 4,
-	16324, 16172, 764, 2164, 1548, 56, 16204, 0,
-	16328, 16172, 740, 2156, 1572, 72, 16200, 0,
-	16328, 16168, 712, 2144, 1596, 88, 16196, 0,
-	16332, 16164, 688, 2132, 1620, 100, 16192, 0,
-	16336, 16164, 664, 2120, 1644, 120, 16192, 0,
-	16340, 16160, 640, 2108, 1668, 136, 16188, 0,
-	16344, 16160, 616, 2096, 1688, 152, 16184, 0,
-	16344, 16160, 592, 2080, 1712, 168, 16180, 0,
-	16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
-	16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
-	16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
-	16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
-	16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
-	16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
-	16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
-	16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
-	16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
-	16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+		0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000,
+		0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000,
+		0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000,
+		0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000,
+		0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000,
+		0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000,
+		0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000,
+		0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC,
+		0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC,
+		0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC,
+		0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC,
+		0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8,
+		0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8,
+		0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8,
+		0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4,
+		0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4,
+		0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4,
+		0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0,
+		0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0,
+		0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0,
+		0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC,
+		0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC,
+		0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8,
+		0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8,
+		0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4,
+		0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4,
+		0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0,
+		0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0,
+		0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC,
+		0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC,
+		0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8,
+		0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8,
+		0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
+};
 
 const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
 {
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_3tap_16p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_3tap_16p_117;
+		return filter_3tap_16p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_3tap_16p_150;
+		return filter_3tap_16p_149;
 	else
 		return filter_3tap_16p_183;
 }
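
get_filter_3tap_16p() above and its seven siblings below all share one shape: ratios under 1.0 (upscaling) take the upscale table, the 4/3 and 5/3 breakpoints select the 1.16666 and 1.49999 tables, and anything harder falls through to the 1.83332 table. This is also why the _117/_150 suffixes became _116/_149: the names now match the <scale_ratio> values in the new headers. The same decision restated with plain doubles, purely as illustration (the driver compares fixed31_32 values, not floats):

	/* Illustrative only: mirrors the threshold logic of the
	 * get_filter_*() helpers with floating point. */
	static const char *pick_3tap_filter(double ratio)
	{
		if (ratio < 1.0)
			return "filter_3tap_16p_upscale";
		else if (ratio < 4.0 / 3.0)
			return "filter_3tap_16p_116";	/* ~1.16666 */
		else if (ratio < 5.0 / 3.0)
			return "filter_3tap_16p_149";	/* ~1.49999 */
		else
			return "filter_3tap_16p_183";	/* ~1.83332 */
	}

For example, a 1.5x downscale lands in the third branch and gets the 1.49999 coefficient set.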
@@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_3tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_3tap_64p_117;
+		return filter_3tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_3tap_64p_150;
+		return filter_3tap_64p_149;
 	else
 		return filter_3tap_64p_183;
 }
@@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_4tap_16p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_4tap_16p_117;
+		return filter_4tap_16p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_4tap_16p_150;
+		return filter_4tap_16p_149;
 	else
 		return filter_4tap_16p_183;
 }
@@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_4tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_4tap_64p_117;
+		return filter_4tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_4tap_64p_150;
+		return filter_4tap_64p_149;
 	else
 		return filter_4tap_64p_183;
 }
@@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_5tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_5tap_64p_117;
+		return filter_5tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_5tap_64p_150;
+		return filter_5tap_64p_149;
 	else
 		return filter_5tap_64p_183;
 }
@@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_6tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_6tap_64p_117;
+		return filter_6tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_6tap_64p_150;
+		return filter_6tap_64p_149;
 	else
 		return filter_6tap_64p_183;
 }
@@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_7tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_7tap_64p_117;
+		return filter_7tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_7tap_64p_150;
+		return filter_7tap_64p_149;
 	else
 		return filter_7tap_64p_183;
 }
@@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
 	if (ratio.value < dc_fixpt_one.value)
 		return filter_8tap_64p_upscale;
 	else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
-		return filter_8tap_64p_117;
+		return filter_8tap_64p_116;
 	else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
-		return filter_8tap_64p_150;
+		return filter_8tap_64p_149;
 	else
 		return filter_8tap_64p_183;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
new file mode 100644
index 0000000..bb0e1b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 225955e..bc109d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -27,25 +27,55 @@
 #include "dc.h"
 #include "dc_dmub_srv.h"
 #include "../../dmub/inc/dmub_srv.h"
-#include "dmub_fw_state.h"
+#include "../../dmub/inc/dmub_gpint_cmd.h"
 #include "core_types.h"
-#include "ipp.h"
 
 #define MAX_PIPES 6
 
 /**
  * Get PSR state from firmware.
  */
-static void dmub_get_psr_state(uint32_t *psr_state)
+static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
 {
-	// Not yet implemented
-	// Trigger GPINT interrupt from firmware
+	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+
+	// Send gpint command and wait for ack
+	dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+	dmub_srv_get_gpint_response(srv, psr_state);
+}
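
The old stub is now a real round trip: raise a general-purpose interrupt (GPINT) toward the DMCUB firmware, wait for the ack (the trailing 30 reads like a timeout, though its units are not visible in this hunk), then fetch the response from the firmware's scratch register. A hypothetical caller, assuming dmub_psr_create() installs the funcs table shown at the bottom of this file:

	/* Sketch only: 'psr' comes from dmub_psr_create(), and
	 * psr_get_state is the hook installed below. Per
	 * dmub_psr_set_level(), a state of 0 means PSR is not active. */
	static bool psr_is_active(struct dmub_psr *psr)
	{
		uint32_t state = 0;

		psr->funcs->psr_get_state(psr, &state);
		return state != 0;
	}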
+
+/**
+ * Set PSR version.
+ */
+static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
+{
+	union dmub_rb_cmd cmd;
+	struct dc_context *dc = dmub->ctx;
+
+	cmd.psr_set_version.header.type = DMUB_CMD__PSR;
+	cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
+
+	if (stream->psr_version == 0x0) // Unsupported
+		return false;
+	else if (stream->psr_version == 0x1)
+		cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
+	else if (stream->psr_version == 0x2)
+		cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
+
+	cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+	dc_dmub_srv_cmd_execute(dc->dmub_srv);
+	dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+	return true;
 }
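
One sharp edge worth flagging: 'cmd' is a stack variable and only versions 0x0, 0x1 and 0x2 are handled, so an unexpected psr_version would queue a command whose version field was never written. A more defensive shape for the mapping, sketched against the same enum names (the enum's type name here is a guess; it is not shown in this diff):

	/* Sketch only: reject unknown versions explicitly instead of
	 * falling through with a partially uninitialized command. */
	static bool psr_version_to_fw(uint8_t stream_psr_version,
				      enum psr_version *out)
	{
		switch (stream_psr_version) {
		case 0x1:
			*out = PSR_VERSION_1;
			return true;
		case 0x2:
			*out = PSR_VERSION_2;
			return true;
		default:
			return false;	/* includes 0x0, unsupported */
		}
	}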
 
 /**
  * Enable/Disable PSR.
  */
-static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
 {
 	union dmub_rb_cmd cmd;
 	struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
 /**
  * Set PSR level.
  */
-static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
 {
 	union dmub_rb_cmd cmd;
 	uint32_t psr_state = 0;
 	struct dc_context *dc = dmub->ctx;
 
-	dmub_get_psr_state(&psr_state);
+	dmub_psr_get_state(dmub, &psr_state);
 
 	if (psr_state == 0)
 		return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
 /**
  * Setup PSR by programming phy registers and sending psr hw context values to firmware.
  */
-static bool dmub_setup_psr(struct dmub_psr *dmub,
+static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
 		struct dc_link *link,
 		struct psr_context *psr_context)
 {
@@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
 		= &cmd.psr_copy_settings.psr_copy_settings_data;
 	struct pipe_ctx *pipe_ctx = NULL;
 	struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+	int i = 0;
 
-	for (int i = 0; i < MAX_PIPES; i++) {
-		if (res_ctx &&
-				res_ctx->pipe_ctx[i].stream &&
-				res_ctx->pipe_ctx[i].stream->link &&
-				res_ctx->pipe_ctx[i].stream->link == link &&
-				res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+	for (i = 0; i < MAX_PIPES; i++) {
+		if (res_ctx->pipe_ctx[i].stream &&
+		    res_ctx->pipe_ctx[i].stream->link == link &&
+		    res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
 			pipe_ctx = &res_ctx->pipe_ctx[i];
 			break;
 		}
 	}
 
-	if (!pipe_ctx ||
-			!&pipe_ctx->plane_res ||
-			!&pipe_ctx->stream_res)
+	if (!pipe_ctx)
+		return false;
+
+	// First, set the psr version
+	if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
 		return false;
 
 	// Program DP DPHY fast training registers
@@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
 
 	copy_settings_data->mpcc_inst				= pipe_ctx->plane_res.mpcc_inst;
 
-	if (pipe_ctx->plane_res.hubp)
-		copy_settings_data->hubp_inst			= pipe_ctx->plane_res.hubp->inst;
-	else
-		copy_settings_data->hubp_inst			= 0;
 	if (pipe_ctx->plane_res.dpp)
 		copy_settings_data->dpp_inst			= pipe_ctx->plane_res.dpp->inst;
 	else
@@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
 
 	// Misc
 	copy_settings_data->psr_level				= psr_context->psr_level.u32all;
-	copy_settings_data->hyst_frames				= psr_context->timehyst_frames;
-	copy_settings_data->hyst_lines				= psr_context->hyst_lines;
-	copy_settings_data->phy_type				= psr_context->phyType;
-	copy_settings_data->aux_repeat				= psr_context->aux_repeats;
-	copy_settings_data->smu_optimizations_en	= psr_context->allow_smu_optimizations;
-	copy_settings_data->skip_wait_for_pll_lock	= psr_context->skipPsrWaitForPllLock;
+	copy_settings_data->smu_optimizations_en		= psr_context->allow_smu_optimizations;
 	copy_settings_data->frame_delay				= psr_context->frame_delay;
-	copy_settings_data->smu_phy_id				= psr_context->smuPhyId;
-	copy_settings_data->num_of_controllers		= psr_context->numberOfControllers;
 	copy_settings_data->frame_cap_ind			= psr_context->psrFrameCaptureIndicationReq;
-	copy_settings_data->phy_num					= psr_context->frame_delay & 0x7;
-	copy_settings_data->link_rate				= psr_context->frame_delay & 0xF;
 
 	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
 	dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
 }
 
 static const struct dmub_psr_funcs psr_funcs = {
-	.set_psr_enable			= dmub_set_psr_enable,
-	.setup_psr				= dmub_setup_psr,
-	.get_psr_state			= dmub_get_psr_state,
-	.set_psr_level			= dmub_set_psr_level,
+	.psr_copy_settings		= dmub_psr_copy_settings,
+	.psr_enable			= dmub_psr_enable,
+	.psr_get_state			= dmub_psr_get_state,
+	.psr_set_level			= dmub_psr_set_level,
 };
 
 /**
@@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
  */
 void dmub_psr_destroy(struct dmub_psr **dmub)
 {
-	kfree(dmub);
+	kfree(*dmub);
 	*dmub = NULL;
 }
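
The dmub_psr_destroy() change above fixes a subtle allocator bug: the old
code freed the caller's pointer-to-pointer rather than the allocation it
points at. A minimal standalone model of the corrected pairing, with
malloc/free standing in for kzalloc/kfree and the struct contents elided:

    #include <stdlib.h>

    struct dmub_psr { int placeholder; };

    static struct dmub_psr *psr_create(void)
    {
            return calloc(1, sizeof(struct dmub_psr));
    }

    static void psr_destroy(struct dmub_psr **dmub)
    {
            free(*dmub);    /* free the object itself ... */
            *dmub = NULL;   /* ... then clear the caller's handle */
    }

Taking a pointer-to-pointer lets destroy NULL out the caller's handle, so a
second destroy degrades to a harmless free(NULL).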
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 229958d..f404fec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
 #define _DMUB_PSR_H_
 
 #include "os_types.h"
+#include "dc_link.h"
 
 struct dmub_psr {
 	struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
 };
 
 struct dmub_psr_funcs {
-	void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
-	bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
-	void (*get_psr_state)(uint32_t *psr_state);
-	void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+	bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+	void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+	void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
+	void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
 };
 
 struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
 void dmub_psr_destroy(struct dmub_psr **dmub);
 
 
-#endif /* _DCE_DMUB_H_ */
+#endif /* _DMUB_PSR_H_ */
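
With every hook renamed to take the struct dmub_psr object first, call sites
dispatch uniformly through the funcs table (the table member itself sits
outside the hunks shown). A usage sketch with illustrative locals:

    struct dmub_psr *psr = dmub_psr_create(ctx);
    uint32_t state = 0;

    if (psr && psr->funcs->psr_copy_settings(psr, link, &psr_context))
            psr->funcs->psr_enable(psr, true);

    psr->funcs->psr_get_state(psr, &state); /* GPINT round trip to firmware */

Passing the object explicitly is what lets psr_get_state() reach the
dmub_srv instance for the GPINT send/ack; the old get_psr_state(uint32_t *)
signature had no way to get there.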
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5b68927..0976e37 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -71,6 +71,8 @@
 #define PANEL_POWER_UP_TIMEOUT 300
 #define PANEL_POWER_DOWN_TIMEOUT 500
 #define HPD_CHECK_INTERVAL 10
+#define OLED_POST_T7_DELAY 100
+#define OLED_PRE_T11_DELAY 150
 
 #define CTX \
 	hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 }
 
 /* TODO: cloned in stream enc, fix */
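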
-static bool is_panel_backlight_on(struct dce_hwseq *hws)
+bool dce110_is_panel_backlight_on(struct dc_link *link)
 {
+	struct dc_context *ctx = link->ctx;
+	struct dce_hwseq *hws = ctx->dc->hwseq;
 	uint32_t value;
 
 	REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
 	return value;
 }
 
-static bool is_panel_powered_on(struct dce_hwseq *hws)
+bool dce110_is_panel_powered_on(struct dc_link *link)
 {
+	struct dc_context *ctx = link->ctx;
+	struct dce_hwseq *hws = ctx->dc->hwseq;
 	uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
 
-
 	REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
 
 	REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
 		return;
 	}
 
-	if (power_up != is_panel_powered_on(hwseq)) {
+	if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
 		/* Send VBIOS command to prompt eDP panel power */
 		if (power_up) {
 			unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
 		return;
 	}
 
-	if (enable && is_panel_backlight_on(hws)) {
+	if (enable && hws->funcs.is_panel_backlight_on(link)) {
 		DC_LOG_HW_RESUME_S3(
 				"%s: panel already powered up. Do nothing.\n",
 				__func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
 	if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
 		edp_receiver_ready_T7(link);
 	link_transmitter_control(ctx->dc_bios, &cntl);
+
+	if (enable && link->dpcd_sink_ext_caps.bits.oled)
+		msleep(OLED_POST_T7_DELAY);
+
+	if (link->dpcd_sink_ext_caps.bits.oled ||
+		link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+		link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
+		dc_link_backlight_enable_aux(link, enable);
+
 	/*edp 1.2*/
 	if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
 		edp_receiver_ready_T9(link);
+
+	if (!enable && link->dpcd_sink_ext_caps.bits.oled)
+		msleep(OLED_PRE_T11_DELAY);
 }
 
 void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface(
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-		if (stream == pipe_ctx->stream) {
-			if (!pipe_ctx->top_pipe &&
-				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
-				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
-		}
-	}
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
 		if (pipe_ctx->stream != stream)
 			continue;
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
 
 	}
 
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-		if ((stream == pipe_ctx->stream) &&
-			(!pipe_ctx->top_pipe) &&
-			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
-			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
-	}
-
 	if (dc->fbc_compressor)
 		enable_fbc(dc, context);
 }
 
+static void dce110_post_unlock_program_front_end(
+		struct dc *dc,
+		struct dc_state *context)
+{
+}
+
 static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
 	struct dce_hwseq *hws = dc->hwseq;
@@ -2722,6 +2724,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.init_hw = init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+	.post_unlock_program_front_end = dce110_post_unlock_program_front_end,
 	.update_plane_addr = update_plane_addr,
 	.update_pending_status = dce110_update_pending_status,
 	.enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2739,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.disable_audio_stream = dce110_disable_audio_stream,
 	.disable_plane = dce110_power_down_fe,
 	.pipe_control_lock = dce_pipe_control_lock,
+	.interdependent_update_lock = NULL,
 	.prepare_bandwidth = dce110_prepare_bandwidth,
 	.optimize_bandwidth = dce110_optimize_bandwidth,
 	.set_drr = set_drr,
@@ -2763,6 +2767,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
 	.disable_stream_gating = NULL,
 	.enable_stream_gating = NULL,
 	.edp_backlight_control = dce110_edp_backlight_control,
+	.is_panel_backlight_on = dce110_is_panel_backlight_on,
+	.is_panel_powered_on = dce110_is_panel_powered_on,
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
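
The backlight hunk above inserts two panel-settle windows around the
transmitter control: OLED panels get roughly 100 ms after the T7 point
before the backlight is driven, and roughly 150 ms before T11 on the way
down, with brightness itself moved over AUX whenever the sink advertises
it. Condensed, the new ordering is (a paraphrase of the hunk, not a
drop-in):

    link_transmitter_control(ctx->dc_bios, &cntl);

    if (enable && link->dpcd_sink_ext_caps.bits.oled)
            msleep(OLED_POST_T7_DELAY);             /* 100 ms OLED settle */

    if (link->dpcd_sink_ext_caps.bits.oled ||
        link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
        link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
            dc_link_backlight_enable_aux(link, enable);

    if (!enable && link->dpcd_sink_ext_caps.bits.oled)
            msleep(OLED_PRE_T11_DELAY);             /* 150 ms before T11 */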
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 26a9c14..34be166 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
 		struct dc_link *link,
 		bool power_up);
 
+bool dce110_is_panel_backlight_on(struct dc_link *link);
+
+bool dce110_is_panel_powered_on(struct dc_link *link);
+
 #endif /* __DC_HWSS_DCE110_H__ */
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index bbd6e01..47a39eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
 	struct pwl_result_data *rgb_resulted;
 	struct pwl_result_data *rgb;
 	struct pwl_result_data *rgb_plus_1;
+	struct pwl_result_data *rgb_minus_1;
 
 	int32_t region_start, region_end;
 	int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
 
 	rgb = rgb_resulted;
 	rgb_plus_1 = rgb_resulted + 1;
+	rgb_minus_1 = rgb;
 
 	i = 1;
 	while (i != hw_points + 1) {
+
+		if (i >= hw_points - 1) {
+			if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+				rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
+			if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+				rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
+			if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+				rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+		}
+
 		rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
 		rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
 		rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
 		}
 
 		++rgb_plus_1;
+		rgb_minus_1 = rgb;
 		++rgb;
 		++i;
 	}
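
The rgb_minus_1 bookkeeping above keeps the tail of the piecewise-linear
curve monotonic: for the last hardware points, a next point that dips below
the current one is re-extended along the previous segment's delta. A
self-contained model of that clamp, with doubles standing in for the
driver's fixed-point type:

    #include <stddef.h>

    /* Clamp the last two points of a monotonically increasing PWL curve:
     * if a point dips below its predecessor, extend it by the preceding
     * segment's slope instead. Assumes n >= 3.
     */
    static void clamp_curve_tail(double *pts, size_t n)
    {
            size_t i;

            for (i = n - 2; i < n; i++) {
                    double prev_delta = pts[i - 1] - pts[i - 2];

                    if (pts[i] < pts[i - 1])
                            pts[i] = pts[i - 1] + prev_delta;
            }
    }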
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 446ba0a..deccab0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
 	 * pstate takes around 100us on linux. It is currently unknown
 	 * why it takes that long on linux.
 	 */
-	static unsigned int pstate_wait_timeout_us = 200;
-	static unsigned int pstate_wait_expected_timeout_us = 40;
+	const unsigned int pstate_wait_timeout_us = 200;
+	const unsigned int pstate_wait_expected_timeout_us = 40;
 	static unsigned int max_sampled_pstate_wait_us; /* data collection */
 	static bool forced_pstate_allow; /* help with revert wa */
 
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
 		forced_pstate_allow = false;
 	}
 
-	/* RV2:
-	 * dchubbubdebugind, at: 0xB
+	/* The following table only applies to DCN1 and DCN2;
+	 * for newer DCNs, consult the HW IP folks on how to read the RTL.
+	 * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
 	 * description
 	 * 0:     Pipe0 Plane0 Allow Pstate Change
 	 * 1:     Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
 	 * 28:    WB0 Allow Pstate Change
 	 * 29:    WB1 Allow Pstate Change
 	 * 30:    Arbiter's allow_pstate_change
-	 * 31:    SOC pstate change request"
-	 */
-	/*DCN2.x:
-	HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
-	0: Pipe0 Plane0 Allow P-state Change
-	1: Pipe0 Plane1 Allow P-state Change
-	2: Pipe0 Cursor0 Allow P-state Change
-	3: Pipe0 Cursor1 Allow P-state Change
-	4: Pipe1 Plane0 Allow P-state Change
-	5: Pipe1 Plane1 Allow P-state Change
-	6: Pipe1 Cursor0 Allow P-state Change
-	7: Pipe1 Cursor1 Allow P-state Change
-	8: Pipe2 Plane0 Allow P-state Change
-	9: Pipe2 Plane1 Allow P-state Change
-	10: Pipe2 Cursor0 Allow P-state Change
-	11: Pipe2 Cursor1 Allow P-state Change
-	12: Pipe3 Plane0 Allow P-state Change
-	13: Pipe3 Plane1 Allow P-state Change
-	14: Pipe3 Cursor0 Allow P-state Change
-	15: Pipe3 Cursor1 Allow P-state Change
-	16: Pipe4 Plane0 Allow P-state Change
-	17: Pipe4 Plane1 Allow P-state Change
-	18: Pipe4 Cursor0 Allow P-state Change
-	19: Pipe4 Cursor1 Allow P-state Change
-	20: Pipe5 Plane0 Allow P-state Change
-	21: Pipe5 Plane1 Allow P-state Change
-	22: Pipe5 Cursor0 Allow P-state Change
-	23: Pipe5 Cursor1 Allow P-state Change
-	24: Pipe6 Plane0 Allow P-state Change
-	25: Pipe6 Plane1 Allow P-state Change
-	26: Pipe6 Cursor0 Allow P-state Change
-	27: Pipe6 Cursor1 Allow P-state Change
-	28: WB0 Allow P-state Change
-	29: WB1 Allow P-state Change
-	30: Arbiter`s Allow P-state Change
-	31: SOC P-state Change request
-	*/
-	/* RV1:
-	 * dchubbubdebugind, at: 0x7
-	 * description "3-0:   Pipe0 cursor0 QOS
-	 * 7-4:   Pipe1 cursor0 QOS
-	 * 11-8:  Pipe2 cursor0 QOS
-	 * 15-12: Pipe3 cursor0 QOS
-	 * 16:    Pipe0 Plane0 Allow Pstate Change
-	 * 17:    Pipe1 Plane0 Allow Pstate Change
-	 * 18:    Pipe2 Plane0 Allow Pstate Change
-	 * 19:    Pipe3 Plane0 Allow Pstate Change
-	 * 20:    Pipe0 Plane1 Allow Pstate Change
-	 * 21:    Pipe1 Plane1 Allow Pstate Change
-	 * 22:    Pipe2 Plane1 Allow Pstate Change
-	 * 23:    Pipe3 Plane1 Allow Pstate Change
-	 * 24:    Pipe0 cursor0 Allow Pstate Change
-	 * 25:    Pipe1 cursor0 Allow Pstate Change
-	 * 26:    Pipe2 cursor0 Allow Pstate Change
-	 * 27:    Pipe3 cursor0 Allow Pstate Change
-	 * 28:    WB0 Allow Pstate Change
-	 * 29:    WB1 Allow Pstate Change
-	 * 30:    Arbiter's allow_pstate_change
 	 * 31:    SOC pstate change request
 	 */
 
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
 }
 
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
 {
 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
+	bool wm_pending = false;
 
 	/* Repeat for water mark set A, B, C and D. */
 	/* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
 		hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.pte_meta_urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
 		hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.pte_meta_urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
 		hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.pte_meta_urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
 		hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
+		wm_pending = true;
+
+	return wm_pending;
 }
 
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
 {
 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
+	bool wm_pending = false;
 
 	/* clock state A */
 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
+	return wm_pending;
 }
 
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
 {
 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
+	bool wm_pending = false;
 
 	/* clock state A */
 	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
+
+	return wm_pending;
 }
 
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower)
 {
 	struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+	bool wm_pending = false;
 	/*
 	 * Need to clamp to max of the register values (i.e. no wrap)
 	 * for dcn1, all wm registers are 21-bit wide
 	 */
-	hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
-	hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
-	hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
+
+	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
+
+	if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
 
 	REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
 			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
 			DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
 			DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
 #endif
+	return wm_pending;
 }
 
 void hubbub1_update_dchub(
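
Every per-register branch in the watermark helpers above now follows the
same shape: a value may always be raised, may only be lowered when
safe_to_lower is set, and a skipped lowering is reported upward so dc can
schedule another pass. A minimal standalone model of one branch:

    #include <stdbool.h>

    /* One watermark register, modelled: raising always applies, lowering
     * only when allowed; a deferred lowering is reported as pending.
     */
    static bool program_wm(unsigned int *cur, unsigned int want,
                           bool safe_to_lower)
    {
            if (safe_to_lower || want > *cur) {
                    *cur = want;    /* stands in for the REG_SET write */
                    return false;   /* nothing deferred */
            }
            return want < *cur;     /* lowering still pending */
    }

hubbub1_program_watermarks() ORs the urgent, stutter and p-state results
together, and the prepare_bandwidth paths later in this series store that
aggregate in dc->wm_optimized_required.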
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index af577512..343a5371 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
 
 void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
 
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
 	const struct dcn_hubbub_shift *hubbub_shift,
 	const struct dcn_hubbub_mask *hubbub_mask);
 
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower);
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower);
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1008ac8..9cc3314 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -48,8 +48,8 @@
 #include "dc_link_dp.h"
 #include "dccg.h"
 #include "clk_mgr.h"
-
-
+#include "link_hwss.h"
+#include "dpcd_defs.h"
 #include "dsc.h"
 
 #define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
 			us_x10 % frac);
 }
 
-static void dcn10_lock_all_pipes(struct dc *dc,
+void dcn10_lock_all_pipes(struct dc *dc,
 	struct dc_state *context,
 	bool lock)
 {
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
 		tg = pipe_ctx->stream_res.tg;
+
 		/*
 		 * Only lock the top pipe's tg to prevent redundant
 		 * (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
 			continue;
 
 		if (lock)
-			tg->funcs->lock(tg);
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
 		else
-			tg->funcs->unlock(tg);
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
 	}
 }
 
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
 	 * parent pipe.
 	 */
 	if (pipe_ctx->top_pipe == NULL) {
+
+		if (pipe_ctx->stream_res.abm)
+			pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc)
 		}
 
 		//Enable ability to power gate / don't force power on permanently
-		hws->funcs.enable_power_gating_plane(hws, true);
+		if (hws->funcs.enable_power_gating_plane)
+			hws->funcs.enable_power_gating_plane(hws, true);
 
 		return;
 	}
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
 		if (hws->funcs.dsc_pg_control != NULL)
 			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
 
+	/* we want to turn off all dp displays before doing detection */
+	if (dc->config.power_down_display_on_boot) {
+		uint8_t dpcd_power_state = '\0';
+		enum dc_status status = DC_ERROR_UNEXPECTED;
+
+		for (i = 0; i < dc->link_count; i++) {
+			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+				continue;
+
+			/*
+			 * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
+			 * which needs to read dpcd info with the help of aconnector.
+			 * If aconnector (dc->links[i]->priv) is NULL, then dpcd status
+			 * cannot be read.
+			 */
+			if (dc->links[i]->priv) {
+				/* if any of the displays are lit up turn them off */
+				status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+								&dpcd_power_state, sizeof(dpcd_power_state));
+				if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+					dp_receiver_power_ctrl(dc->links[i], false);
+			}
+		}
+	}
+
 	/* If taking control over from VBIOS, we may want to optimize our first
 	 * mode set, so we need to skip powering down pipes until we know which
 	 * pipes we want to use.
@@ -1325,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
 	 */
 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
 		hws->funcs.init_pipes(dc, dc->current_state);
+		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
 	}
 
 	for (i = 0; i < res_pool->audio_count; i++) {
@@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc)
 
 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
 	}
-
-	hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+	if (hws->funcs.enable_power_gating_plane)
+		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
 	if (dc->clk_mgr->funcs->notify_wm_ranges)
 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock(
 	/* use TG master update lock to lock everything on the TG
 	 * therefore only top pipe need to lock
 	 */
-	if (pipe->top_pipe)
+	if (!pipe || pipe->top_pipe)
 		return;
 
 	if (dc->debug.sanity_checks)
@@ -2090,6 +2124,10 @@ void dcn10_get_hdr_visual_confirm_color(
 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
 			/* HDR10, ARGB2101010 - set border color to red */
 			color->color_r_cr = color_value;
+		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+			/* FreeSync 2 ARGB2101010 - set border color to pink */
+			color->color_r_cr = color_value;
+			color->color_b_cb = color_value;
 		}
 		break;
 	case PIXEL_FORMAT_FP16:
@@ -2512,12 +2550,17 @@ void dcn10_apply_ctx_for_surface(
 	int i;
 	struct timing_generator *tg;
 	uint32_t underflow_check_delay_us;
-	bool removed_pipe[4] = { false };
 	bool interdependent_update = false;
 	struct pipe_ctx *top_pipe_to_program =
 			dcn10_find_top_pipe_for_stream(dc, context, stream);
 	DC_LOGGER_INIT(dc->ctx->logger);
 
+	// Clear pipe_ctx update flags
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		pipe_ctx->update_flags.raw = 0;
+	}
+
 	if (!top_pipe_to_program)
 		return;
 
@@ -2531,11 +2574,6 @@ void dcn10_apply_ctx_for_surface(
 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
 
-	if (interdependent_update)
-		dcn10_lock_all_pipes(dc, context, true);
-	else
-		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
-
 	if (underflow_check_delay_us != 0xFFFFFFFF)
 		udelay(underflow_check_delay_us);
 
@@ -2552,18 +2590,6 @@ void dcn10_apply_ctx_for_surface(
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 		struct pipe_ctx *old_pipe_ctx =
 				&dc->current_state->res_ctx.pipe_ctx[i];
-		/*
-		 * Powergate reused pipes that are not powergated
-		 * fairly hacky right now, using opp_id as indicator
-		 * TODO: After move dc_post to dc_update, this will
-		 * be removed.
-		 */
-		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
-			if (old_pipe_ctx->stream_res.tg == tg &&
-			    old_pipe_ctx->plane_res.hubp &&
-			    old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
-				dc->hwss.disable_plane(dc, old_pipe_ctx);
-		}
 
 		if ((!pipe_ctx->plane_state ||
 		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2597,7 @@ void dcn10_apply_ctx_for_surface(
 		    old_pipe_ctx->stream_res.tg == tg) {
 
 			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
-			removed_pipe[i] = true;
+			pipe_ctx->update_flags.bits.disable = 1;
 
 			DC_LOG_DC("Reset mpcc for pipe %d\n",
 					old_pipe_ctx->pipe_idx);
@@ -2597,21 +2623,35 @@ void dcn10_apply_ctx_for_surface(
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
 		}
+}
 
-	if (interdependent_update)
-		dcn10_lock_all_pipes(dc, context, false);
-	else
-		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+void dcn10_post_unlock_program_front_end(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i;
 
-	if (num_planes == 0)
-		false_optc_underflow_wa(dc, stream, tg);
+	DC_LOGGER_INIT(dc->ctx->logger);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->top_pipe &&
+			!pipe_ctx->prev_odm_pipe &&
+			pipe_ctx->stream) {
+			struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+			if (context->stream_status[i].plane_count == 0)
+				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
+		}
+	}
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (removed_pipe[i])
+		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (removed_pipe[i]) {
+		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
 			dc->hwss.optimize_bandwidth(dc, context);
 			break;
 		}
@@ -2656,7 +2696,7 @@ void dcn10_prepare_bandwidth(
 				false);
 	}
 
-	hubbub->funcs->program_watermarks(hubbub,
+	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
 			&context->bw_ctx.bw.dcn.watermarks,
 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
 			true);
@@ -2693,6 +2733,7 @@ void dcn10_optimize_bandwidth(
 			&context->bw_ctx.bw.dcn.watermarks,
 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
 			true);
+
 	dcn10_stereo_hw_frame_pack_wa(dc, context);
 
 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +2925,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
 	bool flip_pending;
+	struct dc *dc = plane_state->ctx->dc;
 
 	if (plane_state == NULL)
 		return;
@@ -2901,6 +2943,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 		plane_state->status.is_right_eye =
 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
 	}
+
+	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
+		struct dce_hwseq *hwseq = dc->hwseq;
+		struct timing_generator *tg = dc->res_pool->timing_generators[0];
+		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
+
+		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
+			struct hubbub *hubbub = dc->res_pool->hubbub;
+
+			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
+		}
+	}
 }
 
 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
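
The init_hw addition above powers down any DP sink that the VBIOS/GOP left
lit before detection runs, but only on links that have an aconnector (priv)
through which DPCD is readable. Stripped of logging and locals, the loop
reduces to (a condensed paraphrase of the hunk):

    for (i = 0; i < dc->link_count; i++) {
            struct dc_link *link = dc->links[i];
            uint8_t pwr = 0;

            if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT ||
                !link->priv)
                    continue;

            if (core_link_read_dpcd(link, DP_SET_POWER,
                                    &pwr, sizeof(pwr)) == DC_OK &&
                pwr == DP_POWER_STATE_D0)
                    dp_receiver_power_ctrl(link, false); /* sink was on */
    }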
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4d20f65..16a50e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -70,11 +70,18 @@ void dcn10_reset_hw_ctx_wrap(
 		struct dc *dc,
 		struct dc_state *context);
 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_lock_all_pipes(
+		struct dc *dc,
+		struct dc_state *context,
+		bool lock);
 void dcn10_apply_ctx_for_surface(
 		struct dc *dc,
 		const struct dc_stream_state *stream,
 		int num_planes,
 		struct dc_state *context);
+void dcn10_post_unlock_program_front_end(
+		struct dc *dc,
+		struct dc_state *context);
 void dcn10_hubp_pg_control(
 		struct dce_hwseq *hws,
 		unsigned int hubp_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index e7e5352..dd02d39 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.init_hw = dcn10_init_hw,
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+	.post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
 	.update_plane_addr = dcn10_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
 	.update_pending_status = dcn10_update_pending_status,
@@ -49,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.disable_audio_stream = dce110_disable_audio_stream,
 	.disable_plane = dcn10_disable_plane,
 	.pipe_control_lock = dcn10_pipe_control_lock,
+	.interdependent_update_lock = dcn10_lock_all_pipes,
 	.prepare_bandwidth = dcn10_prepare_bandwidth,
 	.optimize_bandwidth = dcn10_optimize_bandwidth,
 	.set_drr = dcn10_set_drr,
@@ -85,6 +87,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
 	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
 	.enable_stream_timing = dcn10_enable_stream_timing,
 	.edp_backlight_control = dce110_edp_backlight_control,
+	.is_panel_backlight_on = dce110_is_panel_backlight_on,
+	.is_panel_powered_on = dce110_is_panel_powered_on,
 	.disable_stream_gating = NULL,
 	.enable_stream_gating = NULL,
 	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 1a37c90..d3617d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream(
 	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
 	bool is_valid;
 
+	//if SCDC (340-600MHz) is disabled, set to HDMI 1.4 timing limit
+	if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
+		enc10->base.features.max_hdmi_pixel_clock > 300000)
+		enc10->base.features.max_hdmi_pixel_clock = 300000;
+
 	switch (stream->signal) {
 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
 	case SIGNAL_TYPE_DVI_DUAL_LINK:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index eb13589..76210917 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -62,11 +62,11 @@
 	SRI(DP_DPHY_FAST_TRAINING, DP, id), \
 	SRI(DP_SEC_CNTL1, DP, id), \
 	SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
-	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
 	SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
 
 
 #define LE_DCN10_REG_LIST(id)\
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
 	LE_DCN_COMMON_REG_LIST(id)
 
 struct dcn10_link_enc_aux_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a9a43b3..63acb8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
 	uint32_t asic_blank_end;
 	uint32_t v_init;
 	uint32_t v_fp2 = 0;
-	int32_t vertical_line_start;
 
 	struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
 			patched_crtc_timing.v_border_top;
 
 	/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
-	vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
-	if (vertical_line_start < 0)
-		v_fp2 = -vertical_line_start;
+	if (optc1->vstartup_start > asic_blank_end)
+		v_fp2 = optc1->vstartup_start - asic_blank_end;
 
 	/* Interlace */
 	if (REG(OTG_INTERLACE_CONTROL)) {
@@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
 			REG_UPDATE_3(OTG_STEREO_CONTROL,
 				OTG_STEREO_EN, stereo_en,
 				OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
-				OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+				OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
 
 		if (flags->PROGRAM_POLARITY)
 			REG_UPDATE(OTG_STEREO_CONTROL,
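
The v_fp2 rewrite above drops the signed intermediate: the old code
subtracted two unsigned timing values into an int32_t and negated the
result, while the new code branches on the comparison and subtracts only in
the direction that cannot underflow (the offset also comes out one line
larger than the old "+ 1"-biased formula). As a standalone computation:

    #include <stdint.h>

    /* Unsigned-safe FP2 offset: nonzero only when VSTARTUP falls after
     * blank end, matching the new branch above.
     */
    static uint32_t calc_v_fp2(uint32_t vstartup_start,
                               uint32_t asic_blank_end)
    {
            return vstartup_start > asic_blank_end ?
                            vstartup_start - asic_blank_end : 0;
    }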
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 3b71898..261bdc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = {
 
 static const struct dc_debug_options debug_defaults_drv = {
 		.sanity_checks = true,
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = false,
 		.clock_trace = true,
@@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = true,
 		.clock_trace = true,
@@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
 	return DC_OK;
 }
 
-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
 	enum dc_status result = DC_OK;
 
@@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
 	.validate_plane = dcn10_validate_plane,
 	.validate_global = dcn10_validate_global,
 	.add_stream_to_ctx = dcn10_add_stream_to_ctx,
-	.get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 376c426..7eba933 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct(
 	enc1->regs = regs;
 	enc1->se_shift = se_shift;
 	enc1->se_mask = se_mask;
+	enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 50bffbf..62cc265 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
 		REG_UPDATE(DPPCLK_DTO_CTRL,
 				DPPCLK_DTO_ENABLE[dpp_inst], 0);
 	}
+
+	dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
 }
 
 void dccg2_get_dccg_ref_freq(struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 13e057d..42bba7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
 	}
 }
 
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
-		struct dpp *dpp,
-		struct scaler_data *scl_data,
-		const struct scaling_taps *in_taps)
-{
-	/* Some ASICs does not support  FP16 scaling, so we reject modes require this*/
-	if (scl_data->viewport.width  != scl_data->h_active &&
-		scl_data->viewport.height != scl_data->v_active &&
-		dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-		scl_data->format == PIXEL_FORMAT_FP16)
-		return false;
-
-	if (scl_data->viewport.width > scl_data->h_active &&
-		dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
-		scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
-		return false;
-
-	/* TODO: add lb check */
-
-	/* No support for programming ratio of 8, drop to 7.99999.. */
-	if (scl_data->ratios.horz.value == (8ll << 32))
-		scl_data->ratios.horz.value--;
-	if (scl_data->ratios.vert.value == (8ll << 32))
-		scl_data->ratios.vert.value--;
-	if (scl_data->ratios.horz_c.value == (8ll << 32))
-		scl_data->ratios.horz_c.value--;
-	if (scl_data->ratios.vert_c.value == (8ll << 32))
-		scl_data->ratios.vert_c.value--;
-
-	/* Set default taps if none are provided */
-	if (in_taps->h_taps == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
-			scl_data->taps.h_taps = 8;
-		else
-			scl_data->taps.h_taps = 4;
-	} else
-		scl_data->taps.h_taps = in_taps->h_taps;
-	if (in_taps->v_taps == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
-			scl_data->taps.v_taps = 8;
-		else
-			scl_data->taps.v_taps = 4;
-	} else
-		scl_data->taps.v_taps = in_taps->v_taps;
-	if (in_taps->v_taps_c == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
-			scl_data->taps.v_taps_c = 4;
-		else
-			scl_data->taps.v_taps_c = 2;
-	} else
-		scl_data->taps.v_taps_c = in_taps->v_taps_c;
-	if (in_taps->h_taps_c == 0) {
-		if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
-			scl_data->taps.h_taps_c = 4;
-		else
-			scl_data->taps.h_taps_c = 2;
-	} else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
-		/* Only 1 and even h_taps_c are supported by hw */
-		scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
-	else
-		scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
-	if (!dpp->ctx->dc->debug.always_scale) {
-		if (IDENTITY_RATIO(scl_data->ratios.horz))
-			scl_data->taps.h_taps = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.vert))
-			scl_data->taps.v_taps = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.horz_c))
-			scl_data->taps.h_taps_c = 1;
-		if (IDENTITY_RATIO(scl_data->ratios.vert_c))
-			scl_data->taps.v_taps_c = 1;
-	}
-
-	return true;
-}
-
 void oppn20_dummy_program_regamma_pwl(
 		struct dpp *dpp,
 		const struct pwl_params *params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 6bdfee2..1b1ae9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
 	dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
 	dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
 	dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+	dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
 
 	// TODO: in addition to validating slice height (pic height must be divisible by slice height),
 	// see what happens when the same condition doesn't apply for slice_width/pic_width.
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
 		reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
 
 	reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
-	reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
 }
 
 static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 9235f7d..c0b21d74 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
 	}
 }
 
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower)
 {
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+	bool wm_pending = false;
 	/*
 	 * Need to clamp to max of the register values (i.e. no wrap)
 	 * for dcn1, all wm registers are 21-bit wide
 	 */
-	hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
-	hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+	if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
+
+	if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
 
 	/*
 	 * There's a special case when going from p-state support to p-state unsupported
@@ -592,6 +596,7 @@ static void hubbub2_program_watermarks(
 	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
 
 	hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+	return wm_pending;
 }
 
 static const struct hubbub_funcs hubbub2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a444fed..2333182 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -307,7 +307,8 @@ void dcn20_init_blank(
 			COLOR_DEPTH_UNDEFINED,
 			&black_color,
 			otg_active_width,
-			otg_active_height);
+			otg_active_height,
+			0);
 
 	if (num_opps == 2) {
 		bottom_opp->funcs->opp_set_disp_pattern_generator(
@@ -317,7 +318,8 @@ void dcn20_init_blank(
 				COLOR_DEPTH_UNDEFINED,
 				&black_color,
 				otg_active_width,
-				otg_active_height);
+				otg_active_height,
+				0);
 	}
 
 	hws->funcs.wait_for_blank_complete(opp);
@@ -645,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
 		return DC_ERROR_UNEXPECTED;
 	}
 
+	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
 	pipe_ctx->stream_res.tg->funcs->program_timing(
 			pipe_ctx->stream_res.tg,
 			&stream->timing,
@@ -974,7 +979,8 @@ void dcn20_blank_pixel_data(
 			stream->timing.display_color_depth,
 			&black_color,
 			width,
-			height);
+			height,
+			0);
 
 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
 		odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
@@ -985,7 +991,8 @@ void dcn20_blank_pixel_data(
 				stream->timing.display_color_depth,
 				&black_color,
 				width,
-				height);
+				height,
+				0);
 	}
 
 	if (!blank)
@@ -1088,29 +1095,6 @@ void dcn20_enable_plane(
 //	}
 }
 
-
-void dcn20_pipe_control_lock_global(
-		struct dc *dc,
-		struct pipe_ctx *pipe,
-		bool lock)
-{
-	if (lock) {
-		pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
-				pipe->stream_res.tg);
-		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
-	} else {
-		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
-		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
-				CRTC_STATE_VACTIVE);
-		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
-				CRTC_STATE_VBLANK);
-		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
-				CRTC_STATE_VACTIVE);
-		pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
-				pipe->stream_res.tg);
-	}
-}
-
 void dcn20_pipe_control_lock(
 	struct dc *dc,
 	struct pipe_ctx *pipe,
@@ -1121,7 +1105,7 @@ void dcn20_pipe_control_lock(
 	/* use TG master update lock to lock everything on the TG
 	 * therefore only top pipe need to lock
 	 */
-	if (pipe->top_pipe)
+	if (!pipe || pipe->top_pipe)
 		return;
 
 	if (pipe->plane_state != NULL)
@@ -1536,48 +1520,32 @@ static void dcn20_program_pipe(
 	}
 }
 
-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
-{
-	if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
-			|| pipe->update_flags.raw)
-		return true;
-	if (pipe->bottom_pipe)
-		return does_pipe_need_lock(pipe->bottom_pipe);
-
-	return false;
-}
-
 void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
 		struct dc_state *context)
 {
-	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
 	int i;
 	struct dce_hwseq *hws = dc->hwseq;
-	bool pipe_locked[MAX_PIPES] = {false};
 	DC_LOGGER_INIT(dc->ctx->logger);
 
-	/* Carry over GSL groups in case the context is changing. */
-	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
-			context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
-				dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+			if (dc->hwss.program_triplebuffer != NULL &&
+				!dc->debug.disable_tri_buf) {
+				/* turn off triple buffering for full updates */
+				dc->hwss.program_triplebuffer(
+					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+			}
+		}
+	}
 
 	/* Set pipe update flags and lock pipes */
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 		dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
 				&context->res_ctx.pipe_ctx[i]);
-	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (!context->res_ctx.pipe_ctx[i].top_pipe &&
-				does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-			if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
-				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
-			if (!pipe_ctx->update_flags.bits.enable)
-				dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
-			pipe_locked[i] = true;
-		}
 
 	/* OTG blank before disabling all front ends */
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1615,17 +1583,17 @@ void dcn20_program_front_end_for_ctx(
 				hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
 		}
 	}
+}
 
-	/* Unlock all locked pipes */
-	for (i = 0; i < dc->res_pool->pipe_count; i++)
-		if (pipe_locked[i]) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+void dcn20_post_unlock_program_front_end(
+		struct dc *dc,
+		struct dc_state *context)
+{
+	int i;
+	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+	struct dce_hwseq *hwseq = dc->hwseq;
 
-			if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
-				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
-			if (!pipe_ctx->update_flags.bits.enable)
-				dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
-		}
+	DC_LOGGER_INIT(dc->ctx->logger);
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -1651,11 +1619,26 @@ void dcn20_program_front_end_for_ctx(
 	}
 
 	/* WA to apply WM setting*/
-	if (dc->hwseq->wa.DEGVIDCN21)
+	if (hwseq->wa.DEGVIDCN21)
 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-}
 
 
+	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
+	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+		if (dc->current_state->stream_status[0].plane_count == 1 &&
+				context->stream_status[0].plane_count > 1) {
+
+			struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+		}
+	}
+}
+
 void dcn20_prepare_bandwidth(
 		struct dc *dc,
 		struct dc_state *context)
@@ -1668,7 +1651,7 @@ void dcn20_prepare_bandwidth(
 			false);
 
 	/* program dchubbub watermarks */
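+	/* program_watermarks now returns true when lowering a watermark had to
+	 * be deferred (not safe_to_lower yet); record it so dc knows watermark
+	 * optimization is still required.
+	 */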
-	hubbub->funcs->program_watermarks(hubbub,
+	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
 					&context->bw_ctx.bw.dcn.watermarks,
 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
 					false);
@@ -2052,6 +2035,10 @@ static void dcn20_reset_back_end_for_pipe(
 	 * parent pipe.
 	 */
 	if (pipe_ctx->top_pipe == NULL) {
+
+		if (pipe_ctx->stream_res.abm)
+			pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
 
 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 02c9be5..63ce763 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
 void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
 		struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+		struct dc *dc,
+		struct dc_state *context);
 void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
 bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
 	struct dc *dc,
 	struct pipe_ctx *pipe,
 	bool lock);
-void dcn20_pipe_control_lock_global(
-		struct dc *dc,
-		struct pipe_ctx *pipe,
-		bool lock);
 void dcn20_prepare_bandwidth(
 		struct dc *dc,
 		struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 5e640f1..1e73357 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = NULL,
 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
 	.update_pending_status = dcn10_update_pending_status,
@@ -50,7 +51,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.disable_audio_stream = dce110_disable_audio_stream,
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
-	.pipe_control_lock_global = dcn20_pipe_control_lock_global,
+	.interdependent_update_lock = dcn10_lock_all_pipes,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,
@@ -96,6 +97,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
 	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
 	.enable_stream_timing = dcn20_enable_stream_timing,
 	.edp_backlight_control = dce110_edp_backlight_control,
+	.is_panel_backlight_on = dce110_is_panel_backlight_on,
+	.is_panel_powered_on = dce110_is_panel_powered_on,
 	.disable_stream_gating = dcn20_disable_stream_gating,
 	.enable_stream_gating = dcn20_enable_stream_gating,
 	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5e..7bcee58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
 #define BASE(seg) \
 	BASE_INNER(seg)
 
-#define SR(reg_name)\
-		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
-					mm ## reg_name
-
-#define SRI(reg_name, block, id)\
-	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
-					mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
-	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
-					mm ## reg_name
-
-#define SRII(reg_name, block, id)\
-	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
-					mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
-	.field_name = reg_name ## __ ## field_name ## post_fix
-
-
 #define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
 	SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
 	SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 023cc71..138321e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
 		enum dc_color_depth color_depth,
 		const struct tg_color *solid_color,
 		int width,
-		int height)
+		int height,
+		int offset)
 {
 	struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
 	enum test_pattern_color_format bit_depth;
@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
 		DPG_ACTIVE_WIDTH, width,
 		DPG_ACTIVE_HEIGHT, height);
 
+	/* set DPG offset */
+	REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+		DPG_X_OFFSET, offset,
+		DPG_SEGMENT_WIDTH, 0);
+
 	switch (test_pattern) {
 	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
 	case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 4093bec..64c5b42 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
 #define OPP_DPG_REG_LIST(id) \
 	SRI(DPG_CONTROL, DPG, id), \
 	SRI(DPG_DIMENSIONS, DPG, id), \
+	SRI(DPG_OFFSET_SEGMENT, DPG, id), \
 	SRI(DPG_COLOUR_B_CB, DPG, id), \
 	SRI(DPG_COLOUR_G_Y, DPG, id), \
 	SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
 	uint32_t FMT_422_CONTROL; \
 	uint32_t DPG_CONTROL; \
 	uint32_t DPG_DIMENSIONS; \
+	uint32_t DPG_OFFSET_SEGMENT; \
 	uint32_t DPG_COLOUR_B_CB; \
 	uint32_t DPG_COLOUR_G_Y; \
 	uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
 	OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
 	OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
 	OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+	OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+	OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
 	OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
 	type DPG_HRES; \
 	type DPG_ACTIVE_WIDTH; \
 	type DPG_ACTIVE_HEIGHT; \
+	type DPG_X_OFFSET; \
+	type DPG_SEGMENT_WIDTH; \
 	type DPG_COLOUR0_R_CR; \
 	type DPG_COLOUR1_R_CR; \
 	type DPG_COLOUR0_B_CB; \
@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
 	enum dc_color_depth color_depth,
 	const struct tg_color *solid_color,
 	int width,
-	int height);
+	int height,
+	int offset);
 
 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e310d67..a673952 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
 	.xfc_supported = true,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
+	.number_of_cursors = 1,
 };
 
 struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
 	.xfc_supported = true,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
-	.ptoi_supported = 0
+	.ptoi_supported = 0,
+	.number_of_cursors = 1,
 };
 
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
@@ -1039,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = {
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = false,
 		.clock_trace = true,
@@ -1058,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = true,
 		.clock_trace = true,
@@ -1254,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_pixel_clock = 600000,
 		.hdmi_ycbcr420_supported = true,
 		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1668,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
 		}
 }
 
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
 			const struct resource_pool *pool,
 			struct display_stream_compressor **dsc)
 {
@@ -1728,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
 			pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
 
 			if (pipe_ctx->stream_res.dsc)
-				release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+				dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
 		}
 	}
 
@@ -1972,22 +1975,6 @@ void dcn20_populate_dml_writeback_from_context(
 
 }
 
-static int get_num_odm_heads(struct pipe_ctx *pipe)
-{
-	int odm_head_count = 0;
-	struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-	while (next_pipe) {
-		odm_head_count++;
-		next_pipe = next_pipe->next_odm_pipe;
-	}
-	pipe = pipe->prev_odm_pipe;
-	while (pipe) {
-		odm_head_count++;
-		pipe = pipe->prev_odm_pipe;
-	}
-	return odm_head_count ? odm_head_count + 1 : 0;
-}
-
 int dcn20_populate_dml_pipes_from_context(
 		struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
 {
@@ -2067,8 +2054,8 @@ int dcn20_populate_dml_pipes_from_context(
 		pipes[pipe_cnt].dout.dp_lanes = 4;
 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-		switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
-		case 2:
+		switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+		case 1:
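+			/* one ODM split means two pipes combined 2:1 */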
 			pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
 			break;
 		default:
@@ -2076,9 +2063,14 @@ int dcn20_populate_dml_pipes_from_context(
 		}
 		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
 		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
-				== res_ctx->pipe_ctx[i].plane_state)
-			pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
-		else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+				== res_ctx->pipe_ctx[i].plane_state) {
+			struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
+
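+			/* Walk to the head of the MPC tree so every pipe sharing
+			 * this plane reports the same hsplit group.
+			 */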
+			while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+					== res_ctx->pipe_ctx[i].plane_state)
+				first_pipe = first_pipe->top_pipe;
+			pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+		} else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
 			struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
 
 			while (first_pipe->prev_odm_pipe)
@@ -2163,16 +2155,20 @@ int dcn20_populate_dml_pipes_from_context(
 		/* todo: default max for now, until there is logic reflecting this in dc*/
 		pipes[pipe_cnt].dout.output_bpc = 12;
 		/*
-		 * Use max cursor settings for calculations to minimize
+		 * For a graphics plane the cursor count is 1, for nv12 it is 0, avoiding extra
 		 * bw calculations due to cursor on/off
 		 */
-		pipes[pipe_cnt].pipe.src.num_cursors = 2;
+		if (res_ctx->pipe_ctx[i].plane_state &&
+				res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+			pipes[pipe_cnt].pipe.src.num_cursors = 0;
+		else
+			pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+
 		pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
 		pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
-		pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
-		pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
 
 		if (!res_ctx->pipe_ctx[i].plane_state) {
+			pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
 			pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
 			pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
 			pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
@@ -2198,19 +2194,21 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
 			pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
 			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
-			pipes[pipe_cnt].pipe.src.is_hsplit = 0;
-			pipes[pipe_cnt].pipe.dest.odm_combine = 0;
 			pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
 			pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
+
+			if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+				pipes[pipe_cnt].pipe.src.viewport_width /= 2;
+				pipes[pipe_cnt].pipe.dest.recout_width /= 2;
+			}
 		} else {
 			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
 			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
 
 			pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
-			pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
-					&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
-					|| (res_ctx->pipe_ctx[i].top_pipe
-					&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
+			pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
+					|| (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
+					|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
 			pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
 					|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
 			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -2235,18 +2233,22 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
 			pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
 			pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
-			pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
 			pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
-			if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
-				pipes[pipe_cnt].pipe.dest.full_recout_width +=
-						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
-				pipes[pipe_cnt].pipe.dest.full_recout_height +=
-						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
-			} else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
-				pipes[pipe_cnt].pipe.dest.full_recout_width +=
-						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
-				pipes[pipe_cnt].pipe.dest.full_recout_height +=
-						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
+			pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
+			if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+				pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+			else {
+				struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
+
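+				/* MPC split: accumulate the recout width of every
+				 * split pipe sharing this plane, below and above.
+				 */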
+				while (split_pipe && split_pipe->plane_state == pln) {
+					pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+					split_pipe = split_pipe->bottom_pipe;
+				}
+				split_pipe = res_ctx->pipe_ctx[i].top_pipe;
+				while (split_pipe && split_pipe->plane_state == pln) {
+					pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+					split_pipe = split_pipe->top_pipe;
+				}
 			}
 
 			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
@@ -2413,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
 				+ stream->timing.v_border_bottom;
 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
 		dsc_cfg.color_depth = stream->timing.display_color_depth;
+		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
 
@@ -2499,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
 	return secondary_pipe;
 }
 
-void dcn20_merge_pipes_for_validate(
+static void dcn20_merge_pipes_for_validate(
 		struct dc *dc,
 		struct dc_state *context)
 {
@@ -2524,7 +2527,7 @@ void dcn20_merge_pipes_for_validate(
 			odm_pipe->prev_odm_pipe = NULL;
 			odm_pipe->next_odm_pipe = NULL;
 			if (odm_pipe->stream_res.dsc)
-				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
 			/* Clear plane_res and stream_res */
 			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
 			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2562,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags(
 		struct dc *dc,
 		struct dc_state *context,
 		int vlevel,
-		bool *split)
+		bool *split,
+		bool *merge)
 {
 	int i, pipe_idx, vlevel_split;
+	int plane_count = 0;
 	bool force_split = false;
-	bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+	bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
 
-	/* Single display loop, exits if there is more than one display */
+	if (context->stream_count > 1) {
+		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+			avoid_split = true;
+	} else if (dc->debug.force_single_disp_pipe_split)
+		force_split = true;
+
+	/* TODO: fix dc bugs and remove this split threshold thing */
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-		bool exit_loop = false;
 
-		if (!pipe->stream || pipe->top_pipe)
-			continue;
-
-		if (dc->debug.force_single_disp_pipe_split) {
-			if (!force_split)
-				force_split = true;
-			else {
-				force_split = false;
-				exit_loop = true;
-			}
-		}
-		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
-			if (avoid_split)
-				avoid_split = false;
-			else {
-				avoid_split = true;
-				exit_loop = true;
-			}
-		}
-		if (exit_loop)
-			break;
+		if (pipe->stream && !pipe->prev_odm_pipe &&
+				(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+			++plane_count;
 	}
-	/* TODO: fix dc bugs and remove this split threshold thing */
-	if (context->stream_count > dc->res_pool->pipe_count / 2)
+	if (plane_count > dc->res_pool->pipe_count / 2)
 		avoid_split = true;
 
 	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@@ -2619,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags(
 	/* Split loop sets which pipe should be split based on dml outputs and dc flags */
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
 
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
 
-		if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+		if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
 			split[i] = true;
 		if ((pipe->stream->view_format ==
 				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -2636,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags(
 			split[i] = true;
 		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
 			split[i] = true;
-			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
 		}
-		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
-			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
+			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
+
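+		/* Reconcile dml's combine decision with the existing pipe tree:
+		 * keep trees that are already split, and flag merges where
+		 * mpc/odm combine is being exited.
+		 */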
+		if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
+			/* Already split odm pipe tree, don't try to split again */
+			split[i] = false;
+			split[pipe->prev_odm_pipe->pipe_idx] = false;
+		} else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
+				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+			/* Already split mpc tree, don't try to split again, assumes only 2x mpc combine */
+			split[i] = false;
+			split[pipe->top_pipe->pipe_idx] = false;
+		} else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
+			if (split[i] == false) {
+				/* Exiting mpc/odm combine */
+				merge[i] = true;
+				if (pipe->prev_odm_pipe) {
+					ASSERT(0); /* should not actually happen yet */
+					merge[pipe->prev_odm_pipe->pipe_idx] = true;
+				} else
+					merge[pipe->top_pipe->pipe_idx] = true;
+			} else {
+				/* Transition from mpc combine to odm combine or vice versa */
+				ASSERT(0); /* should not actually happen yet */
+				split[i] = true;
+				merge[i] = true;
+				if (pipe->prev_odm_pipe) {
+					split[pipe->prev_odm_pipe->pipe_idx] = true;
+					merge[pipe->prev_odm_pipe->pipe_idx] = true;
+				} else {
+					split[pipe->top_pipe->pipe_idx] = true;
+					merge[pipe->top_pipe->pipe_idx] = true;
+				}
+			}
+		}
+
 		/* Adjust dppclk when split is forced, do not bother with dispclk */
 		if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
 			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
@@ -2681,7 +2707,7 @@ bool dcn20_fast_validate_bw(
 	if (vlevel > context->bw_ctx.dml.soc.num_states)
 		goto validate_fail;
 
-	vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+	vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
 
 	/*initialize pipe_just_split_from to invalid idx*/
 	for (i = 0; i < MAX_PIPES; i++)
@@ -2901,6 +2927,9 @@ void dcn20_calculate_dlg_params(
 							!= dm_dram_clock_change_unsupported;
 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
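+	/* Honor the debug dispclk floor (dcn21 sets min_disp_clk_khz to 100000) */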
+	if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+		context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
 	/*
 	 * An artifact of dml pipe split/odm is that pipes get merged back together for
 	 * calculation. Therefore we need to only extract for first pipe in ascending index order
@@ -3138,7 +3167,7 @@ static struct dc_cap_funcs cap_funcs = {
 };
 
 
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
 {
 	enum dc_status result = DC_OK;
 
@@ -3164,7 +3193,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
 	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
-	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
 	.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
@@ -3886,6 +3915,15 @@ static bool dcn20_resource_construct(
 
 	dcn20_hw_sequencer_construct(dc);
 
+	// If NV12 (Navi12), set the PG function pointer to NULL. It's not
+	// that PG isn't supported on NV12, it's that we don't want to program
+	// the registers because that would cause more power to be consumed.
+	// We could have achieved the same effect in dcn20_init_hw by checking
+	// the ASIC rev, but there was a request at some point not to check
+	// ASIC rev in the hw sequencer.
+	if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+		dc->hwseq->funcs.enable_power_gating_plane = NULL;
+
 	dc->caps.max_planes =  pool->base.pipe_count;
 
 	for (i = 0; i < dc->caps.max_planes; ++i)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index f589384..9d5bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
 		display_e2e_pipe_params_st *pipes,
 		int pipe_cnt);
 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
-void dcn20_merge_pipes_for_validate(
-		struct dc *dc,
-		struct dc_state *context);
 int dcn20_validate_apply_pipe_split_flags(
 		struct dc *dc,
 		struct dc_state *context,
 		int vlevel,
-		bool *split);
+		bool *split,
+		bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+			const struct resource_pool *pool,
+			struct display_stream_compressor **dsc);
 bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
 void dcn20_split_stream_for_mpc(
 		struct resource_context *res_ctx,
@@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
 
 void dcn20_patch_bounding_box(
 		struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 9b70a1e..99a7ef6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct(
 	enc1->regs = regs;
 	enc1->se_shift = se_shift;
 	enc1->se_mask = se_mask;
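+	/* derive the zero-based stream encoder instance from the engine id */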
+	enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb0..f1ef46e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
 #define BASE(seg) \
 	BASE_INNER(seg)
 
-#define SRI(reg_name, block, id)\
-	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
-					mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
-	.field_name = reg_name ## __ ## field_name ## post_fix
-
 #define DCN20_VMID_REG_LIST(id)\
 	SRI(CNTL, DCN_VM_CONTEXT, id),\
 	SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index f546260..5e2d14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
 	return NUM_VMID;
 }
 
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks(
 {
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
+	bool wm_pending = false;
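+	/* A watermark may only be lowered when safe_to_lower; if a decrease is
+	 * requested while it is not safe, skip the register write and report
+	 * the change as pending so it can be applied later.
+	 */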
 
 	/* Repeat for water mark set A, B, C and D. */
 	/* clock state A */
@@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+		wm_pending = true;
 
 	/* determine the transfer time for a quantity of data for a particular requestor.*/
 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
-	}
+	} else if (watermarks->a.frac_urg_bw_flip
+			< hubbub1->watermarks.a.frac_urg_bw_flip)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
-	}
+	} else if (watermarks->a.frac_urg_bw_nom
+			< hubbub1->watermarks.a.frac_urg_bw_nom)
+		wm_pending = true;
+
 	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
 		hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
 		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
 				refclk_mhz, 0x1fffff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
-	}
+	} else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+		wm_pending = true;
 
 	/* determine the transfer time for a quantity of data for a particular requestor.*/
 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
-	}
+	} else if (watermarks->a.frac_urg_bw_flip
+			< hubbub1->watermarks.a.frac_urg_bw_flip)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
-	}
+	} else if (watermarks->a.frac_urg_bw_nom
+			< hubbub1->watermarks.a.frac_urg_bw_nom)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
 		hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
@@ -226,7 +239,8 @@ void hubbub21_program_urgent_watermarks(
 				refclk_mhz, 0x1fffff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
-	}
+	} else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+		wm_pending = true;
 
 	/* determine the transfer time for a quantity of data for a particular requestor.*/
 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
-	}
+	} else if (watermarks->a.frac_urg_bw_flip
+			< hubbub1->watermarks.a.frac_urg_bw_flip)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
-	}
+	} else if (watermarks->a.frac_urg_bw_nom
+			< hubbub1->watermarks.a.frac_urg_bw_nom)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
 		hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
@@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks(
 				refclk_mhz, 0x1fffff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
-	}
+	} else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.urgent_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+		wm_pending = true;
 
 	/* determine the transfer time for a quantity of data for a particular requestor.*/
 	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
-	}
+	} else if (watermarks->a.frac_urg_bw_flip
+			< hubbub1->watermarks.a.frac_urg_bw_flip)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
 			> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks(
 
 		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
 				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
-	}
+	} else if (watermarks->a.frac_urg_bw_nom
+			< hubbub1->watermarks.a.frac_urg_bw_nom)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
 		hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
@@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks(
 				refclk_mhz, 0x1fffff);
 		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
 				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
-	}
+	} else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
+		wm_pending = true;
+
+	return wm_pending;
 }
 
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks(
 {
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
+	bool wm_pending = false;
 
 	/* clock state A */
 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+			< hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+		wm_pending = true;
 
 	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
 			> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n",
 			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
+			< hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+		wm_pending = true;
+
+	return wm_pending;
 }
 
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
@@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks(
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
 	uint32_t prog_wm_value;
 
+	bool wm_pending = false;
+
 	/* clock state A */
 	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
 			> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
@@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->a.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state B */
 	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->b.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state C */
 	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->c.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+		wm_pending = true;
 
 	/* clock state D */
 	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks(
 		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
 			"HW register value = 0x%x\n\n",
 			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
-	}
+	} else if (watermarks->d.cstate_pstate.pstate_change_ns
+			< hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
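+			/* remember that self refresh is blocked, and on which
+			 * frame, so it can be re-allowed once the transition
+			 * has passed
+			 */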
+		wm_pending = true;
+
+	return wm_pending;
 }
 
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower)
 {
 	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+	bool wm_pending = false;
 
-	hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
-	hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
-	hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+	if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
+
+	if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
+
+	if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+		wm_pending = true;
 
 	/*
 	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
@@ -549,6 +614,8 @@ void hubbub21_program_watermarks(
 			DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
 
 	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+	return wm_pending;
 }
 
 void hubbub21_wm_read_state(struct hubbub *hubbub,
@@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = {
 	.wm_read_state = hubbub21_wm_read_state,
 	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
 	.program_watermarks = hubbub21_program_watermarks,
+	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
 	.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index c4840df..ef3ef28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -113,22 +113,22 @@
 void dcn21_dchvm_init(struct hubbub *hubbub);
 int hubbub21_init_dchub(struct hubbub *hubbub,
 		struct dcn_hubbub_phys_addr_config *pa_config);
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower);
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower);
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
 		bool safe_to_lower);
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
 		struct hubbub *hubbub,
 		struct dcn_watermark_set *watermarks,
 		unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index cf09b93..d285ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
 		struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
 {
 	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
-	uint32_t cur_value;
+	uint32_t refcyc_per_vm_group_vblank;
+	uint32_t refcyc_per_vm_req_vblank;
+	uint32_t refcyc_per_vm_group_flip;
+	uint32_t refcyc_per_vm_req_flip;
+	const uint32_t uninitialized_hw_default = 0;
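+	/* A readback of 0 means the register was never programmed, so program
+	 * it in that case too rather than treating 0 as a lower bound.
+	 */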
 
-	REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
-	if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+	REG_GET(VBLANK_PARAMETERS_5,
+			REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
+
+	if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+			refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
 		REG_SET(VBLANK_PARAMETERS_5, 0,
 				REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
 
 	REG_GET(VBLANK_PARAMETERS_6,
-			REFCYC_PER_VM_REQ_VBLANK,
-			&cur_value);
-	if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+			REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+	if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+			refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
 		REG_SET(VBLANK_PARAMETERS_6, 0,
 				REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
 
-	REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
-	if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+	REG_GET(FLIP_PARAMETERS_3,
+			REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+	if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+			refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
 		REG_SET(FLIP_PARAMETERS_3, 0,
 				REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
 
-	REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
-	if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+	REG_GET(FLIP_PARAMETERS_4,
+			REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+	if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+			refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
 		REG_SET(FLIP_PARAMETERS_4, 0,
 					REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
 
 	REG_SET(FLIP_PARAMETERS_5, 0,
 			REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+
 	REG_SET(FLIP_PARAMETERS_6, 0,
 			REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
 }
@@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
 {
 	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
 
-	PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
 	PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
 	PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
 
-	// The format of default addr is 48:12 of the 48 bit addr
-	mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
 	// The format of high/low are 48:18 of the 48 bit addr
 	mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
 	mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 081ad8e..ada65b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
 			true);
 }
 
+/* If the user hotplugs an HDMI monitor while in monitor off,
+ * the OS will do a mode set (with output timing) but keep the output off.
+ * In this case DAL will ask vbios to power up the PLL in the PHY.
+ * If the user then unplugs the monitor (while still in monitor off), or
+ * the system attempts to enter modern standby (where we disable the PLL),
+ * the PHY will hang on the next mode set attempt:
+ * if a PLL enable is followed by a PLL disable (without executing lane
+ * enable/disable), RDPCS_PHY_DP_MPLLB_STATE remains 1,
+ * which indicates that the PLL disable attempt didn't actually go through.
+ * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */
+void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+	if (!pipe_ctx->stream->dpms_off)
+		return;
+
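+	/* Briefly mark the stream as on so enable/disable actually toggles
+	 * the PHY lanes, then restore dpms_off.
+	 */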
+	pipe_ctx->stream->dpms_off = false;
+	core_link_enable_stream(context, pipe_ctx);
+	core_link_disable_stream(pipe_ctx);
+	pipe_ctx->stream->dpms_off = true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 1827360..26bf24d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
 		const struct dc *dc,
 		struct dc_state *context);
 
+void dcn21_PLAT_58856_wa(struct dc_state *context,
+		struct pipe_ctx *pipe_ctx);
+
 #endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index fddbd59..b9ff976 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = NULL,
 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
 	.update_pending_status = dcn10_update_pending_status,
@@ -51,7 +52,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 	.disable_audio_stream = dce110_disable_audio_stream,
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
-	.pipe_control_lock_global = dcn20_pipe_control_lock_global,
+	.interdependent_update_lock = dcn10_lock_all_pipes,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,
@@ -104,6 +105,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
 	.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
 	.enable_stream_timing = dcn20_enable_stream_timing,
 	.edp_backlight_control = dce110_edp_backlight_control,
+	.is_panel_backlight_on = dce110_is_panel_backlight_on,
+	.is_panel_powered_on = dce110_is_panel_powered_on,
 	.disable_stream_gating = dcn20_disable_stream_gating,
 	.enable_stream_gating = dcn20_enable_stream_gating,
 	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -127,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
 	.dccg_init = dcn20_dccg_init,
 	.set_blend_lut = dcn20_set_blend_lut,
 	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+	.PLAT_58856_wa = dcn21_PLAT_58856_wa,
 };
 
 void dcn21_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 33d0a17..51b5910 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -84,7 +84,7 @@
 #include "dcn21_resource.h"
 #include "vm_helper.h"
 #include "dcn20/dcn20_vmid.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
 
 #define SOC_BOUNDING_BOX_VALID false
 #define DC_LOGGER_INIT(logger)
@@ -156,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
 	.xfc_supported = false,
 	.xfc_fill_bw_overhead_percent = 10.0,
 	.xfc_fill_constant_bytes = 0,
-	.ptoi_supported = 0
+	.ptoi_supported = 0,
+	.number_of_cursors = 1,
 };
 
 struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.clock_limits = {
 			{
 				.state = 0,
-				.dcfclk_mhz = 304.0,
-				.fabricclk_mhz = 600.0,
-				.dispclk_mhz = 618.0,
-				.dppclk_mhz = 440.0,
+				.dcfclk_mhz = 400.0,
+				.fabricclk_mhz = 400.0,
+				.dispclk_mhz = 600.0,
+				.dppclk_mhz = 400.00,
 				.phyclk_mhz = 600.0,
 				.socclk_mhz = 278.0,
 				.dscclk_mhz = 205.67,
@@ -174,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 			},
 			{
 				.state = 1,
-				.dcfclk_mhz = 304.0,
-				.fabricclk_mhz = 600.0,
-				.dispclk_mhz = 618.0,
-				.dppclk_mhz = 618.0,
+				.dcfclk_mhz = 464.52,
+				.fabricclk_mhz = 800.0,
+				.dispclk_mhz = 654.55,
+				.dppclk_mhz = 626.09,
 				.phyclk_mhz = 600.0,
 				.socclk_mhz = 278.0,
 				.dscclk_mhz = 205.67,
@@ -185,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 			},
 			{
 				.state = 2,
-				.dcfclk_mhz = 608.0,
-				.fabricclk_mhz = 1066.0,
-				.dispclk_mhz = 888.0,
-				.dppclk_mhz = 888.0,
-				.phyclk_mhz = 810.0,
+				.dcfclk_mhz = 514.29,
+				.fabricclk_mhz = 933.0,
+				.dispclk_mhz = 757.89,
+				.dppclk_mhz = 685.71,
+				.phyclk_mhz = 600.0,
 				.socclk_mhz = 278.0,
 				.dscclk_mhz = 287.67,
-				.dram_speed_mts = 2133.0,
+				.dram_speed_mts = 1866.0,
 			},
 			{
 				.state = 3,
-				.dcfclk_mhz = 676.0,
-				.fabricclk_mhz = 1600.0,
-				.dispclk_mhz = 1015.0,
-				.dppclk_mhz = 1015.0,
-				.phyclk_mhz = 810.0,
+				.dcfclk_mhz = 576.00,
+				.fabricclk_mhz = 1067.0,
+				.dispclk_mhz = 847.06,
+				.dppclk_mhz = 757.89,
+				.phyclk_mhz = 600.0,
 				.socclk_mhz = 715.0,
 				.dscclk_mhz = 318.334,
-				.dram_speed_mts = 4266.0,
+				.dram_speed_mts = 2134.0,
 			},
 			{
 				.state = 4,
-				.dcfclk_mhz = 810.0,
+				.dcfclk_mhz = 626.09,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 900.00,
+				.dppclk_mhz = 847.06,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 953.0,
+				.dscclk_mhz = 489.0,
+				.dram_speed_mts = 2400.0,
+			},
+			{
+				.state = 5,
+				.dcfclk_mhz = 685.71,
+				.fabricclk_mhz = 1333.0,
+				.dispclk_mhz = 1028.57,
+				.dppclk_mhz = 960.00,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 278.0,
+				.dscclk_mhz = 287.67,
+				.dram_speed_mts = 2666.0,
+			},
+			{
+				.state = 6,
+				.dcfclk_mhz = 757.89,
+				.fabricclk_mhz = 1467.0,
+				.dispclk_mhz = 1107.69,
+				.dppclk_mhz = 1028.57,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 715.0,
+				.dscclk_mhz = 318.334,
+				.dram_speed_mts = 3200.0,
+			},
+			{
+				.state = 7,
+				.dcfclk_mhz = 847.06,
 				.fabricclk_mhz = 1600.0,
 				.dispclk_mhz = 1395.0,
-				.dppclk_mhz = 1285.0,
+				.dppclk_mhz = 1285.00,
 				.phyclk_mhz = 1325.0,
 				.socclk_mhz = 953.0,
 				.dscclk_mhz = 489.0,
@@ -218,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 			},
 			/*Extra state, no dispclk ramping*/
 			{
-				.state = 5,
-				.dcfclk_mhz = 810.0,
+				.state = 8,
+				.dcfclk_mhz = 847.06,
 				.fabricclk_mhz = 1600.0,
 				.dispclk_mhz = 1395.0,
 				.dppclk_mhz = 1285.0,
@@ -266,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.xfc_bus_transport_time_us = 4,
 	.xfc_xbuf_latency_tolerance_us = 4,
 	.use_urgent_burst_bw = 1,
-	.num_states = 5
+	.num_states = 9
 };
 
 #ifndef MAX
@@ -821,11 +855,12 @@ static const struct dc_plane_cap plane_cap = {
 };
 
 static const struct dc_debug_options debug_defaults_drv = {
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = false,
 		.clock_trace = true,
 		.disable_pplib_clock_request = true,
+		.min_disp_clk_khz = 100000,
 		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
 		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
@@ -841,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
-		.disable_dmcu = true,
+		.disable_dmcu = false,
 		.force_abm_enable = false,
 		.timing_trace = true,
 		.clock_trace = true,
@@ -962,6 +997,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
 	if (pool->base.dmcu != NULL)
 		dce_dmcu_destroy(&pool->base.dmcu);
 
+	if (pool->base.psr != NULL)
+		dmub_psr_destroy(&pool->base.psr);
+
 	if (pool->base.dccg != NULL)
 		dcn_dccg_destroy(&pool->base.dccg);
 
@@ -1335,26 +1373,78 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
 	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
 	struct clk_limit_table *clk_table = &bw_params->clk_table;
-	int i;
+	unsigned int i, j, k;
+	int closest_clk_lvl;
 
-	dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
-	dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
-	dcn2_1_soc.num_chans = bw_params->num_channels;
+	// diags does not retrieve proper values from SMU
+	// cap states to 5 and make state 5 the max state
+	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
+		dcn2_1_soc.num_states = 5;
 
-	for (i = 0; i < clk_table->num_entries; i++) {
+		dcn2_1_soc.clock_limits[5].state = 5;
+		dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
+		dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
+		dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
+		dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
+		dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
+		dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
+		dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
+		dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
+	} else {
+		dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+		dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+		dcn2_1_soc.num_chans = bw_params->num_channels;
 
-		dcn2_1_soc.clock_limits[i].state = i;
-		dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-		dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-		dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
-		dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+		/* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
+		dcn2_1_soc.clock_limits[0].state = 0;
+		dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
+		dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
+		dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
+		dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
+
+		/*
+		 * Other levels: find the closest DCN clocks that fit the given clock limit,
+		 * using dcfclk as the indicator
+		 */
+
+		closest_clk_lvl = -1;
+		/* index currently being filled */
+		k = 1;
+		for (i = 1; i < clk_table->num_entries; i++) {
+			/* loop backwards, skipping the duplicate state; +1 because the SMU has a precision issue */
+			for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+				if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+					closest_clk_lvl = j;
+					break;
+				}
+			}
+
+			/* if a level that fits is found, use its DCN clocks; if not, go to the next clock limit */
+			if (closest_clk_lvl != -1) {
+				dcn2_1_soc.clock_limits[k].state = i;
+				dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+				dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+				dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+				dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+				dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+				dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+				dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+				dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+				dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+				dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+				dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+				k++;
+			}
+		}
+
+		/* duplicate last level */
+		dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
+		dcn2_1_soc.clock_limits[k].state = k;
+		dcn2_1_soc.num_states = k + 1;
 	}
-	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
-	dcn2_1_soc.num_states = i;
 
-	// diags does not retrieve proper values from SMU, do not update DML instance for diags
-	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
-		dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+	dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
 
 /* Temporary placeholder until we can get them from fuse */
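
The update_bw_bounding_box() rework above stops copying SMU clock entries
one-to-one and instead matches each fused dcfclk to the nearest predefined
bounding-box state, inheriting the remaining DCN clocks (dispclk, dppclk,
phyclk, ...) from the matched state. A minimal standalone sketch of the
matching step, with simplified types and illustrative values rather than the
driver's actual dcn2_1_soc table:

    #include <stdio.h>

    struct clk_state {
        double dcfclk_mhz;
        double dispclk_mhz;
    };

    /* Illustrative bounding-box states, lowest first; the top entry is
     * duplicated the way the driver's "no dispclk ramping" state is. */
    static const struct clk_state soc_states[] = {
        { 400.00,  600.00 },
        { 464.52,  654.55 },
        { 514.29,  757.89 },
        { 576.00,  847.06 },
        { 626.09,  900.00 },
        { 847.06, 1395.00 },
        { 847.06, 1395.00 },
    };

    #define NUM_STATES (sizeof(soc_states) / sizeof(soc_states[0]))

    /* Mirror of the backwards scan above: return the highest predefined
     * state at index >= k whose dcfclk fits under the fused limit, or -1
     * if none does.  NUM_STATES - 2 skips the duplicated top state. */
    static int closest_clk_lvl(unsigned int fused_dcfclk_mhz, int k)
    {
        int j;

        for (j = (int)NUM_STATES - 2; j >= k; j--)
            if ((unsigned int)soc_states[j].dcfclk_mhz <= fused_dcfclk_mhz)
                return j;
        return -1;
    }

    int main(void)
    {
        int lvl = closest_clk_lvl(700, 1);

        if (lvl >= 0)
            printf("fused 700 MHz -> state %d, dispclk %.2f MHz\n",
                   lvl, soc_states[lvl].dispclk_mhz);
        return 0;
    }
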
@@ -1476,6 +1566,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
 		hws->shifts = &hwseq_shift;
 		hws->masks = &hwseq_mask;
 		hws->wa.DEGVIDCN21 = true;
+		hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
 	}
 	return hws;
 }
@@ -1499,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = {
 		.max_hdmi_pixel_clock = 600000,
 		.hdmi_ycbcr420_supported = true,
 		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
 		.flags.bits.IS_HBR2_CAPABLE = true,
 		.flags.bits.IS_HBR3_CAPABLE = true,
 		.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1639,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context(
 	return pipe_cnt;
 }
 
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+	enum dc_status result = DC_OK;
+
+	if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+		plane_state->dcc.enable = 1;
+		/* align to our worst case block width */
+		plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+	}
+	result = dcn20_patch_unknown_plane_state(plane_state);
+	return result;
+}
+
 static struct resource_funcs dcn21_res_pool_funcs = {
 	.destroy = dcn21_destroy_resource_pool,
 	.link_enc_create = dcn21_link_encoder_create,
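
dcn21_patch_unknown_plane_state() above aligns the DCC meta pitch to the
worst-case 1024-wide block with the usual ((x + n - 1) / n) * n integer
round-up. A quick self-contained check of the idiom (widths are illustrative):

    #include <assert.h>

    /* Round v up to the next multiple of align (align must be > 0). */
    static unsigned int round_up_to(unsigned int v, unsigned int align)
    {
        return ((v + align - 1) / align) * align;
    }

    int main(void)
    {
        assert(round_up_to(1,    1024) == 1024);
        assert(round_up_to(1024, 1024) == 1024);
        assert(round_up_to(1025, 1024) == 2048);
        assert(round_up_to(1920, 1024) == 2048);
        return 0;
    }
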
@@ -1648,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
-	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+	.patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
 	.update_bw_bounding_box = update_bw_bounding_box
@@ -1695,6 +1800,7 @@ static bool dcn21_resource_construct(
 	dc->caps.force_dp_tps4_for_cp2520 = true;
 	dc->caps.extended_aux_timeout_support = true;
 	dc->caps.dmcub_support = true;
+	dc->caps.is_apu = true;
 
 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
 		dc->debug = debug_defaults_drv;
@@ -1758,9 +1864,15 @@ static bool dcn21_resource_construct(
 		goto create_fail;
 	}
 
-	// Leave as NULL to not affect current dmcu psr programming sequence
-	// Will be uncommented when functionality is confirmed to be working
-	pool->base.psr = NULL;
+	if (dc->debug.disable_dmcu) {
+		pool->base.psr = dmub_psr_create(ctx);
+
+		if (pool->base.psr == NULL) {
+			dm_error("DC: failed to create psr obj!\n");
+			BREAK_TO_DEBUGGER();
+			goto create_fail;
+		}
+	}
 
 	pool->base.abm = dce_abm_create(ctx,
 			&abm_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d..968c46d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
 	uint8_t otg_inst;
 	uint8_t link_enc_inst;
 	uint8_t stream_enc_inst;
+	uint8_t mst_supported;
 	void *dm_stream_ctx;
 	bool dpms_off;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 485a9c6..5bbbafa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 
 	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
 			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+
+		for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+			if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+				if (mode_lib->vba.DRAMClockChangeWatermark >
+				dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+					mode_lib->vba.MinTTUVBlank[k] += 25;
+			}
+		}
 		mode_lib->vba.DRAMClockChangeWatermark += 25;
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
 	} else if (mode_lib->vba.DummyPStateCheck &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 658f81e..dfd3be4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,7 +25,7 @@
 #ifndef __DISPLAY_MODE_STRUCTS_H__
 #define __DISPLAY_MODE_STRUCTS_H__
 
-#define MAX_CLOCK_LIMIT_STATES 8
+#define MAX_CLOCK_LIMIT_STATES 9
 
 typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
 typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
@@ -61,12 +61,15 @@ struct _vcs_dpi_voltage_scaling_st {
 	double dram_speed_mts;
 	double fabricclk_mhz;
 	double dispclk_mhz;
+	double dram_bw_per_chan_gbps;
 	double phyclk_mhz;
 	double dppclk_mhz;
 	double dtbclk_mhz;
 };
 
 struct _vcs_dpi_soc_bounding_box_st {
+	struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+	unsigned int num_states;
 	double sr_exit_time_us;
 	double sr_enter_plus_exit_time_us;
 	double urgent_latency_us;
@@ -109,8 +112,7 @@ struct _vcs_dpi_soc_bounding_box_st {
 	double xfc_bus_transport_time_us;
 	double xfc_xbuf_latency_tolerance_us;
 	int use_urgent_burst_bw;
-	unsigned int num_states;
-	struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+	double min_dcfclk;
 	bool do_urgent_latency_adjustment;
 	double urgent_latency_adjustment_fabric_clock_component_us;
 	double urgent_latency_adjustment_fabric_clock_reference_mhz;
@@ -189,7 +191,7 @@ struct _vcs_dpi_ip_params_st {
 	unsigned int min_vblank_lines;
 	unsigned int dppclk_delay_subtotal;
 	unsigned int dispclk_delay_subtotal;
-	unsigned int dcfclk_cstate_latency;
+	double dcfclk_cstate_latency;
 	unsigned int dppclk_delay_scl;
 	unsigned int dppclk_delay_scl_lb_only;
 	unsigned int dppclk_delay_cnvc_formatter;
@@ -202,6 +204,7 @@ struct _vcs_dpi_ip_params_st {
 	unsigned int LineBufferFixedBpp;
 	unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
 	unsigned int bug_forcing_LC_req_same_size_fixed;
+	unsigned int number_of_cursors;
 };
 
 struct _vcs_dpi_display_xfc_params_st {
@@ -325,7 +328,6 @@ struct _vcs_dpi_display_pipe_dest_params_st {
 	unsigned int vupdate_width;
 	unsigned int vready_offset;
 	unsigned char interlaced;
-	unsigned char embedded;
 	double pixel_rate_mhz;
 	unsigned char synchronized_vblank_all_planes;
 	unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b3c96d9..6b525c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
 		mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
 		mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
 	}
-	mode_lib->vba.MinVoltageLevel = 0;
-	mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
 
 	mode_lib->vba.DoUrgentLatencyAdjustment =
 		soc->do_urgent_latency_adjustment;
@@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 
 		mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
 
-		mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
 		mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
 		mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
 				(enum scan_direction_class) (src->source_scan);
@@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
 		mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
 				src->viewport_y_c;
 		mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
-		mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
-		mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
+		mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
+		mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
 		mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
-		mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
-		mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
+		mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
+		mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
 		mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
 		mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
 		mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 2875efd..5d82fc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -389,7 +389,6 @@ struct vba_vars_st {
 
 	/* vba mode support */
 	/*inputs*/
-	bool EmbeddedPanel[DC__NUM_DPP__MAX];
 	bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
 	double MaxHSCLRatio;
 	double MaxVSCLRatio;
@@ -842,8 +841,6 @@ struct vba_vars_st {
 	double DCCRateChroma[DC__NUM_DPP__MAX];
 
 	double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
-	int MinVoltageLevel;
-	int MaxVoltageLevel;
 
 	bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
 	bool NumberOfHDMIFRLSupport;
@@ -880,7 +877,6 @@ struct vba_vars_st {
 	double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
 	double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
 	double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
-	bool UseMinimumRequiredDCFCLK;
 	double WritebackDelayTime[DC__NUM_DPP__MAX];
 	unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
 	unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index d2d36d4..f252af1 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -47,9 +47,9 @@
 #include "dce120/hw_factory_dce120.h"
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/hw_factory_dcn10.h"
-#endif
 #include "dcn20/hw_factory_dcn20.h"
 #include "dcn21/hw_factory_dcn21.h"
+#endif
 
 #include "diagnostics/hw_factory_diag.h"
 
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 5d39665..04e2c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -45,9 +45,9 @@
 #include "dce120/hw_translate_dce120.h"
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dcn10/hw_translate_dcn10.h"
-#endif
 #include "dcn20/hw_translate_dcn20.h"
 #include "dcn21/hw_translate_dcn21.h"
+#endif
 
 #include "diagnostics/hw_translate_diag.h"
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f285b76..d523fc9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -124,7 +124,7 @@ struct resource_funcs {
 				struct dc *dc,
 				struct dc_state *new_ctx,
 				struct dc_stream_state *stream);
-	enum dc_status (*get_default_swizzle_mode)(
+	enum dc_status (*patch_unknown_plane_state)(
 			struct dc_plane_state *plane_state);
 
 	struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 8b1f0ce..e94e5fb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
 
 bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
 
+void dpcd_set_source_specific_data(struct dc_link *link);
+
 void dp_set_fec_ready(struct dc_link *link, bool ready);
 void dp_set_fec_enable(struct dc_link *link, bool enable);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ac530c0..ce65678 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -27,6 +27,7 @@
 #define __DAL_CLK_MGR_H__
 
 #include "dc.h"
+#include "dm_pp_smu.h"
 
 #define DCN_MINIMUM_DISPCLK_Khz 100000
 #define DCN_MINIMUM_DPPCLK_Khz 100000
@@ -193,6 +194,7 @@ struct clk_mgr {
 	int dentist_vco_freq_khz;
 	struct clk_state_registers_and_bypass boot_snapshot;
 	struct clk_bw_params *bw_params;
+	struct pp_smu_wm_range_sets ranges;
 };
 
 /* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 862952c..9311d0d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt(
 		struct dc *dc,
 		struct dc_state *context);
 
+int clk_mgr_helper_get_active_plane_cnt(
+		struct dc *dc,
+		struct dc_state *context);
+
 
 
 #endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 05ee529..336c80a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -27,11 +27,12 @@
 #define __DAL_DCCG_H__
 
 #include "dc_types.h"
+#include "hw_shared.h"
 
 struct dccg {
 	struct dc_context *ctx;
 	const struct dccg_funcs *funcs;
-
+	int pipe_dppclk_khz[MAX_PIPES];
 	int ref_dppclk;
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c0dc1d0..f5dd0cc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -134,7 +134,7 @@ struct hubbub_funcs {
 			unsigned int dccg_ref_freq_inKhz,
 			unsigned int *dchub_ref_freq_inKhz);
 
-	void (*program_watermarks)(
+	bool (*program_watermarks)(
 			struct hubbub *hubbub,
 			struct dcn_watermark_set *watermarks,
 			unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c597400..7c2a332 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -39,6 +39,7 @@ struct dsc_config {
 	uint32_t pic_height;
 	enum dc_pixel_encoding pixel_encoding;
 	enum dc_color_depth color_depth;  /* Bits per component */
+	bool is_odm;
 	struct dc_dsc_config dc_dsc_cfg;
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 459f95f..f30ab491 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -25,16 +25,15 @@
 #ifndef __DC_DWBC_H__
 #define __DC_DWBC_H__
 
+#include "dal_types.h"
 #include "dc_hw_types.h"
 
-
 #define DWB_SW_V2	1
 #define DWB_MCIF_BUF_COUNT 4
 
 /* forward declaration of mcif_wb struct */
 struct mcif_wb;
 
-enum dce_version;
 
 enum dwb_sw_version {
 	dwb_ver_1_0 = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fb748f0..c2b392a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -68,6 +68,7 @@ struct encoder_feature_support {
 	unsigned int max_hdmi_pixel_clock;
 	bool hdmi_ycbcr420_supported;
 	bool dp_ycbcr420_supported;
+	bool fec_supported;
 };
 
 union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7575564..2717352 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -310,7 +310,8 @@ struct opp_funcs {
 			enum dc_color_depth color_depth,
 			const struct tg_color *solid_color,
 			int width,
-			int height);
+			int height,
+			int offset);
 
 	bool (*dpg_is_blanked)(
 			struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 351b387..ac6523c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -103,6 +103,7 @@ struct stream_encoder {
 	struct dc_context *ctx;
 	struct dc_bios *bp;
 	enum engine_id id;
+	uint32_t stream_enc_inst;
 };
 
 struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 209118f..d4c1fb2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
 			int num_planes, struct dc_state *context);
 	void (*program_front_end_for_ctx)(struct dc *dc,
 			struct dc_state *context);
+	void (*post_unlock_program_front_end)(struct dc *dc,
+			struct dc_state *context);
 	void (*update_plane_addr)(const struct dc *dc,
 			struct pipe_ctx *pipe_ctx);
 	void (*update_dchub)(struct dce_hwseq *hws,
@@ -78,10 +80,10 @@ struct hw_sequencer_funcs {
 	void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
 
 	/* Pipe Lock Related */
-	void (*pipe_control_lock_global)(struct dc *dc,
-			struct pipe_ctx *pipe, bool lock);
 	void (*pipe_control_lock)(struct dc *dc,
 			struct pipe_ctx *pipe, bool lock);
+	void (*interdependent_update_lock)(struct dc *dc,
+			struct dc_state *context, bool lock);
 	void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
 			bool flip_immediate);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index ecf5663..52a26e6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -40,10 +40,13 @@ struct dce_hwseq_wa {
 	bool false_optc_underflow;
 	bool DEGVIDCN10_254;
 	bool DEGVIDCN21;
+	bool disallow_self_refresh_during_multi_plane_transition;
 };
 
 struct hwseq_wa_state {
 	bool DEGVIDCN10_253_applied;
+	bool disallow_self_refresh_during_multi_plane_transition_applied;
+	unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
 };
 
 struct pipe_ctx;
@@ -97,6 +100,8 @@ struct hwseq_private_funcs {
 			struct dc *dc);
 	void (*edp_backlight_control)(struct dc_link *link,
 			bool enable);
+	bool (*is_panel_backlight_on)(struct dc_link *link);
+	bool (*is_panel_powered_on)(struct dc_link *link);
 	void (*setup_vupdate_interrupt)(struct dc *dc,
 			struct pipe_ctx *pipe_ctx);
 	bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
@@ -140,6 +145,8 @@ struct hwseq_private_funcs {
 			const struct dc_plane_state *plane_state);
 	bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
 			const struct dc_plane_state *plane_state);
+	void (*PLAT_58856_wa)(struct dc_state *context,
+			struct pipe_ctx *pipe_ctx);
 };
 
 struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5ae8ada..ca4c36c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
 
 void get_audio_check(struct audio_info *aud_modes,
 	struct audio_check *aud_chk);
+
+int get_num_odm_splits(struct pipe_ctx *pipe);
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index cd9532b..10b5fa9 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -50,6 +50,7 @@ enum dmub_cmd_type {
 	DMUB_CMD__REG_REG_WAIT = 4,
 	DMUB_CMD__PLAT_54186_WA = 5,
 	DMUB_CMD__PSR = 64,
+	DMUB_CMD__ABM = 66,
 	DMUB_CMD__VBIOS = 128,
 };
 
@@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init {
 
 struct dmub_cmd_psr_copy_settings_data {
 	uint16_t psr_level;
-	uint8_t hubp_inst;
 	uint8_t dpp_inst;
 	uint8_t mpcc_inst;
 	uint8_t opp_inst;
@@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data {
 	uint8_t digbe_inst;
 	uint8_t dpphy_inst;
 	uint8_t aux_inst;
-	uint8_t hyst_frames;
-	uint8_t hyst_lines;
-	uint8_t phy_num;
-	uint8_t phy_type;
-	uint8_t aux_repeat;
 	uint8_t smu_optimizations_en;
-	uint8_t skip_wait_for_pll_lock;
 	uint8_t frame_delay;
-	uint8_t smu_phy_id;
-	uint8_t num_of_controllers;
-	uint8_t link_rate;
 	uint8_t frame_cap_ind;
 };
 
@@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable {
 	struct dmub_cmd_header header;
 };
 
-struct dmub_cmd_psr_setup_data {
+struct dmub_cmd_psr_set_version_data {
 	enum psr_version version; // PSR version 1 or 2
 };
 
-struct dmub_rb_cmd_psr_setup {
+struct dmub_rb_cmd_psr_set_version {
 	struct dmub_cmd_header header;
-	struct dmub_cmd_psr_setup_data psr_setup_data;
+	struct dmub_cmd_psr_set_version_data psr_set_version_data;
+};
+
+struct dmub_cmd_abm_set_pipe_data {
+	uint32_t ramping_boundary;
+	uint32_t otg_inst;
+};
+
+struct dmub_rb_cmd_abm_set_pipe {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
+};
+
+struct dmub_cmd_abm_set_backlight_data {
+	uint32_t frame_ramp;
+};
+
+struct dmub_rb_cmd_abm_set_backlight {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data;
+};
+
+struct dmub_cmd_abm_set_level_data {
+	uint32_t level;
+};
+
+struct dmub_rb_cmd_abm_set_level {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_abm_set_level_data abm_set_level_data;
+};
+
+struct dmub_cmd_abm_set_ambient_level_data {
+	uint32_t ambient_lux;
+};
+
+struct dmub_rb_cmd_abm_set_ambient_level {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data;
+};
+
+struct dmub_cmd_abm_set_pwm_frac_data {
+	uint32_t fractional_pwm;
+};
+
+struct dmub_rb_cmd_abm_set_pwm_frac {
+	struct dmub_cmd_header header;
+	struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
 };
 
 union dmub_rb_cmd {
@@ -277,11 +314,16 @@ union dmub_rb_cmd {
 	struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
 	struct dmub_rb_cmd_dpphy_init dpphy_init;
 	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
-	struct dmub_rb_cmd_psr_enable psr_enable;
+	struct dmub_rb_cmd_psr_set_version psr_set_version;
 	struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+	struct dmub_rb_cmd_psr_enable psr_enable;
 	struct dmub_rb_cmd_psr_set_level psr_set_level;
 	struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
-	struct dmub_rb_cmd_psr_setup psr_setup;
+	struct dmub_rb_cmd_abm_set_pipe abm_set_pipe;
+	struct dmub_rb_cmd_abm_set_backlight abm_set_backlight;
+	struct dmub_rb_cmd_abm_set_level abm_set_level;
+	struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
+	struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
 };
 
 #pragma pack(pop)
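
The new ABM commands follow the same ring-buffer shape as the PSR family: a
command header plus a small payload struct, all unioned into dmub_rb_cmd. A
hedged sketch of filling a set-level command, using the DMUB_CMD__ABM (66)
and DMUB_CMD__ABM_SET_LEVEL (3) values from the enums in this series; the
header type here is a simplified stand-in, since the real dmub_cmd_header
layout is outside this hunk:

    #include <stdint.h>

    /* Simplified stand-in for struct dmub_cmd_header. */
    struct hdr {
        uint8_t type;
        uint8_t sub_type;
    };

    struct abm_set_level_data {
        uint32_t level;
    };

    struct rb_cmd_abm_set_level {
        struct hdr header;
        struct abm_set_level_data abm_set_level_data;
    };

    int main(void)
    {
        struct rb_cmd_abm_set_level cmd = {
            .header = {
                .type     = 66, /* DMUB_CMD__ABM */
                .sub_type = 3,  /* DMUB_CMD__ABM_SET_LEVEL */
            },
            .abm_set_level_data = { .level = 2 },
        };

        return cmd.abm_set_level_data.level == 2 ? 0 : 1;
    }
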
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 7b69eb3..d37535d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,7 +32,7 @@
  */
 
 enum dmub_cmd_psr_type {
-	DMUB_CMD__PSR_SETUP		= 0,
+	DMUB_CMD__PSR_SET_VERSION	= 0,
 	DMUB_CMD__PSR_COPY_SETTINGS	= 1,
 	DMUB_CMD__PSR_ENABLE		= 2,
 	DMUB_CMD__PSR_DISABLE		= 3,
@@ -42,7 +42,16 @@ enum dmub_cmd_psr_type {
 enum psr_version {
 	PSR_VERSION_1			= 0x10, // PSR Version 1
 	PSR_VERSION_2			= 0x20, // PSR Version 2, includes selective update
-	PSR_VERSION_2_Y_COORD		= 0x21, // PSR Version 2, includes Y-coordinate support for SU
+	PSR_VERSION_2_1			= 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+enum dmub_cmd_abm_type {
+	DMUB_CMD__ABM_INIT_CONFIG	= 0,
+	DMUB_CMD__ABM_SET_PIPE		= 1,
+	DMUB_CMD__ABM_SET_BACKLIGHT	= 2,
+	DMUB_CMD__ABM_SET_LEVEL		= 3,
+	DMUB_CMD__ABM_SET_AMBIENT_LEVEL	= 4,
+	DMUB_CMD__ABM_SET_PWM_FRAC	= 5,
 };
 
 #endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
new file mode 100644
index 0000000..652d6fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_GPINT_CMD_H_
+#define _DMUB_GPINT_CMD_H_
+
+#include "dmub_types.h"
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+	struct {
+		uint32_t param : 16;
+		uint32_t command_code : 12;
+		uint32_t status : 4;
+	} bits;
+	uint32_t all;
+};
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+	DMUB_GPINT__INVALID_COMMAND = 0,
+	DMUB_GPINT__GET_FW_VERSION = 1,
+	DMUB_GPINT__STOP_FW = 2,
+	DMUB_GPINT__GET_PSR_STATE = 7,
+};
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+#endif /* _DMUB_GPINT_CMD_H_ */
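
The GPINT register union above packs param (16 bits), command_code (12 bits)
and status (4 bits) into one 32-bit register; the shift/mask constants
describe the same layout portably, since C bitfield ordering is
implementation-defined. A sketch showing the two views agree on common
little-endian ABIs:

    #include <stdint.h>
    #include <stdio.h>

    union gpint_reg {
        struct {
            uint32_t param : 16;
            uint32_t command_code : 12;
            uint32_t status : 4;
        } bits;
        uint32_t all;
    };

    int main(void)
    {
        /* status = 1 (command pending), GET_PSR_STATE (7), param = 0 */
        union gpint_reg r = {
            .bits = { .param = 0, .command_code = 7, .status = 1 },
        };

        uint32_t packed = ((0u & 0xFFFF) << 0) |
                          ((7u & 0xFFF) << 16) |
                          ((1u & 0xF) << 28);

        printf("bitfield 0x%08X, shift/mask 0x%08X\n", r.all, packed);
        return 0;
    }
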
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f891759..c2671f2 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -66,6 +66,7 @@
 
 #include "dmub_types.h"
 #include "dmub_cmd.h"
+#include "dmub_gpint_cmd.h"
 #include "dmub_rb.h"
 
 #if defined(__cplusplus)
@@ -103,7 +104,7 @@ enum dmub_window_id {
 	DMUB_WINDOW_4_MAILBOX,
 	DMUB_WINDOW_5_TRACEBUFF,
 	DMUB_WINDOW_6_FW_STATE,
-	DMUB_WINDOW_7_RESERVED,
+	DMUB_WINDOW_7_SCRATCH_MEM,
 	DMUB_WINDOW_TOTAL,
 };
 
@@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs {
 	bool (*is_phy_init)(struct dmub_srv *dmub);
 
 	bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+	void (*set_gpint)(struct dmub_srv *dmub,
+			  union dmub_gpint_data_register reg);
+
+	bool (*is_gpint_acked)(struct dmub_srv *dmub,
+			       union dmub_gpint_data_register reg);
+
+	uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
 };
 
 /**
@@ -307,6 +316,7 @@ struct dmub_srv {
 	enum dmub_asic asic;
 	void *user_ctx;
 	bool is_virtual;
+	struct dmub_fb scratch_mem_fb;
 	volatile const struct dmub_fw_state *fw_state;
 
 	/* private: internal use only */
@@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
 					uint32_t timeout_us);
 
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+			    enum dmub_gpint_command command_code,
+			    uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+					     uint32_t *response);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index b2ca8e0..63bb9e2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
 {
 	uint32_t tmp;
 
+	if (dmub->fb_base || dmub->fb_offset) {
+		*fb_base = dmub->fb_base;
+		*fb_offset = dmub->fb_offset;
+		return;
+	}
+
 	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
 	*fb_base = (uint64_t)tmp << 24;
 
@@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
 
 void dmub_dcn20_reset(struct dmub_srv *dmub)
 {
+	union dmub_gpint_data_register cmd;
+	const uint32_t timeout = 30;
+	uint32_t in_reset, scratch, i;
+
+	REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset);
+
+	if (in_reset == 0) {
+		cmd.bits.status = 1;
+		cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+		cmd.bits.param = 0;
+
+		dmub->hw_funcs.set_gpint(dmub, cmd);
+
+		/**
+		 * Timeout covers both the ACK and the wait
+		 * for remaining work to finish.
+		 *
+		 * This is mostly bound by the PHY disable sequence.
+		 * Each register check will be greater than 1us, so
+		 * don't bother using udelay.
+		 */
+
+		for (i = 0; i < timeout; ++i) {
+			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+				break;
+		}
+
+		for (i = 0; i < timeout; ++i) {
+			scratch = dmub->hw_funcs.get_gpint_response(dmub);
+			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+				break;
+		}
+
+		/* Clear the GPINT command manually so we don't reset again. */
+		cmd.all = 0;
+		dmub->hw_funcs.set_gpint(dmub, cmd);
+
+		/* Force reset in case we timed out; the DMCUB is likely hung. */
+	}
+
 	REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
 	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
 	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+	REG_WRITE(DMCUB_SCRATCH0, 0);
 }
 
 void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
 
 	return supported;
 }
+
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+			  union dmub_gpint_data_register reg)
+{
+	REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
+}
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+			       union dmub_gpint_data_register reg)
+{
+	union dmub_gpint_data_register test;
+
+	reg.bits.status = 0;
+	test.all = REG_READ(DMCUB_GPINT_DATAIN1);
+
+	return test.all == reg.all;
+}
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
+{
+	return REG_READ(DMCUB_SCRATCH7);
+}
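
dmub_dcn20_is_gpint_acked() clears the status nibble of the expected value
before comparing it against the register, so an ack means the firmware zeroed
status while leaving command and parameter intact; the reset path above polls
for that, then watches SCRATCH7 for DMUB_GPINT__STOP_FW_RESPONSE. A
simplified sketch of the convention (hw_read stands in for the
DMCUB_GPINT_DATAIN1 read):

    #include <stdbool.h>
    #include <stdint.h>

    #define GPINT_STATUS_MASK (0xFu << 28)

    /* Acked when the register equals the issued command with the status
     * nibble cleared by the firmware. */
    static bool gpint_acked(uint32_t issued, uint32_t hw_read)
    {
        return hw_read == (issued & ~GPINT_STATUS_MASK);
    }

    int main(void)
    {
        uint32_t issued = (1u << 28) | (2u << 16); /* status=1, STOP_FW=2 */

        return gpint_acked(issued, 2u << 16) ? 0 : 1; /* 0: acked */
    }
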
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 04b0fa1..7f046c7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,6 +91,7 @@ struct dmub_srv;
 	DMUB_SR(DMCUB_SCRATCH13) \
 	DMUB_SR(DMCUB_SCRATCH14) \
 	DMUB_SR(DMCUB_SCRATCH15) \
+	DMUB_SR(DMCUB_GPINT_DATAIN1) \
 	DMUB_SR(CC_DC_PIPE_DIS) \
 	DMUB_SR(MMHUBBUB_SOFT_RESET) \
 	DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
@@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
 
 bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
 
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+			  union dmub_gpint_data_register reg);
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+			       union dmub_gpint_data_register reg);
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+
 #endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 85a518b..ce32cc7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -52,8 +52,11 @@
 /* Default tracebuffer size if meta is absent. */
 #define DMUB_TRACE_BUFFER_SIZE (1024)
 
+/* Default scratch mem size. */
+#define DMUB_SCRATCH_MEM_SIZE (256)
+
 /* Number of windows in use. */
-#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
 /* Base addresses. */
 
 #define DMUB_CW0_BASE (0x60000000)
@@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
 		funcs->is_supported = dmub_dcn20_is_supported;
 		funcs->is_hw_init = dmub_dcn20_is_hw_init;
+		funcs->set_gpint = dmub_dcn20_set_gpint;
+		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
+		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
 
 		if (asic == DMUB_ASIC_DCN21) {
 			dmub->regs = &dmub_srv_dcn21_regs;
@@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
 	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
 	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
 	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+	struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
 	const struct dmub_fw_meta_info *fw_info;
 	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
 	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
 
 	if (!dmub->sw_init)
 		return DMUB_STATUS_INVALID;
@@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
 	fw_state->base = dmub_align(trace_buff->top, 256);
 	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
 
-	out->fb_size = dmub_align(fw_state->top, 4096);
+	scratch_mem->base = dmub_align(fw_state->top, 256);
+	scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+
+	out->fb_size = dmub_align(scratch_mem->top, 4096);
 
 	return DMUB_STATUS_OK;
 }
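
dmub_srv_calc_region_info() chains the windows back to back, aligning each
base to 256 bytes and each size to 64, then rounding the total to a 4 KiB
page, so the new scratch window simply extends the chain past FW state. A
small sketch of the packing arithmetic (the sizes are illustrative, not the
real DMUB_FW_STATE_SIZE):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t dmub_align(uint32_t val, uint32_t factor)
    {
        return (val + factor - 1) / factor * factor;
    }

    int main(void)
    {
        uint32_t fw_state_base = dmub_align(0, 256);
        uint32_t fw_state_top  = fw_state_base + dmub_align(4096, 64);

        uint32_t scratch_base = dmub_align(fw_state_top, 256);
        uint32_t scratch_top  = scratch_base + dmub_align(256, 64);

        printf("fb_size = %u\n", dmub_align(scratch_top, 4096));
        return 0;
    }
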
@@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
 	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
 	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
 
 	struct dmub_rb_init_params rb_params;
 	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 		dmub->hw_funcs.reset(dmub);
 
 	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
-	    fw_state_fb) {
+	    fw_state_fb && scratch_mem_fb) {
 		cw2.offset.quad_part = data_fb->gpu_addr;
 		cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
 		cw2.region.top = cw2.region.base + data_fb->size;
@@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 
 		dmub->fw_state = fw_state_fb->cpu_addr;
 
+		dmub->scratch_mem_fb = *scratch_mem_fb;
+
 		if (dmub->hw_funcs.setup_windows)
 			dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
 						     &cw5, &cw6);
@@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
 
 	return DMUB_STATUS_TIMEOUT;
 }
+
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+			    enum dmub_gpint_command command_code,
+			    uint16_t param, uint32_t timeout_us)
+{
+	union dmub_gpint_data_register reg;
+	uint32_t i;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_funcs.set_gpint)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_funcs.is_gpint_acked)
+		return DMUB_STATUS_INVALID;
+
+	reg.bits.status = 1;
+	reg.bits.command_code = command_code;
+	reg.bits.param = param;
+
+	dmub->hw_funcs.set_gpint(dmub, reg);
+
+	for (i = 0; i < timeout_us; ++i) {
+		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
+			return DMUB_STATUS_OK;
+	}
+
+	return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+					     uint32_t *response)
+{
+	*response = 0;
+
+	if (!dmub->sw_init)
+		return DMUB_STATUS_INVALID;
+
+	if (!dmub->hw_funcs.get_gpint_response)
+		return DMUB_STATUS_INVALID;
+
+	*response = dmub->hw_funcs.get_gpint_response(dmub);
+
+	return DMUB_STATUS_OK;
+}
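
Together the two new entry points make a firmware query a send-then-read
pair. A hedged usage sketch, assuming an initialized dmub service and that
~30 polling iterations are enough for the ack (each register check takes over
1 us, per the reset comment earlier):

    #include "dmub_srv.h"

    /* Query the running firmware version via GPINT (sketch only). */
    static uint32_t query_fw_version(struct dmub_srv *dmub)
    {
        uint32_t version = 0;

        if (dmub_srv_send_gpint_command(dmub, DMUB_GPINT__GET_FW_VERSION,
                                        0, 30) == DMUB_STATUS_OK)
            dmub_srv_get_gpint_response(dmub, &version);

        return version;
    }
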
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a290398..8a87d0e 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,31 +134,27 @@
 #define PICASSO_A0 0x41
 /* DCN1_01 */
 #define RAVEN2_A0 0x81
-#define RAVEN2_15D8_REV_94 0x94
-#define RAVEN2_15D8_REV_95 0x95
-#define RAVEN2_15D8_REV_E3 0xE3
-#define RAVEN2_15D8_REV_E4 0xE4
-#define RAVEN2_15D8_REV_E9 0xE9
-#define RAVEN2_15D8_REV_EA 0xEA
-#define RAVEN2_15D8_REV_EB 0xEB
 #define RAVEN1_F0 0xF0
 #define RAVEN_UNKNOWN 0xFF
 #ifndef ASICREV_IS_RAVEN
 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
 #endif
+#define PRID_DALI_DE 0xDE
+#define PRID_DALI_DF 0xDF
+#define PRID_DALI_E3 0xE3
+#define PRID_DALI_E4 0xE4
+
+#define PRID_POLLOCK_94 0x94
+#define PRID_POLLOCK_95 0x95
+#define PRID_POLLOCK_E9 0xE9
+#define PRID_POLLOCK_EA 0xEA
+#define PRID_POLLOCK_EB 0xEB
 
 #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
 #ifndef ASICREV_IS_RAVEN2
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0))
 #endif
 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
-		|| (eChipRev == RAVEN2_15D8_REV_E4))
-#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
-		|| eChipRev == RAVEN2_15D8_REV_95 \
-			|| eChipRev == RAVEN2_15D8_REV_E9 \
-				|| eChipRev == RAVEN2_15D8_REV_EA \
-					|| eChipRev == RAVEN2_15D8_REV_EB)
 
 #define FAMILY_RV 142 /* DCN 1*/
 
@@ -177,7 +173,7 @@ enum {
 #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
 #define RENOIR_A0 0x91
 #define DEVICE_ID_RENOIR_1636 0x1636   // Renoir
-#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
 
 /*
  * ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 2c90d1b..3d29646 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -149,4 +149,12 @@ enum dpcd_psr_sink_states {
 	PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
 };
 
+#define DP_SOURCE_TABLE_REVISION	    0x310
+#define DP_SOURCE_PAYLOAD_SIZE		    0x311
+#define DP_SOURCE_SINK_CAP		    0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL	    0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK    0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL	    0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE	    0x32F
+
 #endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 89a7092..d66f9d8 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -124,36 +124,37 @@ enum dc_log_type {
 #define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
 		(1 << LOG_DETECTION_EDID_PARSER))
 
-#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
-		(1 << LOG_WARNING) | \
-		(1 << LOG_EVENT_MODE_SET) | \
-		(1 << LOG_EVENT_DETECTION) | \
-		(1 << LOG_EVENT_LINK_TRAINING) | \
-		(1 << LOG_EVENT_LINK_LOSS) | \
-		(1 << LOG_EVENT_UNDERFLOW) | \
-		(1 << LOG_RESOURCE) | \
-		(1 << LOG_FEATURE_OVERRIDE) | \
-		(1 << LOG_DETECTION_EDID_PARSER) | \
-		(1 << LOG_DC) | \
-		(1 << LOG_HW_HOTPLUG) | \
-		(1 << LOG_HW_SET_MODE) | \
-		(1 << LOG_HW_RESUME_S3) | \
-		(1 << LOG_HW_HPD_IRQ) | \
-		(1 << LOG_SYNC) | \
-		(1 << LOG_BANDWIDTH_VALIDATION) | \
-		(1 << LOG_MST) | \
-		(1 << LOG_DETECTION_DP_CAPS) | \
-		(1 << LOG_BACKLIGHT)) | \
-		(1 << LOG_I2C_AUX) | \
-		(1 << LOG_IF_TRACE) | \
-		(1 << LOG_DTN) /* | \
-		(1 << LOG_DEBUG) | \
-		(1 << LOG_BIOS) | \
-		(1 << LOG_SURFACE) | \
-		(1 << LOG_SCALER) | \
-		(1 << LOG_DML) | \
-		(1 << LOG_HW_LINK_TRAINING) | \
-		(1 << LOG_HW_AUDIO)| \
-		(1 << LOG_BANDWIDTH_CALCS)*/
+#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \
+		(1ULL << LOG_WARNING) | \
+		(1ULL << LOG_EVENT_MODE_SET) | \
+		(1ULL << LOG_EVENT_DETECTION) | \
+		(1ULL << LOG_EVENT_LINK_TRAINING) | \
+		(1ULL << LOG_EVENT_LINK_LOSS) | \
+		(1ULL << LOG_EVENT_UNDERFLOW) | \
+		(1ULL << LOG_RESOURCE) | \
+		(1ULL << LOG_FEATURE_OVERRIDE) | \
+		(1ULL << LOG_DETECTION_EDID_PARSER) | \
+		(1ULL << LOG_DC) | \
+		(1ULL << LOG_HW_HOTPLUG) | \
+		(1ULL << LOG_HW_SET_MODE) | \
+		(1ULL << LOG_HW_RESUME_S3) | \
+		(1ULL << LOG_HW_HPD_IRQ) | \
+		(1ULL << LOG_SYNC) | \
+		(1ULL << LOG_BANDWIDTH_VALIDATION) | \
+		(1ULL << LOG_MST) | \
+		(1ULL << LOG_DETECTION_DP_CAPS) | \
+		(1ULL << LOG_BACKLIGHT)) | \
+		(1ULL << LOG_I2C_AUX) | \
+		(1ULL << LOG_IF_TRACE) | \
+		(1ULL << LOG_HDMI_FRL) | \
+		(1ULL << LOG_DTN) /* | \
+		(1ULL << LOG_DEBUG) | \
+		(1ULL << LOG_BIOS) | \
+		(1ULL << LOG_SURFACE) | \
+		(1ULL << LOG_SCALER) | \
+		(1ULL << LOG_DML) | \
+		(1ULL << LOG_HW_LINK_TRAINING) | \
+		(1ULL << LOG_HW_AUDIO)| \
+		(1ULL << LOG_BANDWIDTH_CALCS)*/
 
 #endif /* __DAL_LOGGER_TYPES_H__ */
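
The widening to 1ULL matters because 1 << n is an int shift: if enum
dc_log_type ever grows past bit 31, the plain form is undefined behavior and
the mask truncates, while the 1ULL form keeps the whole expression 64-bit. A
two-line illustration:

    #include <stdio.h>

    int main(void)
    {
        int bit = 33; /* a hypothetical log type past bit 31 */

        /* (1 << bit) would be undefined behavior for a 32-bit int. */
        unsigned long long mask = 1ULL << bit;

        printf("mask = 0x%llx\n", mask);
        return 0;
    }
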
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b9992eb..4e54282 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
 		infopacket->sb[6] |= 0x04;
 
 	/* PB7 = FreeSync Minimum refresh rate (Hz) */
-	infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+	infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
 
 	/* PB8 = FreeSync Maximum refresh rate (Hz)
 	 * Note: We should never go above the field rate of the mode timing set.
 	 */
-	infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+	infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
 
 
 	//FreeSync HDR
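
The sb[7]/sb[8] fields previously truncated microhertz to hertz; adding half
a hertz (500000 uHz) before the divide rounds to nearest instead, so a
59.94 Hz minimum now reports as 60 rather than 59. A quick check of the
idiom:

    #include <stdio.h>

    int main(void)
    {
        unsigned int uhz = 59940000; /* 59.94 Hz in microhertz */

        printf("truncated: %u Hz, rounded: %u Hz\n",
               uhz / 1000000, (uhz + 500000) / 1000000);
        return 0;
    }
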
@@ -747,10 +747,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 	nominal_field_rate_in_uhz =
 			mod_freesync_calc_nominal_field_rate(stream);
 
-	/* Rounded to the nearest Hz */
-	nominal_field_rate_in_uhz = 1000000ULL *
-			div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
-
 	min_refresh_in_uhz = in_config->min_refresh_in_uhz;
 	max_refresh_in_uhz = in_config->max_refresh_in_uhz;
 
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 8aa528e..e9fbd94 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
 	 * hdcp is not desired
 	 */
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-				!hdcp->connection.displays[i].adjust.disable) {
+		if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+				!hdcp->displays[i].adjust.disable) {
 			is_auth_needed = 1;
 			break;
 		}
@@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
 
 	return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
 			is_auth_needed &&
-			!hdcp->connection.link.adjust.hdcp1.disable;
+			!hdcp->connection.link.adjust.hdcp1.disable &&
+			!hdcp->connection.is_hdcp1_revoked;
 }
 
 static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
@@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
 	 * hdcp is not desired
 	 */
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-				!hdcp->connection.displays[i].adjust.disable) {
+		if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+				!hdcp->displays[i].adjust.disable) {
 			is_auth_needed = 1;
 			break;
 		}
@@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
 			event_ctx->unexpected_event = 1;
 			goto out;
 		}
-		/* update topology event if hdcp is not desired */
-		status = mod_hdcp_add_display_topology(hdcp);
 	} else if (is_in_hdcp1_states(hdcp)) {
 		status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
 	} else if (is_in_hdcp1_dp_states(hdcp)) {
@@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
 	} else if (is_in_hdcp2_dp_states(hdcp)) {
 		status = mod_hdcp_hdcp2_dp_execution(hdcp,
 				event_ctx, &input->hdcp2);
+	} else {
+		event_ctx->unexpected_event = 1;
+		goto out;
 	}
 out:
 	return status;
@@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
 			mod_hdcp_hdcp1_destroy_session(hdcp);
 
 		}
-		if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
-			status = mod_hdcp_remove_display_topology(hdcp);
-			if (status != MOD_HDCP_STATUS_SUCCESS) {
-				output->callback_needed = 0;
-				output->watchdog_timer_needed = 0;
-				goto out;
-			}
-		}
+
 		HDCP_TOP_RESET_AUTH_TRACE(hdcp);
 		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
 		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
 				goto out;
 			}
 		}
-		if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
-			status = mod_hdcp_remove_display_topology(hdcp);
-			if (status != MOD_HDCP_STATUS_SUCCESS) {
-				output->callback_needed = 0;
-				output->watchdog_timer_needed = 0;
-				goto out;
-			}
-		}
+
 		HDCP_TOP_RESET_AUTH_TRACE(hdcp);
 		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
 		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
 		set_state_id(hdcp, output, HDCP_INITIALIZED);
 	} else if (is_in_cp_not_desired_state(hdcp)) {
-		status = mod_hdcp_remove_display_topology(hdcp);
-		if (status != MOD_HDCP_STATUS_SUCCESS) {
-			output->callback_needed = 0;
-			output->watchdog_timer_needed = 0;
-			goto out;
-		}
 		HDCP_TOP_RESET_AUTH_TRACE(hdcp);
 		memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
 		memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -337,16 +319,20 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
 	if (status != MOD_HDCP_STATUS_SUCCESS)
 		goto out;
 
-	/* add display to connection */
-	hdcp->connection.link = *link;
-	*display_container = *display;
-
 	/* reset retry counters */
 	reset_retry_counts(hdcp);
 
 	/* reset error trace */
 	memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
 
+	/* add display to connection */
+	hdcp->connection.link = *link;
+	*display_container = *display;
+	status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
+	if (status != MOD_HDCP_STATUS_SUCCESS)
+		goto out;
+
 	/* request authentication */
 	if (current_state(hdcp) != HDCP_INITIALIZED)
 		set_state_id(hdcp, output, HDCP_INITIALIZED);
@@ -379,17 +365,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
 	if (status != MOD_HDCP_STATUS_SUCCESS)
 		goto out;
 
-	/* remove display */
-	display->state = MOD_HDCP_DISPLAY_INACTIVE;
-
 	/* clear retry counters */
 	reset_retry_counts(hdcp);
 
 	/* reset error trace */
 	memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
 
-	/* request authentication for remaining displays*/
-	if (get_active_display_count(hdcp) > 0)
+	/* remove display */
+	status = mod_hdcp_remove_display_from_topology(hdcp, index);
+	if (status != MOD_HDCP_STATUS_SUCCESS)
+		goto out;
+	memset(display, 0, sizeof(struct mod_hdcp_display));
+
+	/* request authentication when connection is not reset */
+	if (current_state(hdcp) != HDCP_UNINITIALIZED)
 		callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
 				output);
 out:
@@ -496,10 +485,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
 		break;
 	case SIGNAL_TYPE_EDP:
 	case SIGNAL_TYPE_DISPLAY_PORT:
-		mode = MOD_HDCP_MODE_DP;
-		break;
 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
-		mode = MOD_HDCP_MODE_DP_MST;
+		mode = MOD_HDCP_MODE_DP;
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index af78e4f..60ff1a0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result {
 struct mod_hdcp_transition_input_hdcp1 {
 	uint8_t bksv_read;
 	uint8_t bksv_validation;
-	uint8_t add_topology;
 	uint8_t create_session;
 	uint8_t an_write;
 	uint8_t aksv_write;
@@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 {
 struct mod_hdcp_transition_input_hdcp2 {
 	uint8_t hdcp2version_read;
 	uint8_t hdcp2_capable_check;
-	uint8_t add_topology;
 	uint8_t create_session;
 	uint8_t ake_init_prepare;
 	uint8_t ake_init_write;
@@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters {
 /* contains values per connection */
 struct mod_hdcp_connection {
 	struct mod_hdcp_link link;
-	struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
 	uint8_t is_repeater;
 	uint8_t is_km_stored;
+	uint8_t is_hdcp1_revoked;
 	uint8_t is_hdcp2_revoked;
 	struct mod_hdcp_trace trace;
 	uint8_t hdcp1_retry_count;
@@ -202,6 +200,8 @@ struct mod_hdcp {
 	struct mod_hdcp_config config;
 	/* per connection */
 	struct mod_hdcp_connection connection;
+	/* per display */
+	struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
 	/* per authentication attempt */
 	struct mod_hdcp_authentication auth;
 	/* per state in an authentication */
@@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
 /* TODO: add adjustment log */
 
 /* psp functions */
-enum mod_hdcp_status mod_hdcp_add_display_topology(
-		struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_remove_display_topology(
-		struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+		struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+		struct mod_hdcp *hdcp, uint8_t index);
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
 /* hdcp version helpers */
 static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
 {
-	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
-			hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
 }
 
 static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
 {
-	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+	return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+			hdcp->connection.link.dp.mst_supported);
 }
 
 static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
@@ -503,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
 	return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
 }
 
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
-	return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
 static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
 {
 	return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,35 +510,24 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
 
 static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
 {
-	uint8_t added_count = 0;
+	uint8_t active_count = 0;
 	uint8_t i;
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-		if (is_display_active(&hdcp->connection.displays[i]))
-			added_count++;
-	return added_count;
+		if (is_display_active(&hdcp->displays[i]))
+			active_count++;
+	return active_count;
 }
 
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
-	uint8_t added_count = 0;
-	uint8_t i;
-
-	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-		if (is_display_added(&hdcp->connection.displays[i]))
-			added_count++;
-	return added_count;
-}
-
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
 		struct mod_hdcp *hdcp)
 {
 	uint8_t i;
 	struct mod_hdcp_display *display = NULL;
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-		if (is_display_added(&hdcp->connection.displays[i])) {
-			display = &hdcp->connection.displays[i];
+		if (is_display_active(&hdcp->displays[i])) {
+			display = &hdcp->displays[i];
 			break;
 		}
 	return display;
@@ -556,9 +540,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index(
 	struct mod_hdcp_display *display = NULL;
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-		if (hdcp->connection.displays[i].index == index &&
-				is_display_active(&hdcp->connection.displays[i])) {
-			display = &hdcp->connection.displays[i];
+		if (hdcp->displays[i].index == index &&
+				is_display_active(&hdcp->displays[i])) {
+			display = &hdcp->displays[i];
 			break;
 		}
 	return display;
@@ -571,8 +555,8 @@ static inline struct mod_hdcp_display *get_empty_display_container(
 	struct mod_hdcp_display *display = NULL;
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
-		if (!is_display_active(&hdcp->connection.displays[i])) {
-			display = &hdcp->connection.displays[i];
+		if (!is_display_active(&hdcp->displays[i])) {
+			display = &hdcp->displays[i];
 			break;
 		}
 	return display;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37670db..f244b72 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
 	/* device count must be greater than or equal to tracked hdcp displays */
-	return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+	return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
 			MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
 			MOD_HDCP_STATUS_SUCCESS;
 }
@@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
 		goto out;
 	}
 
-	if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
-			&input->add_topology, &status,
-			hdcp, "add_topology"))
-		goto out;
 	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
 			&input->create_session, &status,
 			hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 76edcbe..f371191 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
 		set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
 		break;
 	case H1_A1_EXCHANGE_KSVS:
-		if (input->add_topology != PASS ||
-				input->create_session != PASS) {
+		if (input->create_session != PASS) {
 			/* out of sync with psp state */
 			adjust->hdcp1.disable = 1;
 			fail_and_restart_in_ms(0, &status, output);
@@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 		set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
 		break;
 	case D1_A1_EXCHANGE_KSVS:
-		if (input->add_topology != PASS ||
-				input->create_session != PASS) {
+		if (input->create_session != PASS) {
 			/* out of sync with psp state */
 			adjust->hdcp1.disable = 1;
 			fail_and_restart_in_ms(0, &status, output);
@@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 			fail_and_restart_in_ms(0, &status, output);
 			break;
 		} else if (input->rx_validation != PASS) {
-			if (hdcp->state.stay_count < 2) {
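+			/* a revoked BKSV cannot recover; skip the retries */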
+			if (hdcp->state.stay_count < 2 &&
+					!hdcp->connection.is_hdcp1_revoked) {
 				/* allow 2 additional retries */
 				callback_in_ms(0, output);
 				increment_stay_counter(hdcp);
@@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 				(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
 			fail_and_restart_in_ms(0, &status, output);
 			break;
+		} else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
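+			/* workaround: keep restarting until the minimum auth retry count is reached */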
+			fail_and_restart_in_ms(0, &status, output);
+			break;
 		}
 		if (conn->is_repeater) {
 			set_watchdog_in_ms(hdcp, 5000, output);
@@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
 			fail_and_restart_in_ms(0, &status, output);
 			break;
 		} else if (input->ksvlist_vp_validation != PASS) {
-			if (hdcp->state.stay_count < 2) {
+			if (hdcp->state.stay_count < 2 &&
+					!hdcp->connection.is_hdcp1_revoked) {
 				/* allow 2 additional retries */
 				callback_in_ms(0, output);
 				increment_stay_counter(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 5524671..549c113 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
 	if (is_dp_hdcp(hdcp))
 		is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
 	else
-		is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+		is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
 				(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
 						hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
 	return is_ready ? MOD_HDCP_STATUS_SUCCESS :
@@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request(
 				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
 				MOD_HDCP_STATUS_SUCCESS;
 	else
-		ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+		ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
 				MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
 				MOD_HDCP_STATUS_SUCCESS;
 	return ret;
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
 {
 	/* device count must be greater than or equal to tracked hdcp displays */
-	return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+	return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
 			MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
 			MOD_HDCP_STATUS_SUCCESS;
 }
@@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
 		event_ctx->unexpected_event = 1;
 		goto out;
 	}
+
 	if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
 			&input->hdcp2version_read, &status,
 			hdcp, "hdcp2version_read"))
@@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
 		event_ctx->unexpected_event = 1;
 		goto out;
 	}
-	if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
-			&input->add_topology, &status,
-			hdcp, "add_topology"))
-		goto out;
+
 	if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
 			&input->create_session, &status,
 			hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 8cae3e3..e738c7a 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case H2_A1_SEND_AKE_INIT:
-		if (input->add_topology != PASS ||
-				input->create_session != PASS ||
+		if (input->create_session != PASS ||
 				input->ake_init_prepare != PASS) {
 			/* out of sync with psp state */
 			adjust->hdcp2.disable = 1;
@@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
 		}
 		break;
 	case D2_A1_SEND_AKE_INIT:
-		if (input->add_topology != PASS ||
-				input->create_session != PASS ||
+		if (input->create_session != PASS ||
 				input->ake_init_prepare != PASS) {
 			/* out of sync with psp state */
 			adjust->hdcp2.disable = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d548..bb5130f 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
 	MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
 	MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
 	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
 	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
 	MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
 	MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
 	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
 	[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
 	[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
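+	/* 0x69340 = 0x69330 + 16, one max-size AUX transaction past the list start */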
+	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
 	[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
 	[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+		hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
 				hdcp->auth.msg.hdcp2.ake_cert+1,
 				sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+		hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
 				hdcp->auth.msg.hdcp2.ake_h_prime+1,
 				sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+		hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
 				hdcp->auth.msg.hdcp2.ake_pairing_info+1,
 				sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+		hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
 				hdcp->auth.msg.hdcp2.lc_l_prime+1,
 				sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
 
 enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
 {
-	enum mod_hdcp_status status;
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
-		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
-				hdcp->auth.msg.hdcp2.rx_id_list+1,
-				sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+		uint32_t device_count = 0;
+		uint32_t rx_id_list_size = 0;
+		uint32_t bytes_read = 0;
 
+		hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
+		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+						hdcp->auth.msg.hdcp2.rx_id_list+1,
+						HDCP_MAX_AUX_TRANSACTION_SIZE);
+		if (status == MOD_HDCP_STATUS_SUCCESS) {
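+			/* RxInfo in the first chunk gives the device count;
+			 * size the full list and read the remainder in part 2
+			 */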
+			bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+			device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+					(HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+			rx_id_list_size = MIN((21 + 5 * device_count),
+					(sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+			status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+					hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+					(rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+		}
 	} else {
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
 				hdcp->auth.msg.hdcp2.rx_id_list,
@@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
 	enum mod_hdcp_status status;
 
 	if (is_dp_hdcp(hdcp)) {
-		hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+		hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
 		status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
 				hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
 				sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 724ebce..44956f9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status)
 		return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
 	case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
 		return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+	case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
+		return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
 	case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
 		return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
 	case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
 		return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+	case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
+		return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
 	case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
 		return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
 	case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index ff91373..d3192b9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
 /* default logs */
 #define HDCP_ERROR_TRACE(hdcp, status) \
 		HDCP_LOG_ERR(hdcp, \
-			"[Link %d] WARNING %s IN STATE %s", \
+			"[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
 			hdcp->config.index, \
 			mod_hdcp_status_to_str(status), \
-			mod_hdcp_state_id_to_str(hdcp->state.id))
+			mod_hdcp_state_id_to_str(hdcp->state.id), \
+			hdcp->state.stay_count)
 #define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
 		HDCP_LOG_VER(hdcp, \
 			"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -49,6 +50,15 @@
 		HDCP_LOG_VER(hdcp, \
 			"[Link %d] HDCP 2.2 enabled on display %d", \
 			hdcp->config.index, displayIndex)
+#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \
+		HDCP_LOG_VER(hdcp, \
+			"[Link %d] HDCP 1.4 disabled on display %d", \
+			hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \
+		HDCP_LOG_VER(hdcp, \
+			"[Link %d] HDCP 2.2 disabled on display %d", \
+			hdcp->config.index, displayIndex)
+
 /* state machine logs */
 #define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
 		HDCP_LOG_FSM(hdcp, \
@@ -102,6 +112,9 @@
 				sizeof(hdcp->auth.msg.hdcp1.bksv)); \
 		HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
 				sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+		HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+				(uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+				sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
 		HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
 				sizeof(hdcp->auth.msg.hdcp1.an)); \
 		HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7911dc1..836e479 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,84 +44,78 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
 	in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
 	in->process.msg3_desc.msg_size = 0;
 }
-enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
-{
-
-	struct psp_context *psp = hdcp->config.psp.handle;
-	struct ta_dtm_shared_memory *dtm_cmd;
-	struct mod_hdcp_display *display = NULL;
-	uint8_t i;
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+		struct mod_hdcp *hdcp, uint8_t index)
+{
+	struct psp_context *psp = hdcp->config.psp.handle;
+	struct ta_dtm_shared_memory *dtm_cmd;
+	struct mod_hdcp_display *display =
+			get_active_display_at_index(hdcp, index);
 
 	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
-	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (is_display_added(&(hdcp->connection.displays[i]))) {
+	if (!display || !is_display_active(display))
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
-			memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
-			display = &hdcp->connection.displays[i];
+	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+	dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+	dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
 
-			dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
-			dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
-			dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
-			dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-			psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+		return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
 
-			if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-				return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+	display->state = MOD_HDCP_DISPLAY_ACTIVE;
+	HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+
+	return MOD_HDCP_STATUS_SUCCESS;
+}
 
-			display->state = MOD_HDCP_DISPLAY_ACTIVE;
-			HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-		}
-	}
-
-	return MOD_HDCP_STATUS_SUCCESS;
-}
-
-enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+		struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_dtm_shared_memory *dtm_cmd;
-	struct mod_hdcp_display *display = NULL;
 	struct mod_hdcp_link *link = &hdcp->connection.link;
-	uint8_t i;
 
 	if (!psp->dtm_context.dtm_initialized) {
 		DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
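+		/* drop the display so it is not tracked as active */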
+		display->state = MOD_HDCP_DISPLAY_INACTIVE;
 		return MOD_HDCP_STATUS_FAILURE;
 	}
 
 	dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
 
-	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
-			display = &hdcp->connection.displays[i];
+	memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
 
-			memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+	dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+	dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+	dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+	dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+	dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+	dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+	dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+	if (is_dp_hdcp(hdcp))
+		dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
 
-			dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
-			dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
-			dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
-			dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
-			dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
-			dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
-			dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
-			dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
-			dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
-				TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
-			dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+	dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+	dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+			TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
 
-			psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
 
-			if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
-				return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
-			display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-			HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
-		}
+	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+		display->state = MOD_HDCP_DISPLAY_INACTIVE;
+		return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
 	}
 
+	HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+
 	return MOD_HDCP_STATUS_SUCCESS;
 }
 
@@ -129,7 +123,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
 {
 
 	struct psp_context *psp = hdcp->config.psp.handle;
-	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+	struct mod_hdcp_display *display = get_first_active_display(hdcp);
 	struct ta_hdcp_shared_memory *hdcp_cmd;
 
 	if (!psp->hdcp_context.hdcp_initialized) {
@@ -164,6 +158,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
 
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
+	uint8_t i = 0;
 
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -177,6 +172,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
 		return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
 
 	HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
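+	/* encryption is lost with the session; demote encrypted displays to active */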
+	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+		if (is_display_encryption_enabled(
+				&hdcp->displays[i])) {
+			hdcp->displays[i].state =
+					MOD_HDCP_DISPLAY_ACTIVE;
+			HDCP_HDCP1_DISABLED_TRACE(hdcp,
+					hdcp->displays[i].index);
+		}
 
 	return MOD_HDCP_STATUS_SUCCESS;
 }
@@ -210,6 +213,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
 	} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
 		   TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
 		hdcp->connection.is_repeater = 0;
+	} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+		   TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+		hdcp->connection.is_hdcp1_revoked = 1;
+		return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
 	} else
 		return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
 
@@ -221,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
-	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+	struct mod_hdcp_display *display = get_first_active_display(hdcp);
 
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -245,6 +252,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
+	enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
 
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -264,10 +272,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
 
 	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
 
-	if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
-		return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+	if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+	    hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+		    TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+		status = MOD_HDCP_STATUS_SUCCESS;
+	} else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+		   TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+		hdcp->connection.is_hdcp1_revoked = 1;
+		status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
+	} else {
+		status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+	}
 
-	return MOD_HDCP_STATUS_SUCCESS;
+	return status;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -281,14 +298,13 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
 
-		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-		    hdcp->connection.displays[i].adjust.disable)
+		if (hdcp->displays[i].adjust.disable)
 			continue;
 
 		memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
 		hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
-		hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+		hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
 		hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
 
 		psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -296,8 +312,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
 		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
 			return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
 
-		hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
-		HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+		hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
 	}
 
 	return MOD_HDCP_STATUS_SUCCESS;
@@ -344,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
-	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+	struct mod_hdcp_display *display = get_first_active_display(hdcp);
 
 	if (!psp->hdcp_context.hdcp_initialized) {
 		DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -385,6 +401,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
+	uint8_t i = 0;
 
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -398,6 +415,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
 		return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
 
 	HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
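+	/* as with HDCP1, session teardown drops encrypted displays back to active */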
+	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+		if (is_display_encryption_enabled(
+				&hdcp->displays[i])) {
+			hdcp->displays[i].state =
+					MOD_HDCP_DISPLAY_ACTIVE;
+			HDCP_HDCP2_DISABLED_TRACE(hdcp,
+					hdcp->displays[i].index);
+		}
 
 	return MOD_HDCP_STATUS_SUCCESS;
 }
@@ -473,9 +498,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
 		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
 		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
 		return MOD_HDCP_STATUS_SUCCESS;
+	} else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+		hdcp->connection.is_hdcp2_revoked = 1;
+		return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
 	}
 
-	return MOD_HDCP_STATUS_FAILURE;
+	return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
 }
 
 enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -630,20 +658,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
 {
 	struct psp_context *psp = hdcp->config.psp.handle;
 	struct ta_hdcp_shared_memory *hdcp_cmd;
-	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
-	struct mod_hdcp_display *display = get_first_added_display(hdcp);
+	struct mod_hdcp_display *display = get_first_active_display(hdcp);
 
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
-	msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
-
-	hdcp2_message_init(hdcp, msg_in);
-
 	if (!display)
 		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
 
-	hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+	hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
 
 	hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
 	psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -695,6 +718,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
 		hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
 		hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
 		return MOD_HDCP_STATUS_SUCCESS;
+	} else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+		hdcp->connection.is_hdcp2_revoked = 1;
+		return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
 	}
 
 
@@ -717,10 +743,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
 
 
 	for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
-		if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
-		    hdcp->connection.displays[i].adjust.disable)
+		if (hdcp->displays[i].adjust.disable)
 			continue;
-		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
 		hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
 
 		hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
@@ -729,8 +754,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
 		if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
 			break;
 
-		hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
-		HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+		hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+		HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
 	}
 
 	return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 82a5e99..1a663db 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -117,6 +117,8 @@ struct ta_dtm_shared_memory {
 int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
 		uint64_t fence_mc_addr);
 
+enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 };
+
 enum ta_hdcp_command {
 	TA_HDCP_COMMAND__INITIALIZE,
 	TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
@@ -134,7 +136,10 @@ enum ta_hdcp_command {
 	TA_HDCP_COMMAND__UNUSED_3,
 	TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
 	TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
-	TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+	TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION,
+	TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS,
+	TA_HDCP_COMMAND__HDCP_SET_SRM,
+	TA_HDCP_COMMAND__HDCP_GET_SRM
 };
 
 enum ta_hdcp2_msg_id {
@@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status {
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
 	TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
-	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09,
+	TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A
 };
 
 enum ta_hdcp2_msg_authentication_status {
@@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status {
 	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
 	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
 	TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
-	TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST,
+	TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED
 };
 
 enum ta_hdcp_content_type {
@@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
 	uint32_t display_handle;
 };
 
+struct ta_hdcp_cmd_set_srm_input {
+	uint32_t srm_buf_size;
+	uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_set_srm_output {
+	uint8_t valid_signature;
+	uint32_t srm_version;
+};
+
+struct ta_hdcp_cmd_get_srm_output {
+	uint32_t srm_version;
+	uint32_t srm_buf_size;
+	uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
 /**********************************************************/
 /* Common input structure for HDCP callbacks */
 union ta_hdcp_cmd_input {
@@ -432,6 +455,7 @@ union ta_hdcp_cmd_input {
 	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
 		hdcp2_prepare_process_authentication_message_v2;
 	struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+	struct ta_hdcp_cmd_set_srm_input hdcp_set_srm;
 };
 
 /* Common output structure for HDCP callbacks */
@@ -444,6 +468,8 @@ union ta_hdcp_cmd_output {
 	struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
 	struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
 		hdcp2_prepare_process_authentication_message_v2;
+	struct ta_hdcp_cmd_set_srm_output hdcp_set_srm;
+	struct ta_hdcp_cmd_get_srm_output hdcp_get_srm;
 };
 /**********************************************************/
 
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f2a0e1a..eae9309 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -56,8 +56,10 @@ enum mod_hdcp_status {
 	MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
 	MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
 	MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+	MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
 	MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
 	MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+	MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
 	MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
 	MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
 	MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
@@ -100,6 +102,7 @@ enum mod_hdcp_status {
 struct mod_hdcp_displayport {
 	uint8_t rev;
 	uint8_t assr_supported;
+	uint8_t mst_supported;
 };
 
 struct mod_hdcp_hdmi {
@@ -108,14 +111,12 @@ struct mod_hdcp_hdmi {
 enum mod_hdcp_operation_mode {
 	MOD_HDCP_MODE_OFF,
 	MOD_HDCP_MODE_DEFAULT,
-	MOD_HDCP_MODE_DP,
-	MOD_HDCP_MODE_DP_MST
+	MOD_HDCP_MODE_DP
 };
 
 enum mod_hdcp_display_state {
 	MOD_HDCP_DISPLAY_INACTIVE = 0,
 	MOD_HDCP_DISPLAY_ACTIVE,
-	MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
 	MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
 };
 
@@ -155,7 +156,8 @@ struct mod_hdcp_display_adjustment {
 struct mod_hdcp_link_adjustment_hdcp1 {
 	uint8_t disable			: 1;
 	uint8_t postpone_encryption	: 1;
-	uint8_t reserved		: 6;
+	uint8_t min_auth_retries_wa	: 1;
+	uint8_t reserved		: 5;
 };
 
 enum mod_hdcp_force_hdcp_type {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 42cbeff..13c57ff 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -34,8 +34,7 @@ struct dc_info_packet;
 struct mod_vrr_params;
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet,
-		bool *use_vsc_sdp_for_colorimetry);
+		struct dc_info_packet *info_packet);
 
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
 		struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 6a8a056..cff3ab1 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
 };
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-		struct dc_info_packet *info_packet,
-		bool *use_vsc_sdp_for_colorimetry)
+		struct dc_info_packet *info_packet)
 {
 	unsigned int vsc_packet_revision = vsc_packet_undefined;
 	unsigned int i;
@@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 	unsigned int colorimetryFormat = 0;
 	bool stereo3dSupport = false;
 
-	/* Initialize first, later if infopacket is valid determine if VSC SDP
-	 * should be used to signal colorimetry format and pixel encoding.
-	 */
-	*use_vsc_sdp_for_colorimetry = false;
-
 	if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
 		vsc_packet_revision = vsc_packet_rev1;
 		stereo3dSupport = true;
@@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 	if (stream->psr_version != 0)
 		vsc_packet_revision = vsc_packet_rev2;
 
-	/* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
-	if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
-			stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+	/* Update to revision 5 for extended colorimetry support */
+	if (stream->use_vsc_sdp_for_colorimetry)
 		vsc_packet_revision = vsc_packet_rev5;
 
 	/* VSC packet not needed based on the features
@@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 
 		info_packet->valid = true;
 
-		/* If we are using VSC SDP revision 05h, use this to signal for
-		 * colorimetry format and pixel encoding. HW should later be
-		 * programmed to set MSA MISC1 bit 6 to indicate ignore
-		 * colorimetry format and pixel encoding in the MSA.
-		 */
-		*use_vsc_sdp_for_colorimetry = true;
-
 		/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
 		 * Data Bytes DB 18~16
 		 * Bits 3:0 (Colorimetry Format)        |  Bits 7:4 (Pixel Encoding)
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index f0a1537..00f132f 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -40,14 +40,18 @@ struct core_vmid {
 
 static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
 {
-	core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
-	core_vmid->num_vmids_available--;
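+	/* bounds-check to avoid writing past the vmid table */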
+	if (vmid < MAX_VMID) {
+		core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
+		core_vmid->num_vmids_available--;
+	}
 }
 
 static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
 {
-	core_vmid->ptb_assigned_to_vmid[vmid] = 0;
-	core_vmid->num_vmids_available++;
+	if (vmid < MAX_VMID) {
+		core_vmid->ptb_assigned_to_vmid[vmid] = 0;
+		core_vmid->num_vmids_available++;
+	}
 }
 
 static void evict_vmids(struct core_vmid *core_vmid)
@@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid)
 
 	// At this point any positions with value 0 are unused vmids, evict them
 	for (i = 1; i < core_vmid->num_vmid; i++) {
-		if (ord & (1u << i))
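+		/* a set bit means the vmid is in use; evict only the clear ones */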
+		if (!(ord & (1u << i)))
 			clear_entry_from_vmid_table(core_vmid, i);
 	}
 }
@@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid)
 uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
 {
 	struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
-	unsigned int vmid = 0;
+	int vmid = 0;
 
 	// Physical address gets vmid 0
 	if (ptb == 0)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
new file mode 100644
index 0000000..82b6cc2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SH_MASK_HEADER
+#define _wafl2_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT						0x0
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT						0x1
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT							0x5
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT						0x6
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT						0x7
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT					0x8
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT						0x9
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT					0xa
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT					0xb
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT					0xc
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT						0xd
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT					0xf
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT						0x10
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT					0x11
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT					0x12
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT					0x13
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT					0x14
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT					0x15
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT						0x17
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT						0x18
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK						0x00000001L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK						0x00000002L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK							0x00000020L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK						0x00000040L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK						0x00000080L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK						0x00000100L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK						0x00000200L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK					0x00000400L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK					0x00000800L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK					0x00001000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK							0x00002000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK					0x00008000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK						0x00010000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK						0x00020000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK					0x00040000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK					0x00080000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK						0x00100000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK					0x00200000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK						0x00800000L
+#define	PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK						0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
new file mode 100644
index 0000000..4a51a90
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SMN_HEADER
+#define _wafl2_4_0_0_SMN_HEADER
+
+#define	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS						0x11cf0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
new file mode 100644
index 0000000..f37712f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SH_MASK_HEADER
+#define _xgmi_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX16_PCS_ERROR_STATUS
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT								0x0
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT								0x1
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT								0x5
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT							0x6
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT								0x7
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT							0x8
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT								0x9
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT							0xa
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT						0xb
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT						0xc
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT								0xd
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT							0xf
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT							0x10
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT							0x11
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT							0x12
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT							0x13
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT							0x14
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT						0x15
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT								0x17
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT							0x18
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK								0x00000001L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK								0x00000002L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK									0x00000020L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK								0x00000040L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK								0x00000080L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK							0x00000100L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK								0x00000200L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK							0x00000400L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK							0x00000800L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK							0x00001000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK								0x00002000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK							0x00008000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK							0x00010000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK							0x00020000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK							0x00040000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK							0x00080000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK							0x00100000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK						0x00200000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK								0x00800000L
+#define	XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK								0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
new file mode 100644
index 0000000..6ccbac4
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SMN_HEADER
+#define _xgmi_4_0_0_SMN_HEADER
+
+#define	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS				0x11af0210
+
+#endif
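
The two new headers work as a pair: xgmi_4_0_0_smn.h carries the SMN aperture address of the XGMI PCS error-status register, and xgmi_4_0_0_sh_mask.h carries the per-field shifts and masks for decoding what a read returns. A minimal standalone sketch of that decode step; the macros are copied from the headers, while smn_read() and its return value are made-up stand-ins for the driver's register accessor:

#include <stdint.h>
#include <stdio.h>

/* Copied from the two headers above */
#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS			0x11af0210
#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT	0x5
#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK		0x00000020L
#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK	0xFF000000L

/* Made-up stand-in for the driver's SMN register read */
static uint32_t smn_read(uint32_t addr)
{
	(void)addr;
	return 0x03000020;	/* CRCErr set, BER accumulator == 3 */
}

int main(void)
{
	uint32_t v = smn_read(smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS);

	printf("CRCErr: %lu\n",
	       (unsigned long)((v & XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK)
			>> XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT));
	printf("BER accumulator: %lu\n",
	       (unsigned long)((v & XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK)
			>> XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT));
	return 0;
}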
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b10..a3c238c 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
 	uint32_t num_queue_per_pipe;
 
 	/* Bit n == 1 means Queue n is available for KFD */
-	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
 
 	/* SDMA doorbell assignments (SOC15 and later chips only). Only
 	 * specific doorbells are routed to each SDMA engine. Others
@@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources {
 
 	/* Minor device number of the render node */
 	int drm_render_minor;
+
 };
 
 struct tile_config {
@@ -166,27 +167,6 @@ struct tile_config {
 
 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
 
-/*
- * Allocation flag domains
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
-#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
-#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
-#define ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
-
-/*
- * Allocation flags attributes/access options.
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
-#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
-#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
-#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
-#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */
-
 /**
  * struct kfd2kgd_calls
  *
@@ -222,8 +202,6 @@ struct tile_config {
  * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
  * Only used for no cp scheduling mode
  *
- * @get_tile_config: Returns GPU-specific tiling mode information
- *
  * @set_vm_context_page_table_base: Program page table base for a VMID
  *
  * @invalidate_tlbs: Invalidate TLBs for a specific PASID
@@ -236,6 +214,8 @@ struct tile_config {
  *
  * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
  *
+ * @get_unique_id: Returns the unique id of the current device
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -307,12 +287,11 @@ struct kfd2kgd_calls {
 	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
 				uint64_t va, uint32_t vmid);
 
-	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
-
 	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
 			uint32_t vmid, uint64_t page_table_base);
 	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
 	uint64_t (*get_hive_id)(struct kgd_dev *kgd);
+	uint64_t (*get_unique_id)(struct kgd_dev *kgd);
 
 };
 
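The rename from queue_bitmap to cp_queue_bitmap makes explicit that this map covers CP (command-processor) queues only, as distinct from the SDMA queues whose doorbell routing is described just below it in the same struct. A standalone sketch of the bitmap convention; KGD_MAX_QUEUES and the DECLARE_BITMAP expansion here are illustrative stand-ins for the kernel definitions:

#include <stdio.h>

#define KGD_MAX_QUEUES	128	/* illustrative, not the real value */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

struct shared_resources {
	/* Bit n == 1 means CP queue n is available for KFD */
	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
};

int main(void)
{
	struct shared_resources res = { .cp_queue_bitmap = { 0x0fUL } };
	unsigned int n;

	for (n = 0; n < KGD_MAX_QUEUES; n++)
		if (res.cp_queue_bitmap[n / BITS_PER_LONG] &
		    (1UL << (n % BITS_PER_LONG)))
			printf("CP queue %u available to KFD\n", n);
	return 0;
}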
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 96e81c7..e8b27fa 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
 #include <linux/firmware.h>
 #include <linux/pci.h>
 
-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v11_0.h"
 #include "smu_v12_0.h"
 #include "atom.h"
-#include "amd_pcie.h"
 #include "vega20_ppt.h"
 #include "arcturus_ppt.h"
 #include "navi10_ppt.h"
@@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
 
 	if (enabled) {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
-						  feature_low);
+						  feature_low, NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
-						  feature_high);
+						  feature_high, NULL);
 		if (ret)
 			return ret;
 	} else {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
-						  feature_low);
+						  feature_low, NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
-						  feature_high);
+						  feature_high, NULL);
 		if (ret)
 			return ret;
 	}
@@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
 		return -EINVAL;
 
 	if (if_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
-		if (ret)
-			return ret;
-
-		ret = smu_read_smc_arg(smu, if_version);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
 		if (ret)
 			return ret;
 	}
 
 	if (smu_version) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
-		if (ret)
-			return ret;
-
-		ret = smu_read_smc_arg(smu, smu_version);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
 		if (ret)
 			return ret;
 	}
@@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
 }
 
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max)
+			    uint32_t min, uint32_t max, bool lock_needed)
 {
 	int ret = 0;
 
-	if (min < 0 && max < 0)
-		return -EINVAL;
-
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
 		return 0;
 
+	if (lock_needed)
+		mutex_lock(&smu->mutex);
 	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+	if (lock_needed)
+		mutex_unlock(&smu->mutex);
+
 	return ret;
 }
 
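smu_set_soft_freq_range() now takes a lock_needed flag: external callers pass true so the call serializes on smu->mutex, while internal paths that already hold the mutex pass false. A toy userspace model of the pattern, with a pthread mutex standing in for smu->mutex and the real work reduced to a range check:

#include <pthread.h>

static pthread_mutex_t smu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Callers already holding the mutex pass 0; external entry points pass 1 */
static int set_soft_freq_range(unsigned int min, unsigned int max,
			       int lock_needed)
{
	int ret;

	if (lock_needed)
		pthread_mutex_lock(&smu_mutex);
	ret = (min <= max) ? 0 : -1;	/* stand-in for the real work */
	if (lock_needed)
		pthread_mutex_unlock(&smu_mutex);
	return ret;
}

int main(void)
{
	return set_soft_freq_range(300, 1200, 1);
}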
@@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 	if (max > 0) {
 		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 	if (min > 0) {
 		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
 
 	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 
-	ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
-					  param);
-	if (ret)
-		return ret;
-
-	ret = smu_read_smc_arg(smu, &param);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+					  param, &param);
 	if (ret)
 		return ret;
 
@@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
 					  SMU_MSG_TransferTableDram2Smu :
 					  SMU_MSG_TransferTableSmu2Dram,
-					  table_id | ((argument & 0xFFFF) << 16));
+					  table_id | ((argument & 0xFFFF) << 16),
+					  NULL);
 	if (ret)
 		return ret;
 
@@ -900,6 +888,7 @@ static int smu_sw_init(void *handle)
 
 	mutex_init(&smu->sensor_lock);
 	mutex_init(&smu->metrics_lock);
+	mutex_init(&smu->message_lock);
 
 	smu->watermarks_bitmap = 0;
 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -943,6 +932,13 @@ static int smu_sw_init(void *handle)
 		return ret;
 	}
 
+	if (adev->smu.ppt_funcs->i2c_eeprom_init) {
+		ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 
@@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle)
 	struct smu_context *smu = &adev->smu;
 	int ret;
 
+	if (adev->smu.ppt_funcs->i2c_eeprom_fini)
+		smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
 	kfree(smu->irq_source);
 	smu->irq_source = NULL;
 
@@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 			return ret;
 	}
 
+	ret = smu_set_driver_table_location(smu);
+	if (ret)
+		return ret;
+
 	/* smu_dump_pptable(smu); */
 	if (!amdgpu_sriov_vf(adev)) {
-		ret = smu_set_driver_table_location(smu);
-		if (ret)
-			return ret;
-
 		/*
 		 * Copy pptable bo in the vram to smc with SMU MSGs such as
 		 * SetDriverDramAddr and TransferTableDram2Smu.
@@ -1155,6 +1154,21 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
 				}
 			}
 		}
+
+		if (smu->ppt_funcs->set_power_source) {
+			/*
+			 * For Navi1X, manually switch it to AC mode as PMFW
+			 * may boot it with DC mode.
+			 */
+			if (adev->pm.ac_power)
+				ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+			else
+				ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+			if (ret) {
+				pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? "AC" : "DC");
+				return ret;
+			}
+		}
 	}
 	if (adev->asic_type != CHIP_ARCTURUS) {
 		ret = smu_notify_display_change(smu);
@@ -1454,29 +1468,84 @@ int smu_reset(struct smu_context *smu)
 	return ret;
 }
 
+static int smu_disable_dpm(struct smu_context *smu)
+{
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t smu_version;
+	int ret = 0;
+	bool use_baco = !smu->is_apu &&
+		((adev->in_gpu_reset &&
+		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+		 (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret) {
+		pr_err("Failed to get smu version.\n");
+		return ret;
+	}
+
+	/*
+	 * Disable all enabled SMU features.
+	 * This should really be handled by SMU FW; as a backup,
+	 * the driver issues this call until the corresponding
+	 * sequence in SMU FW is operational.
+	 */
+	ret = smu_system_features_control(smu, false);
+	if (ret) {
+		pr_err("Failed to disable smu features.\n");
+		return ret;
+	}
+
+	/*
+	 * Arcturus has no BACO bit in its disable-feature mask, so
+	 * re-enabling the BACO bit must be skipped on Arcturus.
+	 */
+	if (adev->asic_type == CHIP_ARCTURUS) {
+		if (use_baco && (smu_version > 0x360e00))
+			return 0;
+	}
+
+	/* For BACO, the BACO feature has to stay enabled */
+	if (use_baco) {
+		/*
+		 * Check whether SMU_FEATURE_BACO_BIT is supported via
+		 * smu_feature_get_index() rather than
+		 * smu_feature_is_enabled().
+		 *
+		 * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would
+		 * always return false here: the
+		 * 'smu_system_features_control(smu, false)' issued above has
+		 * just disabled all SMU features, so the index-based check
+		 * is the reliable one.
+		 */
+		if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
+			ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+			if (ret) {
+				pr_warn("set BACO feature enabled failed, return %d\n", ret);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
 static int smu_suspend(void *handle)
 {
-	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = &adev->smu;
-	bool baco_feature_is_enabled = false;
+	int ret;
+
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
 
 	if (!smu->pm_enabled)
 		return 0;
 
-	if(!smu->is_apu)
-		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
-
-	ret = smu_system_features_control(smu, false);
-	if (ret)
-		return ret;
-
-	if (baco_feature_is_enabled) {
-		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
-		if (ret) {
-			pr_warn("set BACO feature enabled failed, return %d\n", ret);
+	if (!amdgpu_sriov_vf(adev)) {
+		ret = smu_disable_dpm(smu);
+		if (ret)
 			return ret;
-		}
 	}
 
 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@ -1942,7 +2011,7 @@ int smu_set_mp1_state(struct smu_context *smu,
 		return 0;
 	}
 
-	ret = smu_send_smc_msg(smu, msg);
+	ret = smu_send_smc_msg(smu, msg, NULL);
 	if (ret)
 		pr_err("[PrepareMp1] Failed!\n");
 
@@ -2018,6 +2087,29 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 	return 0;
 }
 
+int smu_set_ac_dc(struct smu_context *smu)
+{
+	int ret = 0;
+
+	/* controlled by firmware */
+	if (smu->dc_controlled_by_gpio)
+		return 0;
+
+	mutex_lock(&smu->mutex);
+	if (smu->ppt_funcs->set_power_source) {
+		if (smu->adev->pm.ac_power)
+			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
+		else
+			ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
+		if (ret)
+			pr_err("Failed to switch to %s mode!\n",
+			       smu->adev->pm.ac_power ? "AC" : "DC");
+	}
+	mutex_unlock(&smu->mutex);
+
+	return ret;
+}
+
 const struct amd_ip_funcs smu_ip_funcs = {
 	.name = "smu",
 	.early_init = smu_early_init,
@@ -2620,12 +2712,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
 
 	return ret;
 }
-
-int smu_send_smc_msg(struct smu_context *smu,
-		     enum smu_message_type msg)
-{
-	int ret;
-
-	ret = smu_send_smc_msg_with_param(smu, msg, 0);
-	return ret;
-}
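
The dominant change in this file, and in the ppt backends below, is the message-API consolidation: the old send-then-smu_read_smc_arg() pair becomes a single smu_send_smc_msg_with_param(smu, msg, param, read_arg) call, where a NULL read_arg means the caller wants no response payload. A toy model of the new shape; the mailbox variable and the message numbers are stand-ins for the MP1 argument register and the real SMU_MSG_* ids:

#include <stdint.h>
#include <stdio.h>

static uint32_t mailbox;	/* stand-in for the MP1 argument register */

/* One call now covers send plus optional read-back of the response */
static int send_smc_msg_with_param(int msg, uint32_t param, uint32_t *read_arg)
{
	(void)msg;
	mailbox = param + 1;	/* fake firmware response */
	if (read_arg)
		*read_arg = mailbox;
	return 0;
}

/* Parameterless form, mirroring the new inline smu_send_smc_msg() */
static int send_smc_msg(int msg, uint32_t *read_arg)
{
	return send_smc_msg_with_param(msg, 0, read_arg);
}

int main(void)
{
	uint32_t version = 0;

	send_smc_msg(1, &version);		/* query with read-back */
	send_smc_msg_with_param(2, 0, NULL);	/* fire-and-forget */
	printf("version: %u\n", (unsigned int)version);
	return 0;
}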
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 14ba6aa..c6d3bef 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
@@ -42,7 +41,7 @@
 #include <linux/pci.h>
 #include "amdgpu_ras.h"
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
 
 #define CTF_OFFSET_EDGE			5
 #define CTF_OFFSET_HOTSPOT		5
@@ -127,6 +126,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
 	MSG_MAP(WaflTest,			     PPSMC_MSG_WaflTest),
 	MSG_MAP(SetXgmiMode,			     PPSMC_MSG_SetXgmiMode),
 	MSG_MAP(SetMemoryChannelEnable,		     PPSMC_MSG_SetMemoryChannelEnable),
+	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -373,13 +373,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetDpmFreqByIndex,
-			(clk_id << 16 | 0xFF));
+			(clk_id << 16 | 0xFF),
+			&num_of_levels);
 	if (ret) {
 		pr_err("[%s] failed to get dpm levels!\n", __func__);
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, &num_of_levels);
 	if (!num_of_levels) {
 		pr_err("[%s] number of clk levels is invalid!\n", __func__);
 		return -EINVAL;
@@ -389,12 +389,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
 	for (i = 0; i < num_of_levels; i++) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_GetDpmFreqByIndex,
-				(clk_id << 16 | i));
+				(clk_id << 16 | i),
+				&clk);
 		if (ret) {
 			pr_err("[%s] failed to get dpm freq by index!\n", __func__);
 			return ret;
 		}
-		smu_read_smc_arg(smu, &clk);
 		if (!clk) {
 			pr_err("[%s] clk value is invalid!\n", __func__);
 			return -EINVAL;
@@ -552,13 +552,13 @@ static int arcturus_run_btc(struct smu_context *smu)
 {
 	int ret = 0;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
 	if (ret) {
 		pr_err("RunAfllBtc failed!\n");
 		return ret;
 	}
 
-	return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+	return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
 }
 
 static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -743,7 +743,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_GFXCLK << 16) | (freq & 0xffff));
+			(PPCLK_GFXCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s gfxclk !\n",
 						max ? "max" : "min");
@@ -758,7 +759,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_UCLK << 16) | (freq & 0xffff));
+			(PPCLK_UCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s memclk !\n",
 						max ? "max" : "min");
@@ -773,7 +775,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_SOCCLK << 16) | (freq & 0xffff));
+			(PPCLK_SOCCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s socclk !\n",
 						max ? "max" : "min");
@@ -1288,12 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
 				return -EINVAL;
 
 			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
-				power_src << 16);
+				power_src << 16, &asic_default_power_limit);
 			if (ret) {
 				pr_err("[%s] get PPT limit failed!", __func__);
 				return ret;
 			}
-			smu_read_smc_arg(smu, &asic_default_power_limit);
 		} else {
 			/* the last hope to figure out the ppt limit */
 			if (!pptable) {
@@ -1497,7 +1499,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetWorkloadMask,
-					  1 << workload_type);
+					  1 << workload_type,
+					  NULL);
 	if (ret) {
 		pr_err("Fail to set workload type %d\n", workload_type);
 		return ret;
@@ -2187,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
 	control->class = I2C_CLASS_SPD;
 	control->dev.parent = &adev->pdev->dev;
 	control->algo = &arcturus_i2c_eeprom_i2c_algo;
-	snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+	snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
 
 	res = i2c_add_adapter(control);
 	if (res)
@@ -2214,6 +2217,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
 	return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
 }
 
+static int arcturus_set_df_cstate(struct smu_context *smu,
+				  enum pp_df_cstate state)
+{
+	uint32_t smu_version;
+	int ret;
+
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret) {
+		pr_err("Failed to get smu version!\n");
+		return ret;
+	}
+
+	/* PPSMC_MSG_DFCstateControl is supported from PMFW 54.15.0 onwards */
+	if (smu_version < 0x360F00) {
+		pr_err("DFCstateControl is only supported from PMFW 54.15.0 onwards\n");
+		return -EINVAL;
+	}
+
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
 	/* translate smu index into arcturus specific index */
 	.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2277,7 +2301,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
-	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,
 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2307,6 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 	.get_pptable_power_limit = arcturus_get_pptable_power_limit,
+	.set_df_cstate = arcturus_set_df_cstate,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
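
The 0x360F00 cutoff in arcturus_set_df_cstate() lines up with the "54.15.0" in its error message, assuming the usual PMFW packing of one byte each for major, minor and patch; a quick check, which also accounts for the 0x360e00 Arcturus BACO cutoff in smu_disable_dpm() earlier:

#include <stdio.h>

int main(void)
{
	/* assumed packing: (major << 16) | (minor << 8) | patch */
	unsigned int v54_15_0 = (54u << 16) | (15u << 8) | 0u;
	unsigned int v54_14_0 = (54u << 16) | (14u << 8) | 0u;

	printf("54.15.0 -> 0x%06X\n", v54_15_0);	/* 0x360F00 */
	printf("54.14.0 -> 0x%06X\n", v54_14_0);	/* 0x360E00 */
	return 0;
}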
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index bf04cfe..7740488 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
 	switch (sources) {
 	default:
 		pr_err("Unknown throttling event sources.");
-		/* fall through */
+		fallthrough;
 	case 0:
 		protection = false;
 		/* src is unused */
@@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
 			data->force_pcie_gen = PP_PCIEGen2;
 			if (current_link_speed == PP_PCIEGen2)
 				break;
-			/* fall through */
+			fallthrough;
 		case PP_PCIEGen2:
 			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
 				break;
+			fallthrough;
 #endif
-			/* fall through */
 		default:
 			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
 			break;
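
These hunks replace the /* fall through */ comments with the fallthrough pseudo-keyword, which the compiler itself verifies under -Wimplicit-fallthrough instead of relying on comment parsing. A minimal sketch of what the kernel macro expands to and where it sits in a switch; the macro is defined inline here so the sketch stands alone:

/* In the kernel this comes from <linux/compiler_attributes.h> */
#define fallthrough __attribute__((__fallthrough__))

int classify(int sources)
{
	int protection;

	switch (sources) {
	default:
		/* unknown source: fall into the safe case below */
		fallthrough;	/* checked by -Wimplicit-fallthrough */
	case 0:
		protection = 0;
		break;
	case 1:
		protection = 1;
		break;
	}
	return protection;
}

int main(void)
{
	return classify(1);
}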
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 92a65e3d..f29f95b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
 	}
 
 	if (data->need_update_dpm_table &
-			(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
 		result = vega10_populate_all_graphic_levels(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == result),
 				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
 	}
 
 	if (data->need_update_dpm_table &
-			(DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
 		result = vega10_populate_all_memory_levels(hwmgr);
 		PP_ASSERT_WITH_CODE((0 == result),
 				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b3ec56..08b6ba3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
 	int ret = 0;
+	bool use_baco = (adev->in_gpu_reset &&
+			 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+		(adev->in_runpm && amdgpu_asic_supports_baco(adev));
 
 	ret = vega20_init_sclk_threshold(hwmgr);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to init sclk threshold!",
 			return ret);
 
-	if (adev->in_gpu_reset &&
-	    (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
-
+	if (use_baco) {
 		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
 		if (ret)
 			pr_err("Failed to apply vega20 baco workaround!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714..ae2c318 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -362,6 +362,7 @@ struct smu_context
 	struct mutex			mutex;
 	struct mutex			sensor_lock;
 	struct mutex			metrics_lock;
+	struct mutex			message_lock;
 	uint64_t pool_size;
 
 	struct smu_table_context	smu_table;
@@ -371,6 +372,9 @@ struct smu_context
 	struct amd_pp_display_configuration  *display_config;
 	struct smu_baco_context		smu_baco;
 	void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry                   *debugfs_sclk;
+#endif
 
 	uint32_t pstate_sclk;
 	uint32_t pstate_mclk;
@@ -404,6 +408,7 @@ struct smu_context
 	uint32_t smc_if_version;
 
 	bool uploading_custom_pp_table;
+	bool dc_controlled_by_gpio;
 };
 
 struct i2c_adapter;
@@ -514,8 +519,7 @@ struct pptable_funcs {
 	int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
 	int (*system_features_control)(struct smu_context *smu, bool en);
 	int (*send_smc_msg_with_param)(struct smu_context *smu,
-				       enum smu_message_type msg, uint32_t param);
-	int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+				       enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
 	int (*init_display_count)(struct smu_context *smu, uint32_t count);
 	int (*set_allowed_mask)(struct smu_context *smu);
 	int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -567,6 +571,7 @@ struct pptable_funcs {
 	int (*override_pcie_parameters)(struct smu_context *smu);
 	uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
 	int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu);
+	int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src);
 };
 
 int smu_load_microcode(struct smu_context *smu);
@@ -707,7 +712,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			   uint32_t *min, uint32_t *max, bool lock_needed);
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
-			    uint32_t min, uint32_t max);
+			    uint32_t min, uint32_t max, bool lock_needed);
 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 			    uint32_t min, uint32_t max);
 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -715,6 +720,7 @@ int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
+int smu_set_ac_dc(struct smu_context *smu);
 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e329125..f736d77 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -110,7 +110,11 @@
 //Others
 #define PPSMC_MSG_SetMemoryChannelEnable         0x39
 
-#define PPSMC_Message_Count                      0x3A
+//OOB
+#define PPSMC_MSG_SetNumBadHbmPagesRetired	 0x3A
+
+#define PPSMC_MSG_DFCstateControl		 0x3B
+#define PPSMC_Message_Count			 0x3C
 
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index 822cd8b..cea6509 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -37,7 +37,7 @@
 #define PP_ASSERT_WITH_CODE(cond, msg, code)	\
 	do {					\
 		if (!(cond)) {			\
-			pr_warn("%s\n", msg);	\
+			pr_warn_ratelimited("%s\n", msg);	\
 			code;			\
 		}				\
 	} while (0)
@@ -45,7 +45,7 @@
 #define PP_ASSERT(cond, msg)	\
 	do {					\
 		if (!(cond)) {			\
-			pr_warn("%s\n", msg);	\
+			pr_warn_ratelimited("%s\n", msg);	\
 		}				\
 	} while (0)
 
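PP_ASSERT and PP_ASSERT_WITH_CODE fire on every call, so a condition that keeps failing on a hot path floods the kernel log; the ratelimited variant keeps the message but throttles the output. A crude userspace analog of the idea (the real pr_warn_ratelimited uses a time-window ratelimit state rather than a fixed budget):

#include <stdio.h>

/* Print the first few occurrences, then suppress */
static void warn_ratelimited(const char *msg)
{
	static int budget = 10;

	if (budget > 0) {
		fprintf(stderr, "%s\n", msg);
		if (--budget == 0)
			fprintf(stderr, "(further warnings suppressed)\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 1000; i++)
		warn_ratelimited("Failed to populate SCLK!");
	return 0;
}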
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index ac0120e..4b2da98 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -701,7 +701,8 @@ typedef struct {
   // APCC Settings
   uint16_t     PccThresholdLow;
   uint16_t     PccThresholdHigh;
-  uint32_t     PaddingAPCC[6];  //FIXME pending SPEC
+  uint32_t     MGpuFanBoostLimitRpm;
+  uint32_t     PaddingAPCC[5];
 
   // Temperature Dependent Vmin
   uint16_t     VDDGFX_TVmin;       //Celsius
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index d5314d1..674e426 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -28,8 +28,9 @@
 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU11_DRIVER_IF_VERSION_VG20 0x13
 #define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x33
-#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x36
 
 /* MP Apertures */
 #define MP0_Public			0x03800000
@@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
 int
 smu_v11_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param);
-
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+			      uint32_t param,
+			      uint32_t *read_arg);
 
 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
 
@@ -267,4 +267,7 @@ uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
 int smu_v11_0_set_performance_level(struct smu_context *smu,
 				    enum amd_dpm_forced_level level);
 
+int smu_v11_0_set_power_source(struct smu_context *smu,
+			       enum smu_power_src_type power_src);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index d79e54b..7fbebc1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping {
 int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
 					      uint16_t msg);
 
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
-
 int smu_v12_0_wait_for_response(struct smu_context *smu);
 
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param);
+			      uint32_t param,
+			      uint32_t *read_arg);
 
 int smu_v12_0_check_fw_status(struct smu_context *smu);
 
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index aed4d6e..9c60b38a 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
 #include <linux/pci.h>
 #include "amdgpu.h"
@@ -31,7 +30,6 @@
 #include "amdgpu_atomfirmware.h"
 #include "smu_v11_0.h"
 #include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
 #include "atom.h"
 #include "navi10_ppt.h"
 #include "smu_v11_0_pptable.h"
@@ -349,7 +347,6 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
 				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
 				| FEATURE_MASK(FEATURE_BACO_BIT)
-				| FEATURE_MASK(FEATURE_ACDC_BIT)
 				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
 				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
 				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
@@ -393,6 +390,9 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
 
+	if (smu->dc_controlled_by_gpio)
+		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
+
 	/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
 	if (is_asic_secure(smu)) {
 		/* only for navi10 A0 */
@@ -527,6 +527,9 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
 
 	table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
 
+	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
+		smu->dc_controlled_by_gpio = true;
+
 	mutex_lock(&smu_baco->mutex);
 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
 	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
@@ -661,14 +664,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		/* vcn dpm on is a prerequisite for vcn power gate messages */
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->vcn_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 			if (ret)
 				return ret;
 		}
@@ -686,14 +689,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 	if (enable) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->jpeg_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
 			if (ret)
 				return ret;
 		}
@@ -970,7 +973,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
 		if (ret)
 			return size;
 
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
 			return size;
 		break;
@@ -1042,7 +1045,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
 	int ret = 0;
 	uint32_t max_freq = 0;
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
 	if (ret)
 		return ret;
 
@@ -1066,7 +1069,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
-						  smu->display_config->num_display);
+						  smu->display_config->num_display,
+						  NULL);
 		if (ret)
 			return ret;
 	}
@@ -1093,7 +1097,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -1119,7 +1123,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
 		if (ret)
 			return ret;
 
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -1391,7 +1395,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 	if (workload_type < 0)
 		return -EINVAL;
 	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+				    1 << workload_type, NULL);
 
 	return ret;
 }
@@ -1456,7 +1460,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 			if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
 				ret = smu_send_smc_msg_with_param(smu,
 								  SMU_MSG_SetMinDeepSleepDcefclk,
-								  min_clocks.dcef_clock_in_sr/100);
+								  min_clocks.dcef_clock_in_sr/100,
+								  NULL);
 				if (ret) {
 					pr_err("Attempt to set divider for DCEFCLK Failed!");
 					return ret;
@@ -1678,10 +1683,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
 		return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
 	}
 
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1746,10 +1751,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
 	if (ret)
 		return ret;
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -1859,12 +1864,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
 				return -EINVAL;
 
 			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
-				power_src << 16);
+				power_src << 16, &asic_default_power_limit);
 			if (ret) {
 				pr_err("[%s] get PPT limit failed!", __func__);
 				return ret;
 			}
-			smu_read_smc_arg(smu, &asic_default_power_limit);
 		} else {
 			/* the last hope to figure out the ppt limit */
 			if (!pptable) {
@@ -1904,7 +1908,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 					pptable->PcieLaneCount[i] : pcie_width_cap);
 		ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_OverridePcieParameters,
-					  smu_pcie_arg);
+					  smu_pcie_arg,
+					  NULL);
 
 		if (ret)
 			return ret;
@@ -1950,13 +1955,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_GetVoltageByDpm,
-					  param);
+					  param,
+					  &value);
 	if (ret) {
 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, &value);
 	*voltage = (uint16_t)value;
 
 	return 0;
@@ -2213,7 +2218,7 @@ static int navi10_run_btc(struct smu_context *smu)
 {
 	int ret = 0;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
 	if (ret)
 		pr_err("RunBtc failed!\n");
 
@@ -2225,9 +2230,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
 	int result = 0;
 
 	if (!enable)
-		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
 	else
-		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
 
 	return result;
 }
@@ -2336,7 +2341,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
-	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,
 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2370,6 +2374,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
 	.get_pptable_power_limit = navi10_get_pptable_power_limit,
 	.run_btc = navi10_run_btc,
 	.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
+	.set_power_source = smu_v11_0_set_power_source,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 3ad0f4a..7bf52ec 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
-#include "soc15_common.h"
 #include "smu_v12_0_ppsmc.h"
 #include "smu12_driver_if.h"
 #include "smu_v12_0.h"
@@ -342,14 +341,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 	if (enable) {
 		/* vcn dpm on is a prerequisite for vcn power gate messages */
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->vcn_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 			if (ret)
 				return ret;
 		}
@@ -367,14 +366,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 	if (enable) {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
 			if (ret)
 				return ret;
 		}
 		power_gate->jpeg_gated = false;
 	} else {
 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
 			if (ret)
 				return ret;
 		}
@@ -423,7 +422,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 			return ret;
 
 		force_freq = highest ? max_freq : min_freq;
-		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -456,7 +455,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
 		if (ret)
 			return ret;
 
-		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
 		if (ret)
 			return ret;
 	}
@@ -622,22 +621,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
 					soft_max_level == 0 ? min_freq :
-					soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+					soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+					NULL);
 		if (ret)
 			return ret;
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
 					soft_min_level == 2 ? max_freq :
-					soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+					soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+					NULL);
 		if (ret)
 			return ret;
 		break;
 	case SMU_SOCCLK:
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
 		if (ret)
 			return ret;
 		break;
@@ -645,10 +646,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
 	case SMU_FCLK:
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
 		GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
 		if (ret)
 			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
 		if (ret)
 			return ret;
 		break;
@@ -672,14 +673,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
 	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
 	if (workload_type < 0) {
-		pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+		/*
+		 * TODO: If some use case needs the powersave/default power mode,
+		 * consider entering WORKLOAD_COMPUTE/WORKLOAD_CUSTOM to save power.
+		 */
+		pr_err_once("Unsupported power profile mode %d on RENOIR\n", smu->power_profile_mode);
 		return -EINVAL;
 	}
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+				    1 << workload_type,
+				    NULL);
 	if (ret) {
-		pr_err("Fail to set workload type %d\n", workload_type);
+		pr_err_once("Fail to set workload type %d\n", workload_type);
 		return ret;
 	}
 
@@ -697,7 +703,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -705,7 +711,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
 	if (ret)
 		return ret;
 
-	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
 	if (ret)
 		return ret;
 
@@ -910,7 +916,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
 	.powergate_vcn = smu_v12_0_powergate_vcn,
 	.powergate_jpeg = smu_v12_0_powergate_jpeg,
 	.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
-	.read_smc_arg = smu_v12_0_read_arg,
 	.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
 	.gfx_off_control = smu_v12_0_gfx_off_control,
 	.init_smc_tables = smu_v12_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 7bd200f..40c35bc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -79,12 +79,13 @@
 #define smu_set_default_od_settings(smu, initialize) \
 	((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
 
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+	((smu)->ppt_funcs->send_smc_msg_with_param ? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
 
-#define smu_send_smc_msg_with_param(smu, msg, param) \
-	((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
-	((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) {
+	return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
+
 #define smu_alloc_dpm_context(smu) \
 	((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
 #define smu_init_display_count(smu, count) \
@@ -210,4 +211,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
 #define smu_disable_umc_cdr_12gbps_workaround(smu) \
 	((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0)
 
+#define smu_set_power_source(smu, power_src) \
+	((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+
 #endif
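
With read_smc_arg dropped from pptable_funcs, smu_send_smc_msg() shrinks to a one-line inline over the with-param macro. The wrappers in this header also share a convention worth noting: a backend that leaves a hook unimplemented is treated as a successful no-op. A standalone model of that guard pattern, with all names invented for the sketch:

#include <stddef.h>
#include <stdio.h>

struct funcs {
	int (*send)(int msg, unsigned int param, unsigned int *read_arg);
};

/* Unimplemented hook == successful no-op, as in the smu_* wrappers */
#define send_msg_with_param(f, msg, param, read_arg) \
	((f)->send ? (f)->send((msg), (param), (read_arg)) : 0)

static inline int send_msg(struct funcs *f, int msg, unsigned int *read_arg)
{
	return send_msg_with_param(f, msg, 0, read_arg);
}

int main(void)
{
	struct funcs none = { .send = NULL };

	printf("%d\n", send_msg(&none, 1, NULL));	/* prints 0 */
	return 0;
}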
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c9e5ce1..d19e1d0 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
 
 #define SMU_11_0_PARTIAL_PPTABLE
 
-#include "pp_debug.h"
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
@@ -64,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
 	return 0;
 }
 
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 
@@ -92,7 +91,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 int
 smu_v11_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param)
+			      uint32_t param,
+			      uint32_t *read_arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
@@ -101,11 +101,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 	if (index < 0)
 		return index;
 
+	mutex_lock(&smu->message_lock);
 	ret = smu_v11_0_wait_for_response(smu);
 	if (ret) {
 		pr_err("Msg issuing pre-check failed and "
 		       "SMU may be not in the right state!\n");
-		return ret;
+		goto out;
 	}
 
 	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -115,10 +116,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 
 	ret = smu_v11_0_wait_for_response(smu);
-	if (ret)
+	if (ret) {
 		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
 		       smu_get_message_name(smu, msg), index, param, ret);
-
+		goto out;
+	}
+	if (read_arg) {
+		ret = smu_v11_0_read_arg(smu, read_arg);
+		if (ret) {
+			pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
+			       smu_get_message_name(smu, msg), index, param, ret);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&smu->message_lock);
 	return ret;
 }
 
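The new message_lock closes a real window: the argument register is shared, so without the lock one thread's read-back could observe the response to another thread's message. A runnable pthread model of the now-atomic send/wait/read sequence:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t message_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int arg_reg;	/* models the shared argument register */

/* Send + wait + read as one critical section */
static int send_and_read(unsigned int msg, unsigned int *read_arg)
{
	pthread_mutex_lock(&message_lock);
	arg_reg = msg * 2;		/* fake firmware response */
	if (read_arg)
		*read_arg = arg_reg;	/* guaranteed to be our response */
	pthread_mutex_unlock(&message_lock);
	return 0;
}

static void *worker(void *p)
{
	unsigned int msg = (unsigned int)(unsigned long)p, resp = 0;

	send_and_read(msg, &resp);
	if (resp != msg * 2)
		fprintf(stderr, "cross-talk on message %u\n", msg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1UL);
	pthread_create(&b, NULL, worker, (void *)2UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}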
@@ -262,6 +274,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
 	case CHIP_NAVI10:
 		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
 		break;
+	case CHIP_NAVI12:
+		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+		break;
 	case CHIP_NAVI14:
 		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
 		break;
@@ -671,12 +686,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
 
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetSystemVirtualDramAddrHigh,
-					  address_high);
+					  address_high,
+					  NULL);
 	if (ret)
 		return ret;
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetSystemVirtualDramAddrLow,
-					  address_low);
+					  address_low,
+					  NULL);
 	if (ret)
 		return ret;
 
@@ -685,15 +702,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
 	address_low  = (uint32_t)lower_32_bits(address);
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
-					  address_high);
+					  address_high, NULL);
 	if (ret)
 		return ret;
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
-					  address_low);
+					  address_low, NULL);
 	if (ret)
 		return ret;
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
-					  (uint32_t)memory_pool->size);
+					  (uint32_t)memory_pool->size, NULL);
 	if (ret)
 		return ret;
 
@@ -757,7 +774,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 	int ret;
 
 	ret = smu_send_smc_msg_with_param(smu,
-					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
+					  SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
 	if (ret)
 		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
 
@@ -784,11 +801,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
 	if (driver_table->mc_address) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetDriverDramAddrHigh,
-				upper_32_bits(driver_table->mc_address));
+				upper_32_bits(driver_table->mc_address),
+				NULL);
 		if (!ret)
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetDriverDramAddrLow,
-				lower_32_bits(driver_table->mc_address));
+				lower_32_bits(driver_table->mc_address),
+				NULL);
 	}
 
 	return ret;
@@ -802,11 +821,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 	if (tool_table->mc_address) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetToolsDramAddrHigh,
-				upper_32_bits(tool_table->mc_address));
+				upper_32_bits(tool_table->mc_address),
+				NULL);
 		if (!ret)
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetToolsDramAddrLow,
-				lower_32_bits(tool_table->mc_address));
+				lower_32_bits(tool_table->mc_address),
+				NULL);
 	}
 
 	return ret;
@@ -819,7 +840,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 	if (!smu->pm_enabled)
 		return ret;
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
 	return ret;
 }
 
@@ -837,12 +858,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
 	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
-					  feature_mask[1]);
+					  feature_mask[1], NULL);
 	if (ret)
 		goto failed;
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
-					  feature_mask[0]);
+					  feature_mask[0], NULL);
 	if (ret)
 		goto failed;
 
@@ -862,17 +883,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
 		return -EINVAL;
 
 	if (bitmap_empty(feature->enabled, feature->feature_num)) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-		if (ret)
-			return ret;
-		ret = smu_read_smc_arg(smu, &feature_mask_high);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
 		if (ret)
 			return ret;
 
-		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-		if (ret)
-			return ret;
-		ret = smu_read_smc_arg(smu, &feature_mask_low);
+		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
 		if (ret)
 			return ret;
 
@@ -894,7 +909,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
 	int ret = 0;
 
 	ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-				     SMU_MSG_DisableAllSmuFeatures));
+				     SMU_MSG_DisableAllSmuFeatures), NULL);
 	if (ret)
 		return ret;
 
@@ -923,7 +938,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 		return ret;
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
 	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
 
 	return ret;
 }
@@ -947,30 +962,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
 		return -EINVAL;
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
-					  clk_id << 16);
+					  clk_id << 16, clock);
 	if (ret) {
 		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
 		return ret;
 	}
 
-	ret = smu_read_smc_arg(smu, clock);
-	if (ret)
-		return ret;
-
 	if (*clock != 0)
 		return 0;
 
 	/* if DC limit is zero, return AC limit */
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
-					  clk_id << 16);
+					  clk_id << 16, clock);
 	if (ret) {
 		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
 		return ret;
 	}
 
-	ret = smu_read_smc_arg(smu, clock);
-
-	return ret;
+	return 0;
 }
 
 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
@@ -1106,7 +1115,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
 		return -EOPNOTSUPP;
 	}
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
 	if (ret) {
 		pr_err("[%s] Set power limit Failed!\n", __func__);
 		return ret;
@@ -1136,11 +1145,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
 		ret =  smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
 	else {
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
-						  (asic_clk_id << 16));
-		if (ret)
-			return ret;
-
-		ret = smu_read_smc_arg(smu, &freq);
+						  (asic_clk_id << 16), &freq);
 		if (ret)
 			return ret;
 	}
@@ -1375,9 +1380,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 			return 0;
 		if (enable)
-			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
 		else
-			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
 		break;
 	default:
 		break;
@@ -1515,10 +1520,18 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
 	int ret = 0;
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetXgmiMode,
-					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+					  NULL);
 	return ret;
 }
 
+static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
+{
+	return smu_send_smc_msg(smu,
+				SMU_MSG_ReenableAcDcInterrupt,
+				NULL);
+}
+
 #define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
 #define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
 
@@ -1552,6 +1565,9 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
 		break;
 
 		}
+	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
+		if (src_id == 0xfe)
+			smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
 	}
 
 	return 0;
@@ -1591,6 +1607,12 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
 	if (ret)
 		return ret;
 
+	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
+				0xfe,
+				irq_src);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 
@@ -1628,14 +1650,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
 {
 	int ret = 0;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
 
 	return ret;
 }
 
 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
 {
-	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
 }
 
 bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1704,12 +1726,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
 			data |= 0x80000000;
 			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
 
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
 		} else {
-			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
 		}
 	} else {
-		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
 		if (ret)
 			goto out;
 
@@ -1777,19 +1799,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 	param = (clk_id & 0xffff) << 16;
 
 	if (max) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
-		if (ret)
-			goto failed;
-		ret = smu_read_smc_arg(smu, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
 		if (ret)
 			goto failed;
 	}
 
 	if (min) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
-		if (ret)
-			goto failed;
-		ret = smu_read_smc_arg(smu, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
 		if (ret)
 			goto failed;
 	}
@@ -1811,7 +1827,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	if (max > 0) {
 		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -1819,7 +1835,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	if (min > 0) {
 		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
-						  param);
+						  param, NULL);
 		if (ret)
 			return ret;
 	}
@@ -1939,3 +1955,18 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
 	return ret;
 }
 
+int smu_v11_0_set_power_source(struct smu_context *smu,
+			       enum smu_power_src_type power_src)
+{
+	int pwr_source;
+
+	pwr_source = smu_power_get_index(smu, (uint32_t)power_src);
+	if (pwr_source < 0)
+		return -EINVAL;
+
+	return smu_send_smc_msg_with_param(smu,
+					SMU_MSG_NotifyPowerSource,
+					pwr_source,
+					NULL);
+}
+
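
The caller-visible effect of this series: the old two-step pattern (send a message, then fetch the response register with smu_read_smc_arg()) is folded into a single call taking an optional output pointer, so the register read happens under the same message_lock as the send. Roughly, for the callers converted throughout these files:

	/* Before: send and read were separate calls, racy between threads. */
	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
	if (ret)
		return ret;
	ret = smu_read_smc_arg(smu, &speed);

	/* After: pass an output pointer, or NULL when no result is needed. */
	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, &speed);
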
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 518e659..169ebda 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
@@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
 	return 0;
 }
 
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 
@@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu,
 			      enum smu_message_type msg,
-			      uint32_t param)
+			      uint32_t param,
+			      uint32_t *read_arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, index = 0;
@@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
 	if (index < 0)
 		return index;
 
+	mutex_lock(&smu->message_lock);
 	ret = smu_v12_0_wait_for_response(smu);
 	if (ret) {
 		pr_err("Msg issuing pre-check failed and "
 		       "SMU may not be in the right state!\n");
-		return ret;
+		goto out;
 	}
 
 	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
 	smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
 
 	ret = smu_v12_0_wait_for_response(smu);
-	if (ret)
+	if (ret) {
 		pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
 		       index, ret, param);
-
+		goto out;
+	}
+	if (read_arg) {
+		ret = smu_v12_0_read_arg(smu, read_arg);
+		if (ret) {
+			pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+			       index, ret, param);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&smu->message_lock);
 	return ret;
 }
 
@@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
 		return 0;
 
 	if (gate)
-		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
 	else
-		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
 }
 
 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
 		return 0;
 
 	if (gate)
-		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
 	else
-		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+		return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
 }
 
 int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
 		return 0;
 
 	if (gate)
-		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
 	else
-		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+		return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
 }
 
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 		return 0;
 
 	return smu_v12_0_send_msg_with_param(smu,
-		SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+		SMU_MSG_SetGfxCGPG,
+		enable ? 1 : 0,
+		NULL);
 }
 
 int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
 	int ret = 0, timeout = 500;
 
 	if (enable) {
-		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+		ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
 
 	} else {
-		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+		ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
 
 		/* confirm gfx is back to "on" state, timeout is 0.5 second */
 		while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
 	if (!feature_mask || num < 2)
 		return -EINVAL;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_high);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
 	if (ret)
 		return ret;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
-	if (ret)
-		return ret;
-	ret = smu_read_smc_arg(smu, &feature_mask_low);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
 	if (ret)
 		return ret;
 
@@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 		switch (clk_type) {
 		case SMU_GFXCLK:
 		case SMU_SCLK:
-			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+			ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
 			if (ret) {
 				pr_err("Attempt to get max GX frequency from SMC Failed !\n");
 				goto failed;
 			}
-			ret = smu_read_smc_arg(smu, max);
-			if (ret)
-				goto failed;
 			break;
 		case SMU_UCLK:
 		case SMU_FCLK:
@@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 		switch (clk_type) {
 		case SMU_GFXCLK:
 		case SMU_SCLK:
-			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+			ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
 			if (ret) {
 				pr_err("Attempt to get min GX frequency from SMC Failed !\n");
 				goto failed;
 			}
-			ret = smu_read_smc_arg(smu, min);
-			if (ret)
-				goto failed;
 			break;
 		case SMU_UCLK:
 		case SMU_FCLK:
@@ -450,7 +452,7 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
 }
 
 int smu_v12_0_mode2_reset(struct smu_context *smu){
-	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+	return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
 }
 
 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -461,39 +463,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 	switch (clk_type) {
 	case SMU_GFXCLK:
 	case SMU_SCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
 		if (ret)
 			return ret;
 
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
 		if (ret)
 			return ret;
 	break;
 	case SMU_FCLK:
 	case SMU_MCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
 		if (ret)
 			return ret;
 
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
 		if (ret)
 			return ret;
 	break;
 	case SMU_SOCCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
 		if (ret)
 			return ret;
 
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
 		if (ret)
 			return ret;
 	break;
 	case SMU_VCLK:
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
 		if (ret)
 			return ret;
 
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
 		if (ret)
 			return ret;
 	break;
@@ -512,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
 	if (driver_table->mc_address) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetDriverDramAddrHigh,
-				upper_32_bits(driver_table->mc_address));
+				upper_32_bits(driver_table->mc_address),
+				NULL);
 		if (!ret)
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetDriverDramAddrLow,
-				lower_32_bits(driver_table->mc_address));
+				lower_32_bits(driver_table->mc_address),
+				NULL);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 49e5ef3..16aa171 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -33,6 +33,8 @@
 #include "smu7_smumgr.h"
 #include "vega20_hwmgr.h"
 
+#include "smu_v11_0_i2c.h"
+
 /* MP Apertures */
 #define MP0_Public			0x03800000
 #define MP0_SRAM			0x03900000
@@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
 	struct vega20_smumgr *priv;
 	unsigned long tools_size = 0x19000;
 	int ret = 0;
+	struct amdgpu_device *adev = hwmgr->adev;
 
 	struct cgs_firmware_info info = {0};
 
@@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
 	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
 	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
 
+	ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+	if (ret)
+		goto err4;
+
 	return 0;
 
 err4:
@@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
+	struct amdgpu_device *adev = hwmgr->adev;
+
+	smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
 
 	if (priv) {
 		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
@@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
 		kfree(hwmgr->smu_backend);
 		hwmgr->smu_backend = NULL;
 	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4ad8d6c..49ff375 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include "pp_debug.h"
 #include <linux/firmware.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
@@ -587,7 +586,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
 
 static int vega20_run_btc_afll(struct smu_context *smu)
 {
-	return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+	return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
 }
 
 #define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +669,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetDpmFreqByIndex,
-			(clk_id << 16 | 0xFF));
+			(clk_id << 16 | 0xFF),
+			&num_of_levels);
 	if (ret) {
 		pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, &num_of_levels);
 	if (!num_of_levels) {
 		pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
 		return -EINVAL;
@@ -687,12 +686,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
 	for (i = 0; i < num_of_levels; i++) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_GetDpmFreqByIndex,
-				(clk_id << 16 | i));
+				(clk_id << 16 | i),
+				&clk);
 		if (ret) {
 			pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
 			return ret;
 		}
-		smu_read_smc_arg(smu, &clk);
 		if (!clk) {
 			pr_err("[GetDpmFreqByIndex] clk value is invalid!");
 			return -EINVAL;
@@ -1200,7 +1199,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_GFXCLK << 16) | (freq & 0xffff));
+			(PPCLK_GFXCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s gfxclk !\n",
 						max ? "max" : "min");
@@ -1215,7 +1215,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_UCLK << 16) | (freq & 0xffff));
+			(PPCLK_UCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s memclk !\n",
 						max ? "max" : "min");
@@ -1230,7 +1231,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_SOCCLK << 16) | (freq & 0xffff));
+			(PPCLK_SOCCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s socclk !\n",
 						max ? "max" : "min");
@@ -1245,7 +1247,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 			single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_FCLK << 16) | (freq & 0xffff));
+			(PPCLK_FCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s fclk !\n",
 						max ? "max" : "min");
@@ -1260,7 +1263,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 		if (!max) {
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+				(PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+				NULL);
 			if (ret) {
 				pr_err("Failed to set hard min dcefclk !\n");
 				return ret;
@@ -1421,7 +1425,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
 		}
 
 		ret = smu_send_smc_msg_with_param(smu,
-				SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+				SMU_MSG_SetMinLinkDpmByIndex,
+				soft_min_level,
+				NULL);
 		if (ret)
 			pr_err("Failed to set min link dpm level!\n");
 
@@ -1477,13 +1483,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetAVFSVoltageByDpm,
-			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+			voltage);
 	if (ret) {
 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, voltage);
 	*voltage = *voltage / VOLTAGE_SCALE;
 
 	return 0;
@@ -1956,8 +1962,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
 	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
-	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+	smu_send_smc_msg_with_param(smu,
+			SMU_MSG_SetWorkloadMask,
+			1 << workload_type,
+			NULL);
 
 	return ret;
 }
@@ -2029,7 +2037,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+				NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 				return ret;
@@ -2047,7 +2056,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
 	if (!smu->smu_dpm.dpm_context)
 		return -EINVAL;
 
-	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
 	ret = vega20_set_uclk_to_highest_dpm_level(smu,
 						   &dpm_table->mem_table);
 	if (ret)
@@ -2074,7 +2083,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		smu_send_smc_msg_with_param(smu,
 					    SMU_MSG_NumOfDisplays,
-					    smu->display_config->num_display);
+					    smu->display_config->num_display,
+					    NULL);
 	}
 
 	return ret;
@@ -2247,7 +2257,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 			if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
 				ret = smu_send_smc_msg_with_param(smu,
 								  SMU_MSG_SetMinDeepSleepDcefclk,
-								  min_clocks.dcef_clock_in_sr/100);
+								  min_clocks.dcef_clock_in_sr/100,
+								  NULL);
 				if (ret) {
 					pr_err("Attempt to set divider for DCEFCLK Failed!");
 					return ret;
@@ -2262,7 +2273,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 		memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_SetHardMinByFreq,
-						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+						  NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -2853,8 +2865,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
 	struct smu_table_context *table_context = &smu->smu_table;
 	PPTable_t *pptable = table_context->driver_pptable;
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
-			(uint32_t)pptable->FanTargetTemperature);
+	ret = smu_send_smc_msg_with_param(smu,
+			SMU_MSG_SetFanTemperatureTarget,
+			(uint32_t)pptable->FanTargetTemperature,
+			NULL);
 
 	return ret;
 }
@@ -2864,15 +2878,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
 {
 	int ret;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
 
 	if (ret) {
 		pr_err("Attempt to get current RPM from SMC Failed!\n");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, speed);
-
 	return 0;
 }
 
@@ -3137,7 +3149,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
 		return -EINVAL;
 	}
 
-	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
 static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3167,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
 					pptable->PcieLaneCount[i] : pcie_width_cap);
 		ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_OverridePcieParameters,
-					  smu_pcie_arg);
+					  smu_pcie_arg,
+					  NULL);
 	}
 
 	return ret;
@@ -3229,7 +3242,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
-	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,
 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 8ae1e1f..be7c29c 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -9,7 +9,6 @@
 #include <drm/drm_device.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vblank.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <linux/clk.h>
@@ -138,24 +137,9 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
 			      ~ARCPGU_CTRL_ENABLE_MASK);
 }
 
-static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
-				      struct drm_crtc_state *state)
-{
-	struct drm_pending_vblank_event *event = crtc->state->event;
-
-	if (event) {
-		crtc->state->event = NULL;
-
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, event);
-		spin_unlock_irq(&crtc->dev->event_lock);
-	}
-}
-
 static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
 	.mode_valid	= arc_pgu_crtc_mode_valid,
 	.mode_set_nofb	= arc_pgu_crtc_mode_set_nofb,
-	.atomic_begin	= arc_pgu_crtc_atomic_begin,
 	.atomic_enable	= arc_pgu_crtc_atomic_enable,
 	.atomic_disable	= arc_pgu_crtc_atomic_disable,
 };
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 8fd7094..5283993 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -40,7 +40,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
 		return ret;
 
 	/* Link drm_bridge to encoder */
-	ret = drm_bridge_attach(encoder, bridge, NULL);
+	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (ret)
 		drm_encoder_cleanup(encoder);
 
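drm_bridge_attach() gained a fourth parameter, an enum drm_bridge_attach_flags bitmask. Passing 0 keeps the historical behaviour where the bridge creates its own connector; DRM_BRIDGE_ATTACH_NO_CONNECTOR asks the bridge to skip connector creation so the caller can build one instead, and bridges that cannot honour that yet reject it, as the hunks further down show. A sketch of the two call styles:

	/* Historical behaviour: the bridge driver creates the connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);

	/* New style: the display controller owns the connector. */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
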
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ac8a78b..f2dc371 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,18 +129,12 @@ int armada_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, fbh, 1);
+	ret = drm_fb_helper_init(dev, fbh);
 	if (ret) {
 		DRM_ERROR("failed to initialize drm fb helper\n");
 		goto err_fb_helper;
 	}
 
-	ret = drm_fb_helper_single_add_all_connectors(fbh);
-	if (ret) {
-		DRM_ERROR("failed to add fb connectors\n");
-		goto err_fb_setup;
-	}
-
 	ret = drm_fb_helper_initial_config(fbh, 32);
 	if (ret) {
 		DRM_ERROR("failed to set initial config\n");
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f5d8780..656d591 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,7 @@ struct ast_private {
 		unsigned int next_index;
 	} cursor;
 
+	struct drm_encoder encoder;
 	struct drm_plane primary_plane;
 	struct drm_plane cursor_plane;
 
@@ -238,13 +239,8 @@ struct ast_crtc {
 	u8 offset_x, offset_y;
 };
 
-struct ast_encoder {
-	struct drm_encoder base;
-};
-
 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
 #define to_ast_connector(x) container_of(x, struct ast_connector, base)
-#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
 
 struct ast_vbios_stdtable {
 	u8 misc;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b79f484..18a0a4c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -388,31 +388,9 @@ static int ast_get_dram_info(struct drm_device *dev)
 	return 0;
 }
 
-enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
-						const struct drm_display_mode *mode)
-{
-	static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGBA8888 */
-
-	struct ast_private *ast = dev->dev_private;
-	unsigned long fbsize, fbpages, max_fbpages;
-
-	/* To support double buffering, a framebuffer may not
-	 * consume more than half of the available VRAM.
-	 */
-	max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-
-	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
-	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
-	if (fbpages > max_fbpages)
-		return MODE_MEM;
-
-	return MODE_OK;
-}
-
 static const struct drm_mode_config_funcs ast_mode_funcs = {
 	.fb_create = drm_gem_fb_create,
-	.mode_valid = ast_mode_config_mode_valid,
+	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
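
drm_vram_helper_mode_valid() generalizes the per-driver check deleted above: reject any mode whose framebuffer could not be placed in VRAM. For reference, this is the heuristic the removed ast code implemented, and the helper is expected to behave equivalently (fits_in_vram is an illustrative name, not the helper itself):

static enum drm_mode_status
fits_in_vram(unsigned long vram_size, const struct drm_display_mode *mode)
{
	static const unsigned long max_bpp = 4; /* XRGB8888 worst case */
	unsigned long fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
	unsigned long fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

	/* Keep half of VRAM free so a second buffer always fits. */
	if (fbpages > (vram_size / 2) >> PAGE_SHIFT)
		return MODE_MEM;

	return MODE_OK;
}
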
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 34608f0..cdd6c46 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
 #include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "ast_drv.h"
 #include "ast_tables.h"
@@ -833,8 +834,6 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
 	struct ast_vbios_mode_info *vbios_mode_info;
 	struct drm_display_mode *adjusted_mode;
 
-	crtc->state->no_vblank = true;
-
 	ast_state = to_ast_crtc_state(crtc->state);
 
 	format = ast_state->format;
@@ -959,28 +958,18 @@ static int ast_crtc_init(struct drm_device *dev)
  * Encoder
  */
 
-static void ast_encoder_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-	kfree(encoder);
-}
-
-static const struct drm_encoder_funcs ast_enc_funcs = {
-	.destroy = ast_encoder_destroy,
-};
-
 static int ast_encoder_init(struct drm_device *dev)
 {
-	struct ast_encoder *ast_encoder;
+	struct ast_private *ast = dev->dev_private;
+	struct drm_encoder *encoder = &ast->encoder;
+	int ret;
 
-	ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
-	if (!ast_encoder)
-		return -ENOMEM;
+	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+	if (ret)
+		return ret;
 
-	drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
-			 DRM_MODE_ENCODER_DAC, NULL);
+	encoder->possible_crtcs = 1;
 
-	ast_encoder->base.possible_crtcs = 1;
 	return 0;
 }
 
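drm_simple_encoder_init() is for encoders with no callbacks of their own: it supplies an encoder funcs struct whose .destroy is just drm_encoder_cleanup(). Since nothing kfree()s such an encoder, it has to be embedded in a longer-lived structure (here ast_private, see the ast_drv.h hunk above) instead of being allocated separately:

	struct drm_encoder *encoder = &ast->encoder; /* embedded, not kzalloc'ed */

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
	if (ret)
		return ret;

	encoder->possible_crtcs = 1; /* only CRTC 0 feeds this encoder */
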
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 121b6268..e2019fe 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -114,7 +114,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
 	}
 
 	if (bridge) {
-		ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+		ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
 		if (!ret)
 			return 0;
 
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 1046087..addb056 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -23,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
 
 	bochs_kms_fini(bochs);
 	bochs_mm_fini(bochs);
-	bochs_hw_fini(dev);
 	kfree(bochs);
 	dev->dev_private = NULL;
 }
@@ -69,6 +68,7 @@ static struct drm_driver bochs_driver = {
 	.major			= 1,
 	.minor			= 0,
 	DRM_GEM_VRAM_DRIVER,
+	.release                = bochs_unload,
 };
 
 /* ---------------------------------------------------------------------- */
@@ -148,9 +148,9 @@ static void bochs_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	drm_dev_unplug(dev);
 	drm_atomic_helper_shutdown(dev);
-	drm_dev_unregister(dev);
-	bochs_unload(dev);
+	bochs_hw_fini(dev);
 	drm_dev_put(dev);
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a4fc4e6..dce4672 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -4,6 +4,7 @@
 
 #include <linux/pci.h>
 
+#include <drm/drm_drv.h>
 #include <drm/drm_fourcc.h>
 
 #include "bochs.h"
@@ -192,6 +193,8 @@ void bochs_hw_fini(struct drm_device *dev)
 {
 	struct bochs_device *bochs = dev->dev_private;
 
+	/* TODO: shoot down existing vram mappings */
+
 	if (bochs->mmio)
 		iounmap(bochs->mmio);
 	if (bochs->ioports)
@@ -205,6 +208,11 @@ void bochs_hw_fini(struct drm_device *dev)
 void bochs_hw_setmode(struct bochs_device *bochs,
 		      struct drm_display_mode *mode)
 {
+	int idx;
+
+	if (!drm_dev_enter(bochs->dev, &idx))
+		return;
+
 	bochs->xres = mode->hdisplay;
 	bochs->yres = mode->vdisplay;
 	bochs->bpp = 32;
@@ -230,11 +238,18 @@ void bochs_hw_setmode(struct bochs_device *bochs,
 
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
 			  VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+
+	drm_dev_exit(idx);
 }
 
 void bochs_hw_setformat(struct bochs_device *bochs,
 			const struct drm_format_info *format)
 {
+	int idx;
+
+	if (!drm_dev_enter(bochs->dev, &idx))
+		return;
+
 	DRM_DEBUG_DRIVER("format %c%c%c%c\n",
 			 (format->format >>  0) & 0xff,
 			 (format->format >>  8) & 0xff,
@@ -254,13 +269,18 @@ void bochs_hw_setformat(struct bochs_device *bochs,
 			  __func__, format->format);
 		break;
 	}
+
+	drm_dev_exit(idx);
 }
 
 void bochs_hw_setbase(struct bochs_device *bochs,
 		      int x, int y, int stride, u64 addr)
 {
 	unsigned long offset;
-	unsigned int vx, vy, vwidth;
+	unsigned int vx, vy, vwidth, idx;
+
+	if (!drm_dev_enter(bochs->dev, &idx))
+		return;
 
 	bochs->stride = stride;
 	offset = (unsigned long)addr +
@@ -275,4 +295,6 @@ void bochs_hw_setbase(struct bochs_device *bochs,
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
 	bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+
+	drm_dev_exit(idx);
 }
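
The drm_dev_enter()/drm_dev_exit() pairs added throughout this file are the standard guard for touching hardware on a hot-unpluggable device: after drm_dev_unplug() (see the bochs_pci_remove() change above), drm_dev_enter() returns false and the register access is skipped entirely. The canonical shape:

	int idx;

	if (!drm_dev_enter(dev, &idx))
		return; /* device already unplugged, don't touch registers */

	/* ... MMIO/ioport accesses ... */

	drm_dev_exit(idx);
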
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 3f0006c..8066d7d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -7,7 +7,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
 
 #include "bochs.h"
 
@@ -57,16 +56,8 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
 			      struct drm_plane_state *old_state)
 {
 	struct bochs_device *bochs = pipe->crtc.dev->dev_private;
-	struct drm_crtc *crtc = &pipe->crtc;
 
 	bochs_plane_update(bochs, pipe->plane.state);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		crtc->state->event = NULL;
-		spin_unlock_irq(&crtc->dev->event_lock);
-	}
 }
 
 static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
@@ -92,32 +83,11 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
 	return count;
 }
 
-static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
-				      struct drm_display_mode *mode)
-{
-	struct bochs_device *bochs =
-		container_of(connector, struct bochs_device, connector);
-	unsigned long size = mode->hdisplay * mode->vdisplay * 4;
-
-	/*
-	 * Make sure we can fit two framebuffers into video memory.
-	 * This allows up to 1600x1200 with 16 MB (default size).
-	 * If you want more try this:
-	 *     'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
-	 */
-	if (size * 2 > bochs->fb_size)
-		return MODE_BAD;
-
-	return MODE_OK;
-}
-
 static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
 	.get_modes = bochs_connector_get_modes,
-	.mode_valid = bochs_connector_mode_valid,
 };
 
 static const struct drm_connector_funcs bochs_connector_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
@@ -157,6 +127,7 @@ bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
 
 const struct drm_mode_config_funcs bochs_mode_funcs = {
 	.fb_create = bochs_gem_fb_create,
+	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -192,6 +163,9 @@ int bochs_kms_init(struct bochs_device *bochs)
 
 void bochs_kms_fini(struct bochs_device *bochs)
 {
+	if (!bochs->dev->mode_config.num_connector)
+		return;
+
 	drm_atomic_helper_shutdown(bochs->dev);
 	drm_mode_config_cleanup(bochs->dev);
 }
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 0b9ca58..aaed234 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,13 +27,16 @@
 	  Support Cadence DPI to DSI bridge. This is an internal
 	  bridge and is meant to be directly embedded in a SoC.
 
-config DRM_DUMB_VGA_DAC
-	tristate "Dumb VGA DAC Bridge support"
+config DRM_DISPLAY_CONNECTOR
+	tristate "Display connector support"
 	depends on OF
-	select DRM_KMS_HELPER
 	help
-	  Support for non-programmable RGB to VGA DAC bridges, such as ADI
-	  ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
+	  Driver for display connectors with support for DDC and hot-plug
+	  detection. Most display controllers handle display connectors
+	  internally and don't need this driver, but the DRM subsystem is
+	  moving towards separating connector handling from display controllers
+	  on ARM-based platforms. Saying Y here when this driver is not needed
+	  will not cause any issue.
 
 config DRM_LVDS_CODEC
 	tristate "Transparent LVDS encoders and decoders support"
@@ -72,6 +75,17 @@
 	---help---
 	  Parade eDP-LVDS bridge chip driver.
 
+config DRM_PARADE_PS8640
+	tristate "Parade PS8640 MIPI DSI to eDP Converter"
+	depends on OF
+	select DRM_KMS_HELPER
+	select DRM_MIPI_DSI
+	select DRM_PANEL
+	help
+	  Choose this option if you have a PS8640 for your display.
+	  The PS8640 is a high-performance and low-power
+	  MIPI DSI to eDP converter.
+
 config DRM_SIL_SII8620
 	tristate "Silicon Image SII8620 HDMI/MHL bridge"
 	depends on OF
@@ -87,6 +101,7 @@
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
 	select I2C_MUX
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	---help---
 	  Silicon Image sii902x bridge chip driver.
 
@@ -98,6 +113,14 @@
 	  It is an I2C driver, that detects connection of MHL bridge
 	  and starts encapsulation of HDMI signal.
 
+config DRM_SIMPLE_BRIDGE
+	tristate "Simple DRM bridge support"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Support for non-programmable DRM bridges, such as ADI ADV7123, TI
+	  THS8134 and THS8135 or passive resistor ladder DACs.
+
 config DRM_THINE_THC63LVD1024
 	tristate "Thine THC63LVD1024 LVDS decoder bridge"
 	depends on OF
@@ -122,6 +145,16 @@
 	---help---
 	  Toshiba TC358767 eDP bridge chip driver.
 
+config DRM_TOSHIBA_TC358768
+	tristate "Toshiba TC358768 MIPI DSI bridge"
+	depends on OF
+	select DRM_KMS_HELPER
+	select REGMAP_I2C
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	help
+	  Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
 config DRM_TI_TFP410
 	tristate "TI TFP410 DVI/HDMI bridge"
 	depends on OF
@@ -139,6 +172,14 @@
 	help
 	  Texas Instruments SN65DSI86 DSI to eDP Bridge driver
 
+config DRM_TI_TPD12S015
+	tristate "TI TPD12S015 HDMI level shifter and ESD protection"
+	depends on OF
+	select DRM_KMS_HELPER
+	help
+	  Texas Instruments TPD12S015 HDMI level shifter and ESD protection
+	  driver.
+
 source "drivers/gpu/drm/bridge/analogix/Kconfig"
 
 source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cd16ce8..6fb062b 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,19 +1,23 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
-obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
 obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
 obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
 obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
 obj-$(CONFIG_DRM_SII902X) += sii902x.o
 obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
 obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
 obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
 obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
 obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
 
 obj-y += analogix/
 obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 8a56ff8..47d4eb9 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -4,8 +4,9 @@
 	depends on OF
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
+	select DRM_MIPI_DSI
 	help
-	  Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+	  Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
 
 config DRM_I2C_ADV7511_AUDIO
 	bool "ADV7511 HDMI Audio driver"
@@ -15,16 +16,8 @@
 	  Support the ADV7511 HDMI Audio interface. This is used in
 	  conjunction with the ADV7511 HDMI driver.
 
-config DRM_I2C_ADV7533
-	bool "ADV7533 encoder"
-	depends on DRM_I2C_ADV7511
-	select DRM_MIPI_DSI
-	default y
-	help
-	  Support for the Analog Devices ADV7533 DSI to HDMI encoder.
-
 config DRM_I2C_ADV7511_CEC
-	bool "ADV7511/33 HDMI CEC driver"
+	bool "ADV7511/33/35 HDMI CEC driver"
 	depends on DRM_I2C_ADV7511
 	select CEC_CORE
 	default y
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index b46ebeb..d8ceb53 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-adv7511-y := adv7511_drv.o
+adv7511-y := adv7511_drv.o adv7533.o
 adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
 adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
-adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
 obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfd..a9bb734 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -320,6 +320,7 @@ struct adv7511_video_config {
 enum adv7511_type {
 	ADV7511,
 	ADV7533,
+	ADV7535,
 };
 
 #define ADV7511_MAX_ADDRS 3
@@ -393,7 +394,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
 }
 #endif
 
-#ifdef CONFIG_DRM_I2C_ADV7533
 void adv7533_dsi_power_on(struct adv7511 *adv);
 void adv7533_dsi_power_off(struct adv7511 *adv);
 void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
@@ -402,44 +402,6 @@ int adv7533_patch_cec_registers(struct adv7511 *adv);
 int adv7533_attach_dsi(struct adv7511 *adv);
 void adv7533_detach_dsi(struct adv7511 *adv);
 int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
-#else
-static inline void adv7533_dsi_power_on(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_dsi_power_off(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_mode_set(struct adv7511 *adv,
-				    const struct drm_display_mode *mode)
-{
-}
-
-static inline int adv7533_patch_registers(struct adv7511 *adv)
-{
-	return -ENODEV;
-}
-
-static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
-{
-	return -ENODEV;
-}
-
-static inline int adv7533_attach_dsi(struct adv7511 *adv)
-{
-	return -ENODEV;
-}
-
-static inline void adv7533_detach_dsi(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
-{
-	return -ENODEV;
-}
-#endif
 
 #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
 int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e46..87b58c1 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -367,7 +367,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
 	 */
 	regcache_sync(adv7511->regmap);
 
-	if (adv7511->type == ADV7533)
+	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		adv7533_dsi_power_on(adv7511);
 	adv7511->powered = true;
 }
@@ -387,7 +387,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
 static void adv7511_power_off(struct adv7511 *adv7511)
 {
 	__adv7511_power_off(adv7511);
-	if (adv7511->type == ADV7533)
+	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		adv7533_dsi_power_off(adv7511);
 	adv7511->powered = false;
 }
@@ -761,7 +761,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
 	regmap_update_bits(adv7511->regmap, 0x17,
 		0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
 
-	if (adv7511->type == ADV7533)
+	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		adv7533_mode_set(adv7511, adj_mode);
 
 	drm_mode_copy(&adv7511->curr_mode, adj_mode);
@@ -847,11 +847,17 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
 	adv7511_mode_set(adv, mode, adj_mode);
 }
 
-static int adv7511_bridge_attach(struct drm_bridge *bridge)
+static int adv7511_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct adv7511 *adv = bridge_to_adv7511(bridge);
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
@@ -874,7 +880,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
 				 &adv7511_connector_helper_funcs);
 	drm_connector_attach_encoder(&adv->connector, bridge->encoder);
 
-	if (adv->type == ADV7533)
+	if (adv->type == ADV7533 || adv->type == ADV7535)
 		ret = adv7533_attach_dsi(adv);
 
 	if (adv->i2c_main->irq)
@@ -952,7 +958,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
 	struct i2c_client *i2c = to_i2c_client(dev);
 	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
 
-	if (adv7511->type == ADV7533)
+	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		reg -= ADV7533_REG_CEC_OFFSET;
 
 	switch (reg) {
@@ -994,7 +1000,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
 		goto err;
 	}
 
-	if (adv->type == ADV7533) {
+	if (adv->type == ADV7533 || adv->type == ADV7535) {
 		ret = adv7533_patch_cec_registers(adv);
 		if (ret)
 			goto err;
@@ -1242,7 +1248,7 @@ static int adv7511_remove(struct i2c_client *i2c)
 {
 	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
 
-	if (adv7511->type == ADV7533)
+	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
 		adv7533_detach_dsi(adv7511);
 	i2c_unregister_device(adv7511->i2c_cec);
 	if (adv7511->cec_clk)
@@ -1266,9 +1272,8 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
 	{ "adv7511", ADV7511 },
 	{ "adv7511w", ADV7511 },
 	{ "adv7513", ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
 	{ "adv7533", ADV7533 },
-#endif
+	{ "adv7535", ADV7535 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
@@ -1277,9 +1282,8 @@ static const struct of_device_id adv7511_of_ids[] = {
 	{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
 	{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
 	{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
 	{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
-#endif
+	{ .compatible = "adi,adv7535", .data = (void *)ADV7535 },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, adv7511_of_ids);
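
With ADV7535 added, the check 'type == ADV7533 || type == ADV7535' is now repeated at several call sites in this file; a small predicate would keep any future DSI-attached variant to a one-line change (a hypothetical cleanup, not part of this series):

static bool adv7511_type_is_dsi(enum adv7511_type type)
{
	return type == ADV7533 || type == ADV7535;
}
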
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 2dfa2fd..2bc6e4f 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -519,11 +519,17 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int anx6345_bridge_attach(struct drm_bridge *bridge)
+static int anx6345_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
 	int err;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
@@ -711,16 +717,20 @@ static int anx6345_i2c_probe(struct i2c_client *client,
 		DRM_DEBUG("No panel found\n");
 
 	/* 1.2V digital core power regulator  */
-	anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
+	anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
 	if (IS_ERR(anx6345->dvdd12)) {
-		DRM_ERROR("dvdd12-supply not found\n");
+		if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER)
+			DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
+				  PTR_ERR(anx6345->dvdd12));
 		return PTR_ERR(anx6345->dvdd12);
 	}
 
 	/* 2.5V digital core power regulator  */
-	anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
+	anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
 	if (IS_ERR(anx6345->dvdd25)) {
-		DRM_ERROR("dvdd25-supply not found\n");
+		if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER)
+			DRM_ERROR("Failed to get dvdd25 supply (%ld)\n",
+				  PTR_ERR(anx6345->dvdd25));
 		return PTR_ERR(anx6345->dvdd25);
 	}
 
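The rename from "dvdd12-supply" to "dvdd12" fixes a lookup bug: the regulator core appends "-supply" itself when resolving the device-tree property, so the old code effectively searched for a "dvdd12-supply-supply" property. The corrected pattern, staying quiet on -EPROBE_DEFER since the supply may simply not have registered yet:

	reg = devm_regulator_get(dev, "dvdd12"); /* matches DT "dvdd12-supply" */
	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) != -EPROBE_DEFER)
			DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
				  PTR_ERR(reg));
		return PTR_ERR(reg);
	}
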
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 41867be..0d5a5ad0 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
 	if (err)
 		return err;
 
-	dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
-	dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
 	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
-			   SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+			   SP_DP_MAIN_LINK_BW_SET_REG,
+			   anx78xx->dpcd[DP_MAX_LINK_RATE]);
 	if (err)
 		return err;
 
@@ -887,11 +886,17 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
 	int err;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6effe53..9ded2ce 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1216,13 +1216,19 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+				     enum drm_bridge_attach_flags flags)
 {
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_encoder *encoder = dp->encoder;
 	struct drm_connector *connector = NULL;
 	int ret = 0;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
@@ -1289,19 +1295,21 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
 	return conn_state->crtc;
 }
 
-static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
-						 struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+				     struct drm_bridge_state *old_bridge_state)
 {
+	struct drm_atomic_state *old_state = old_bridge_state->base.state;
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	int ret;
 
-	crtc = analogix_dp_get_new_crtc(dp, state);
+	crtc = analogix_dp_get_new_crtc(dp, old_state);
 	if (!crtc)
 		return;
 
-	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+	old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
 	/* Don't touch the panel if we're coming back from PSR */
 	if (old_crtc_state && old_crtc_state->self_refresh_active)
 		return;
@@ -1366,20 +1374,22 @@ static int analogix_dp_set_bridge(struct analogix_dp_device *dp)
 	return ret;
 }
 
-static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
-					     struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+				 struct drm_bridge_state *old_bridge_state)
 {
+	struct drm_atomic_state *old_state = old_bridge_state->base.state;
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	int timeout_loop = 0;
 	int ret;
 
-	crtc = analogix_dp_get_new_crtc(dp, state);
+	crtc = analogix_dp_get_new_crtc(dp, old_state);
 	if (!crtc)
 		return;
 
-	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+	old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
 	/* Not a full enable, just disable PSR and continue */
 	if (old_crtc_state && old_crtc_state->self_refresh_active) {
 		ret = analogix_dp_disable_psr(dp);
@@ -1440,18 +1450,20 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
-static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
-					      struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+				  struct drm_bridge_state *old_bridge_state)
 {
+	struct drm_atomic_state *old_state = old_bridge_state->base.state;
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *new_crtc_state = NULL;
 
-	crtc = analogix_dp_get_new_crtc(dp, state);
+	crtc = analogix_dp_get_new_crtc(dp, old_state);
 	if (!crtc)
 		goto out;
 
-	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
 	if (!new_crtc_state)
 		goto out;
 
@@ -1463,20 +1475,21 @@ static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
 	analogix_dp_bridge_disable(bridge);
 }
 
-static
-void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
-					    struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+				struct drm_bridge_state *old_bridge_state)
 {
+	struct drm_atomic_state *old_state = old_bridge_state->base.state;
 	struct analogix_dp_device *dp = bridge->driver_private;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *new_crtc_state;
 	int ret;
 
-	crtc = analogix_dp_get_new_crtc(dp, state);
+	crtc = analogix_dp_get_new_crtc(dp, old_state);
 	if (!crtc)
 		return;
 
-	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+	new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
 	if (!new_crtc_state || !new_crtc_state->self_refresh_active)
 		return;
 
@@ -1563,6 +1576,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
 }
 
 static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
 	.atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
 	.atomic_enable = analogix_dp_bridge_atomic_enable,
 	.atomic_disable = analogix_dp_bridge_atomic_disable,
@@ -1588,7 +1604,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
 	bridge->driver_private = dp;
 	bridge->funcs = &analogix_dp_bridge_funcs;
 
-	ret = drm_bridge_attach(dp->encoder, bridge, NULL);
+	ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
 	if (ret) {
 		DRM_ERROR("failed to attach drm bridge\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b7c97f0..69c3892 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -644,7 +644,8 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
 	return 0;
 }
 
-static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+				  enum drm_bridge_attach_flags flags)
 {
 	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
 	struct cdns_dsi *dsi = input_to_dsi(input);
@@ -656,7 +657,8 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
 		return -ENOTSUPP;
 	}
 
-	return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+	return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+				 flags);
 }
 
 static enum drm_mode_status
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
new file mode 100644
index 0000000..4d27857
--- /dev/null
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+
+struct display_connector {
+	struct drm_bridge	bridge;
+
+	struct gpio_desc	*hpd_gpio;
+	int			hpd_irq;
+};
+
+static inline struct display_connector *
+to_display_connector(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct display_connector, bridge);
+}
+
+static int display_connector_attach(struct drm_bridge *bridge,
+				    enum drm_bridge_attach_flags flags)
+{
+	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static enum drm_connector_status
+display_connector_detect(struct drm_bridge *bridge)
+{
+	struct display_connector *conn = to_display_connector(bridge);
+
+	if (conn->hpd_gpio) {
+		if (gpiod_get_value_cansleep(conn->hpd_gpio))
+			return connector_status_connected;
+		else
+			return connector_status_disconnected;
+	}
+
+	if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
+		return connector_status_connected;
+
+	switch (conn->bridge.type) {
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		/*
+		 * For DVI and HDMI connectors a DDC probe failure indicates
+		 * that no cable is connected.
+		 */
+		return connector_status_disconnected;
+
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_VGA:
+	default:
+		/*
+		 * Composite and S-Video connectors have no detection means
+		 * other than the HPD GPIO. For VGA connectors, even if we have
+		 * an I2C bus, we can't assume that the cable is disconnected
+		 * if drm_probe_ddc fails, as some cables don't wire the DDC
+		 * pins.
+		 */
+		return connector_status_unknown;
+	}
+}
+
+static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+					       struct drm_connector *connector)
+{
+	struct display_connector *conn = to_display_connector(bridge);
+
+	return drm_get_edid(connector, conn->bridge.ddc);
+}
+
+static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+	.attach = display_connector_attach,
+	.detect = display_connector_detect,
+	.get_edid = display_connector_get_edid,
+};
+
+static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+{
+	struct display_connector *conn = arg;
+	struct drm_bridge *bridge = &conn->bridge;
+
+	drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));
+
+	return IRQ_HANDLED;
+}
+
+static int display_connector_probe(struct platform_device *pdev)
+{
+	struct display_connector *conn;
+	unsigned int type;
+	const char *label = NULL;
+	int ret;
+
+	conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
+	if (!conn)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, conn);
+
+	type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+	/* Get the exact connector type. */
+	switch (type) {
+	case DRM_MODE_CONNECTOR_DVII: {
+		bool analog, digital;
+
+		analog = of_property_read_bool(pdev->dev.of_node, "analog");
+		digital = of_property_read_bool(pdev->dev.of_node, "digital");
+		if (analog && !digital) {
+			conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
+		} else if (!analog && digital) {
+			conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
+		} else if (analog && digital) {
+			conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
+		} else {
+			dev_err(&pdev->dev, "DVI connector with no type\n");
+			return -EINVAL;
+		}
+		break;
+	}
+
+	case DRM_MODE_CONNECTOR_HDMIA: {
+		const char *hdmi_type;
+
+		ret = of_property_read_string(pdev->dev.of_node, "type",
+					      &hdmi_type);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "HDMI connector with no type\n");
+			return -EINVAL;
+		}
+
+		if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
+		    !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
+			conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+		} else if (!strcmp(hdmi_type, "b")) {
+			conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
+		} else {
+			dev_err(&pdev->dev,
+				"Unsupported HDMI connector type '%s'\n",
+				hdmi_type);
+			return -EINVAL;
+		}
+
+		break;
+	}
+
+	default:
+		conn->bridge.type = type;
+		break;
+	}
+
+	/* All the supported connector types support interlaced modes. */
+	conn->bridge.interlace_allowed = true;
+
+	/* Get the optional connector label. */
+	of_property_read_string(pdev->dev.of_node, "label", &label);
+
+	/*
+	 * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+	 * edge interrupts, register an interrupt handler.
+	 */
+	if (type == DRM_MODE_CONNECTOR_DVII ||
+	    type == DRM_MODE_CONNECTOR_HDMIA) {
+		conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
+							 GPIOD_IN);
+		if (IS_ERR(conn->hpd_gpio)) {
+			if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"Unable to retrieve HPD GPIO\n");
+			return PTR_ERR(conn->hpd_gpio);
+		}
+
+		conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
+	} else {
+		conn->hpd_irq = -EINVAL;
+	}
+
+	if (conn->hpd_irq >= 0) {
+		ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
+						NULL, display_connector_hpd_irq,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING |
+						IRQF_ONESHOT,
+						"HPD", conn);
+		if (ret) {
+			dev_info(&pdev->dev,
+				 "Failed to request HPD edge interrupt, falling back to polling\n");
+			conn->hpd_irq = -EINVAL;
+		}
+	}
+
+	/* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
+	if (type == DRM_MODE_CONNECTOR_DVII ||
+	    type == DRM_MODE_CONNECTOR_HDMIA ||
+	    type == DRM_MODE_CONNECTOR_VGA) {
+		struct device_node *phandle;
+
+		phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
+		if (phandle) {
+			conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
+			of_node_put(phandle);
+			if (!conn->bridge.ddc)
+				return -EPROBE_DEFER;
+		} else {
+			dev_dbg(&pdev->dev,
+				"No I2C bus specified, disabling EDID readout\n");
+		}
+	}
+
+	conn->bridge.funcs = &display_connector_bridge_funcs;
+	conn->bridge.of_node = pdev->dev.of_node;
+
+	if (conn->bridge.ddc)
+		conn->bridge.ops |= DRM_BRIDGE_OP_EDID
+				 |  DRM_BRIDGE_OP_DETECT;
+	if (conn->hpd_gpio)
+		conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+	if (conn->hpd_irq >= 0)
+		conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+	dev_dbg(&pdev->dev,
+		"Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
+		drm_get_connector_type_name(conn->bridge.type),
+		label ? label : "<unlabelled>",
+		conn->bridge.ddc ? "with" : "without",
+		conn->hpd_gpio ? "with" : "without",
+		conn->bridge.ops);
+
+	drm_bridge_add(&conn->bridge);
+
+	return 0;
+}
+
+static int display_connector_remove(struct platform_device *pdev)
+{
+	struct display_connector *conn = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&conn->bridge);
+
+	if (!IS_ERR(conn->bridge.ddc))
+		i2c_put_adapter(conn->bridge.ddc);
+
+	return 0;
+}
+
+static const struct of_device_id display_connector_match[] = {
+	{
+		.compatible = "composite-video-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_Composite,
+	}, {
+		.compatible = "dvi-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_DVII,
+	}, {
+		.compatible = "hdmi-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_HDMIA,
+	}, {
+		.compatible = "svideo-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
+	}, {
+		.compatible = "vga-connector",
+		.data = (void *)DRM_MODE_CONNECTOR_VGA,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, display_connector_match);
+
+static struct platform_driver display_connector_driver = {
+	.probe	= display_connector_probe,
+	.remove	= display_connector_remove,
+	.driver		= {
+		.name		= "display-connector",
+		.of_match_table	= display_connector_match,
+	},
+};
+module_platform_driver(display_connector_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Display connector driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
deleted file mode 100644
index cc33dc4..0000000
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015-2016 Free Electrons
- * Copyright (C) 2015-2016 NextThing Co
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- */
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-struct dumb_vga {
-	struct drm_bridge	bridge;
-	struct drm_connector	connector;
-
-	struct i2c_adapter	*ddc;
-	struct regulator	*vdd;
-};
-
-static inline struct dumb_vga *
-drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
-{
-	return container_of(bridge, struct dumb_vga, bridge);
-}
-
-static inline struct dumb_vga *
-drm_connector_to_dumb_vga(struct drm_connector *connector)
-{
-	return container_of(connector, struct dumb_vga, connector);
-}
-
-static int dumb_vga_get_modes(struct drm_connector *connector)
-{
-	struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-	struct edid *edid;
-	int ret;
-
-	if (!vga->ddc)
-		goto fallback;
-
-	edid = drm_get_edid(connector, vga->ddc);
-	if (!edid) {
-		DRM_INFO("EDID readout failed, falling back to standard modes\n");
-		goto fallback;
-	}
-
-	drm_connector_update_edid_property(connector, edid);
-	ret = drm_add_edid_modes(connector, edid);
-	kfree(edid);
-	return ret;
-
-fallback:
-	/*
-	 * In case we cannot retrieve the EDIDs (broken or missing i2c
-	 * bus), fallback on the XGA standards
-	 */
-	ret = drm_add_modes_noedid(connector, 1920, 1200);
-
-	/* And prefer a mode pretty much anyone can handle */
-	drm_set_preferred_mode(connector, 1024, 768);
-
-	return ret;
-}
-
-static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
-	.get_modes	= dumb_vga_get_modes,
-};
-
-static enum drm_connector_status
-dumb_vga_connector_detect(struct drm_connector *connector, bool force)
-{
-	struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-
-	/*
-	 * Even if we have an I2C bus, we can't assume that the cable
-	 * is disconnected if drm_probe_ddc fails. Some cables don't
-	 * wire the DDC pins, or the I2C bus might not be working at
-	 * all.
-	 */
-	if (vga->ddc && drm_probe_ddc(vga->ddc))
-		return connector_status_connected;
-
-	return connector_status_unknown;
-}
-
-static const struct drm_connector_funcs dumb_vga_con_funcs = {
-	.detect			= dumb_vga_connector_detect,
-	.fill_modes		= drm_helper_probe_single_connector_modes,
-	.destroy		= drm_connector_cleanup,
-	.reset			= drm_atomic_helper_connector_reset,
-	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
-	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
-};
-
-static int dumb_vga_attach(struct drm_bridge *bridge)
-{
-	struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-	int ret;
-
-	if (!bridge->encoder) {
-		DRM_ERROR("Missing encoder\n");
-		return -ENODEV;
-	}
-
-	drm_connector_helper_add(&vga->connector,
-				 &dumb_vga_con_helper_funcs);
-	ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
-					  &dumb_vga_con_funcs,
-					  DRM_MODE_CONNECTOR_VGA,
-					  vga->ddc);
-	if (ret) {
-		DRM_ERROR("Failed to initialize connector\n");
-		return ret;
-	}
-
-	drm_connector_attach_encoder(&vga->connector,
-					  bridge->encoder);
-
-	return 0;
-}
-
-static void dumb_vga_enable(struct drm_bridge *bridge)
-{
-	struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-	int ret = 0;
-
-	if (vga->vdd)
-		ret = regulator_enable(vga->vdd);
-
-	if (ret)
-		DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
-}
-
-static void dumb_vga_disable(struct drm_bridge *bridge)
-{
-	struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-
-	if (vga->vdd)
-		regulator_disable(vga->vdd);
-}
-
-static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
-	.attach		= dumb_vga_attach,
-	.enable		= dumb_vga_enable,
-	.disable	= dumb_vga_disable,
-};
-
-static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
-{
-	struct device_node *phandle, *remote;
-	struct i2c_adapter *ddc;
-
-	remote = of_graph_get_remote_node(dev->of_node, 1, -1);
-	if (!remote)
-		return ERR_PTR(-EINVAL);
-
-	phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
-	of_node_put(remote);
-	if (!phandle)
-		return ERR_PTR(-ENODEV);
-
-	ddc = of_get_i2c_adapter_by_node(phandle);
-	of_node_put(phandle);
-	if (!ddc)
-		return ERR_PTR(-EPROBE_DEFER);
-
-	return ddc;
-}
-
-static int dumb_vga_probe(struct platform_device *pdev)
-{
-	struct dumb_vga *vga;
-
-	vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
-	if (!vga)
-		return -ENOMEM;
-	platform_set_drvdata(pdev, vga);
-
-	vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
-	if (IS_ERR(vga->vdd)) {
-		int ret = PTR_ERR(vga->vdd);
-		if (ret == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		vga->vdd = NULL;
-		dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
-	}
-
-	vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
-	if (IS_ERR(vga->ddc)) {
-		if (PTR_ERR(vga->ddc) == -ENODEV) {
-			dev_dbg(&pdev->dev,
-				"No i2c bus specified. Disabling EDID readout\n");
-			vga->ddc = NULL;
-		} else {
-			dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
-			return PTR_ERR(vga->ddc);
-		}
-	}
-
-	vga->bridge.funcs = &dumb_vga_bridge_funcs;
-	vga->bridge.of_node = pdev->dev.of_node;
-	vga->bridge.timings = of_device_get_match_data(&pdev->dev);
-
-	drm_bridge_add(&vga->bridge);
-
-	return 0;
-}
-
-static int dumb_vga_remove(struct platform_device *pdev)
-{
-	struct dumb_vga *vga = platform_get_drvdata(pdev);
-
-	drm_bridge_remove(&vga->bridge);
-
-	if (vga->ddc)
-		i2c_put_adapter(vga->ddc);
-
-	return 0;
-}
-
-/*
- * We assume the ADV7123 DAC is the "default" for historical reasons
- * Information taken from the ADV7123 datasheet, revision D.
- * NOTE: the ADV7123EP seems to have other timings and need a new timings
- * set if used.
- */
-static const struct drm_bridge_timings default_dac_timings = {
-	/* Timing specifications, datasheet page 7 */
-	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
-	.setup_time_ps = 500,
-	.hold_time_ps = 1500,
-};
-
-/*
- * Information taken from the THS8134, THS8134A, THS8134B datasheet named
- * "SLVS205D", dated May 1990, revised March 2000.
- */
-static const struct drm_bridge_timings ti_ths8134_dac_timings = {
-	/* From timing diagram, datasheet page 9 */
-	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
-	/* From datasheet, page 12 */
-	.setup_time_ps = 3000,
-	/* I guess this means latched input */
-	.hold_time_ps = 0,
-};
-
-/*
- * Information taken from the THS8135 datasheet named "SLAS343B", dated
- * May 2001, revised April 2013.
- */
-static const struct drm_bridge_timings ti_ths8135_dac_timings = {
-	/* From timing diagram, datasheet page 14 */
-	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
-	/* From datasheet, page 16 */
-	.setup_time_ps = 2000,
-	.hold_time_ps = 500,
-};
-
-static const struct of_device_id dumb_vga_match[] = {
-	{
-		.compatible = "dumb-vga-dac",
-		.data = NULL,
-	},
-	{
-		.compatible = "adi,adv7123",
-		.data = &default_dac_timings,
-	},
-	{
-		.compatible = "ti,ths8135",
-		.data = &ti_ths8135_dac_timings,
-	},
-	{
-		.compatible = "ti,ths8134",
-		.data = &ti_ths8134_dac_timings,
-	},
-	{},
-};
-MODULE_DEVICE_TABLE(of, dumb_vga_match);
-
-static struct platform_driver dumb_vga_driver = {
-	.probe	= dumb_vga_probe,
-	.remove	= dumb_vga_remove,
-	.driver		= {
-		.name		= "dumb-vga-dac",
-		.of_match_table	= dumb_vga_match,
-	},
-};
-module_platform_driver(dumb_vga_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 5f04cc1..24fb1be 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,19 +21,23 @@ struct lvds_codec {
 	u32 connector_type;
 };
 
-static int lvds_codec_attach(struct drm_bridge *bridge)
+static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
 {
-	struct lvds_codec *lvds_codec = container_of(bridge,
-						     struct lvds_codec, bridge);
+	return container_of(bridge, struct lvds_codec, bridge);
+}
+
+static int lvds_codec_attach(struct drm_bridge *bridge,
+			     enum drm_bridge_attach_flags flags)
+{
+	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
 
 	return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
-				 bridge);
+				 bridge, flags);
 }
 
 static void lvds_codec_enable(struct drm_bridge *bridge)
 {
-	struct lvds_codec *lvds_codec = container_of(bridge,
-						     struct lvds_codec, bridge);
+	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
 
 	if (lvds_codec->powerdown_gpio)
 		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -41,14 +45,13 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
 
 static void lvds_codec_disable(struct drm_bridge *bridge)
 {
-	struct lvds_codec *lvds_codec = container_of(bridge,
-						     struct lvds_codec, bridge);
+	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
 
 	if (lvds_codec->powerdown_gpio)
 		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
 }
 
-static struct drm_bridge_funcs funcs = {
+static const struct drm_bridge_funcs funcs = {
 	.attach = lvds_codec_attach,
 	.enable = lvds_codec_enable,
 	.disable = lvds_codec_disable,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e8a49f6..6200f12a 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -206,13 +206,19 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
 	struct i2c_client *stdp4028_i2c
 			= ge_b850v3_lvds_ptr->stdp4028_i2c;
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 57ff013..438e566 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -236,11 +236,17 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f66777e..8461ee8 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -53,12 +53,16 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int panel_bridge_attach(struct drm_bridge *bridge)
+static int panel_bridge_attach(struct drm_bridge *bridge,
+			       enum drm_bridge_attach_flags flags)
 {
 	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
 	struct drm_connector *connector = &panel_bridge->connector;
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+		return 0;
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Missing encoder\n");
 		return -ENODEV;
@@ -120,6 +124,14 @@ static void panel_bridge_post_disable(struct drm_bridge *bridge)
 	drm_panel_unprepare(panel_bridge->panel);
 }
 
+static int panel_bridge_get_modes(struct drm_bridge *bridge,
+				  struct drm_connector *connector)
+{
+	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+	return drm_panel_get_modes(panel_bridge->panel, connector);
+}
+
 static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 	.attach = panel_bridge_attach,
 	.detach = panel_bridge_detach,
@@ -127,6 +139,11 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
 	.enable = panel_bridge_enable,
 	.disable = panel_bridge_disable,
 	.post_disable = panel_bridge_post_disable,
+	.get_modes = panel_bridge_get_modes,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
 };
 
 /**
@@ -151,7 +168,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
  * known type. Calling this function with a panel whose connector type is
  * DRM_MODE_CONNECTOR_Unknown will return NULL.
  *
- * See devm_drm_panel_bridge_add() for an automatically manged version of this
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
  * function.
  */
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
@@ -196,6 +213,8 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
 #ifdef CONFIG_OF
 	panel_bridge->bridge.of_node = panel->dev->of_node;
 #endif
+	panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
+	panel_bridge->bridge.type = connector_type;
 
 	drm_bridge_add(&panel_bridge->bridge);
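
With DRM_BRIDGE_OP_MODES and ->get_modes() in place, a panel wrapped in a
bridge keeps advertising its modes even when the display driver creates the
connector itself. A minimal sketch, assuming a hypothetical
foo_wrap_panel():

	static int foo_wrap_panel(struct device *dev, struct drm_panel *panel,
				  struct drm_encoder *encoder)
	{
		struct drm_bridge *bridge;

		bridge = devm_drm_panel_bridge_add(dev, panel);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);

		/* Modes are fetched through the bridge's get_modes() op. */
		return drm_bridge_attach(encoder, bridge, NULL,
					 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	}
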
 
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 10c47c00..d789ea2 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,11 +476,17 @@ static const struct drm_connector_funcs ps8622_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int ps8622_attach(struct drm_bridge *bridge)
+static int ps8622_attach(struct drm_bridge *bridge,
+			 enum drm_bridge_attach_flags flags)
 {
 	struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	if (!bridge->encoder) {
 		DRM_ERROR("Parent encoder object not found");
 		return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 0000000..d3a5344
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define PAGE2_GPIO_H		0xa7
+#define PS_GPIO9		BIT(1)
+#define PAGE2_I2C_BYPASS	0xea
+#define I2C_BYPASS_EN		0xd0
+#define PAGE2_MCS_EN		0xf3
+#define MCS_EN			BIT(0)
+#define PAGE3_SET_ADD		0xfe
+#define VDO_CTL_ADD		0x13
+#define VDO_DIS			0x18
+#define VDO_EN			0x1c
+#define DP_NUM_LANES		4
+
+/*
+ * PS8640 uses multiple addresses:
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for SPI ROM mapping
+ */
+enum page_addr_offset {
+	PAGE0_DP_CNTL = 0,
+	PAGE1_VDO_BDG,
+	PAGE2_TOP_CNTL,
+	PAGE3_DSI_CNTL1,
+	PAGE4_MIPI_PHY,
+	PAGE5_VPLL,
+	PAGE6_DSI_CNTL2,
+	PAGE7_SPI_CNTL,
+	MAX_DEVS
+};
+
+enum ps8640_vdo_control {
+	DISABLE = VDO_DIS,
+	ENABLE = VDO_EN,
+};
+
+struct ps8640 {
+	struct drm_bridge bridge;
+	struct drm_bridge *panel_bridge;
+	struct mipi_dsi_device *dsi;
+	struct i2c_client *page[MAX_DEVS];
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *gpio_reset;
+	struct gpio_desc *gpio_powerdown;
+};
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+	return container_of(e, struct ps8640, bridge);
+}
+
+static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+				     const enum ps8640_vdo_control ctrl)
+{
+	struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+	u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
+	int ret;
+
+	ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
+					     sizeof(vdo_ctrl_buf),
+					     vdo_ctrl_buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+	struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
+	unsigned long timeout;
+	int ret, status;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
+				    ps_bridge->supplies);
+	if (ret < 0) {
+		DRM_ERROR("cannot enable regulators %d\n", ret);
+		return;
+	}
+
+	gpiod_set_value(ps_bridge->gpio_powerdown, 0);
+	gpiod_set_value(ps_bridge->gpio_reset, 1);
+	usleep_range(2000, 2500);
+	gpiod_set_value(ps_bridge->gpio_reset, 0);
+
+	/*
+	 * Wait for the ps8640 embedded MCU to be ready: first wait 200ms,
+	 * then poll the MCU ready flag every 20ms.
+	 */
+	msleep(200);
+
+	timeout = jiffies + msecs_to_jiffies(200) + 1;
+
+	while (time_is_after_jiffies(timeout)) {
+		status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
+		if (status < 0) {
+			DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
+			goto err_regulators_disable;
+		}
+		if ((status & PS_GPIO9) == PS_GPIO9)
+			break;
+
+		msleep(20);
+	}
+
+	msleep(50);
+
+	/*
+	 * The Manufacturer Command Set (MCS) is a device dependent interface
+	 * intended for factory programming of the display module default
+	 * parameters. Once the display module is configured, the MCS shall be
+	 * disabled by the manufacturer. Once disabled, all MCS commands are
+	 * ignored by the display interface.
+	 */
+	status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
+	if (status < 0) {
+		DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
+		goto err_regulators_disable;
+	}
+
+	ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
+					status & ~MCS_EN);
+	if (ret < 0) {
+		DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
+		goto err_regulators_disable;
+	}
+
+	ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+	if (ret) {
+		DRM_ERROR("failed to enable VDO: %d\n", ret);
+		goto err_regulators_disable;
+	}
+
+	/* Switch to accessing the eDP panel's EDID through the I2C bypass */
+	ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
+					I2C_BYPASS_EN);
+	if (ret < 0) {
+		DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
+		goto err_regulators_disable;
+	}
+
+	return;
+
+err_regulators_disable:
+	regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+			       ps_bridge->supplies);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+	int ret;
+
+	ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+	if (ret < 0)
+		DRM_ERROR("failed to disable VDO: %d\n", ret);
+
+	gpiod_set_value(ps_bridge->gpio_reset, 1);
+	gpiod_set_value(ps_bridge->gpio_powerdown, 1);
+	ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+				     ps_bridge->supplies);
+	if (ret < 0)
+		DRM_ERROR("cannot disable regulators %d\n", ret);
+}
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge,
+				enum drm_bridge_attach_flags flags)
+{
+	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+	struct device *dev = &ps_bridge->page[0]->dev;
+	struct device_node *in_ep, *dsi_node;
+	struct mipi_dsi_device *dsi;
+	struct mipi_dsi_host *host;
+	int ret;
+	const struct mipi_dsi_device_info info = { .type = "ps8640",
+						   .channel = 0,
+						   .node = NULL,
+						 };
+	/* port@0 is ps8640 dsi input port */
+	in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+	if (!in_ep)
+		return -ENODEV;
+
+	dsi_node = of_graph_get_remote_port_parent(in_ep);
+	of_node_put(in_ep);
+	if (!dsi_node)
+		return -ENODEV;
+
+	host = of_find_mipi_dsi_host_by_node(dsi_node);
+	of_node_put(dsi_node);
+	if (!host)
+		return -ENODEV;
+
+	dsi = mipi_dsi_device_register_full(host, &info);
+	if (IS_ERR(dsi)) {
+		dev_err(dev, "failed to create dsi device\n");
+		ret = PTR_ERR(dsi);
+		return ret;
+	}
+
+	ps_bridge->dsi = dsi;
+
+	dsi->host = host;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+			  MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->lanes = DP_NUM_LANES;
+	ret = mipi_dsi_attach(dsi);
+	if (ret)
+		goto err_dsi_attach;
+
+	/* Attach the panel-bridge to the dsi bridge */
+	return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+				 &ps_bridge->bridge, flags);
+
+err_dsi_attach:
+	mipi_dsi_device_unregister(dsi);
+	return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+	.attach = ps8640_bridge_attach,
+	.post_disable = ps8640_post_disable,
+	.pre_enable = ps8640_pre_enable,
+};
+
+static int ps8640_probe(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	struct device_node *np = dev->of_node;
+	struct ps8640 *ps_bridge;
+	struct drm_panel *panel;
+	int ret;
+	u32 i;
+
+	ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+	if (!ps_bridge)
+		return -ENOMEM;
+
+	/* port@1 is ps8640 output port */
+	ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+	if (ret < 0)
+		return ret;
+	if (!panel)
+		return -ENODEV;
+
+	panel->connector_type = DRM_MODE_CONNECTOR_eDP;
+
+	ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+	if (IS_ERR(ps_bridge->panel_bridge))
+		return PTR_ERR(ps_bridge->panel_bridge);
+
+	ps_bridge->supplies[0].supply = "vdd33";
+	ps_bridge->supplies[1].supply = "vdd12";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+				      ps_bridge->supplies);
+	if (ret)
+		return ret;
+
+	ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(ps_bridge->gpio_powerdown))
+		return PTR_ERR(ps_bridge->gpio_powerdown);
+
+	/*
+	 * Assert the reset to avoid the bridge being initialized prematurely
+	 */
+	ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset",
+					       GPIOD_OUT_HIGH);
+	if (IS_ERR(ps_bridge->gpio_reset))
+		return PTR_ERR(ps_bridge->gpio_reset);
+
+	ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+	ps_bridge->bridge.of_node = dev->of_node;
+
+	ps_bridge->page[PAGE0_DP_CNTL] = client;
+
+	for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
+		ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
+							     client->adapter,
+							     client->addr + i);
+		if (IS_ERR(ps_bridge->page[i])) {
+			dev_err(dev, "failed i2c dummy device, address %02x\n",
+				client->addr + i);
+			return PTR_ERR(ps_bridge->page[i]);
+		}
+	}
+
+	i2c_set_clientdata(client, ps_bridge);
+
+	drm_bridge_add(&ps_bridge->bridge);
+
+	return 0;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+	struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+	drm_bridge_remove(&ps_bridge->bridge);
+
+	return 0;
+}
+
+static const struct of_device_id ps8640_match[] = {
+	{ .compatible = "parade,ps8640" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+	.probe_new = ps8640_probe,
+	.remove = ps8640_remove,
+	.driver = {
+		.name = "ps8640",
+		.of_match_table = ps8640_match,
+	},
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_AUTHOR("CK Hu <ck.hu@mediatek.com>");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b70e8c5..6dad025 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -399,12 +399,18 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
 	mutex_unlock(&sii902x->mutex);
 }
 
-static int sii902x_bridge_attach(struct drm_bridge *bridge)
+static int sii902x_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct sii902x *sii902x = bridge_to_sii902x(bridge);
 	struct drm_device *drm = bridge->dev;
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	drm_connector_helper_add(&sii902x->connector,
 				 &sii902x_connector_helper_funcs);
 
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4c0eef4..92acd33 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2202,7 +2202,8 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
 	return container_of(bridge, struct sii8620, bridge);
 }
 
-static int sii8620_attach(struct drm_bridge *bridge)
+static int sii8620_attach(struct drm_bridge *bridge,
+			  enum drm_bridge_attach_flags flags)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
 
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
new file mode 100644
index 0000000..a2dca7a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+struct simple_bridge_info {
+	const struct drm_bridge_timings *timings;
+	unsigned int connector_type;
+};
+
+struct simple_bridge {
+	struct drm_bridge	bridge;
+	struct drm_connector	connector;
+
+	const struct simple_bridge_info *info;
+
+	struct i2c_adapter	*ddc;
+	struct regulator	*vdd;
+	struct gpio_desc	*enable;
+};
+
+static inline struct simple_bridge *
+drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct simple_bridge, bridge);
+}
+
+static inline struct simple_bridge *
+drm_connector_to_simple_bridge(struct drm_connector *connector)
+{
+	return container_of(connector, struct simple_bridge, connector);
+}
+
+static int simple_bridge_get_modes(struct drm_connector *connector)
+{
+	struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+	struct edid *edid;
+	int ret;
+
+	if (!sbridge->ddc)
+		goto fallback;
+
+	edid = drm_get_edid(connector, sbridge->ddc);
+	if (!edid) {
+		DRM_INFO("EDID readout failed, falling back to standard modes\n");
+		goto fallback;
+	}
+
+	drm_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+	return ret;
+
+fallback:
+	/*
+	 * In case we cannot retrieve the EDIDs (broken or missing i2c
+	 * bus), fall back to the XGA standards.
+	 */
+	ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+	/* And prefer a mode pretty much anyone can handle */
+	drm_set_preferred_mode(connector, 1024, 768);
+
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
+	.get_modes	= simple_bridge_get_modes,
+};
+
+static enum drm_connector_status
+simple_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+
+	/*
+	 * Even if we have an I2C bus, we can't assume that the cable
+	 * is disconnected if drm_probe_ddc fails. Some cables don't
+	 * wire the DDC pins, or the I2C bus might not be working at
+	 * all.
+	 */
+	if (sbridge->ddc && drm_probe_ddc(sbridge->ddc))
+		return connector_status_connected;
+
+	return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs simple_bridge_con_funcs = {
+	.detect			= simple_bridge_connector_detect,
+	.fill_modes		= drm_helper_probe_single_connector_modes,
+	.destroy		= drm_connector_cleanup,
+	.reset			= drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
+};
+
+static int simple_bridge_attach(struct drm_bridge *bridge,
+				enum drm_bridge_attach_flags flags)
+{
+	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+	int ret;
+
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Missing encoder\n");
+		return -ENODEV;
+	}
+
+	drm_connector_helper_add(&sbridge->connector,
+				 &simple_bridge_con_helper_funcs);
+	ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
+					  &simple_bridge_con_funcs,
+					  sbridge->info->connector_type,
+					  sbridge->ddc);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector\n");
+		return ret;
+	}
+
+	drm_connector_attach_encoder(&sbridge->connector,
+					  bridge->encoder);
+
+	return 0;
+}
+
+static void simple_bridge_enable(struct drm_bridge *bridge)
+{
+	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+	int ret;
+
+	if (sbridge->vdd) {
+		ret = regulator_enable(sbridge->vdd);
+		if (ret)
+			DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+	}
+
+	gpiod_set_value_cansleep(sbridge->enable, 1);
+}
+
+static void simple_bridge_disable(struct drm_bridge *bridge)
+{
+	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+
+	gpiod_set_value_cansleep(sbridge->enable, 0);
+
+	if (sbridge->vdd)
+		regulator_disable(sbridge->vdd);
+}
+
+static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
+	.attach		= simple_bridge_attach,
+	.enable		= simple_bridge_enable,
+	.disable	= simple_bridge_disable,
+};
+
+static struct i2c_adapter *simple_bridge_retrieve_ddc(struct device *dev)
+{
+	struct device_node *phandle, *remote;
+	struct i2c_adapter *ddc;
+
+	remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+	if (!remote)
+		return ERR_PTR(-EINVAL);
+
+	phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+	of_node_put(remote);
+	if (!phandle)
+		return ERR_PTR(-ENODEV);
+
+	ddc = of_get_i2c_adapter_by_node(phandle);
+	of_node_put(phandle);
+	if (!ddc)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return ddc;
+}
+
+static int simple_bridge_probe(struct platform_device *pdev)
+{
+	struct simple_bridge *sbridge;
+
+	sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
+	if (!sbridge)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, sbridge);
+
+	sbridge->info = of_device_get_match_data(&pdev->dev);
+
+	sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+	if (IS_ERR(sbridge->vdd)) {
+		int ret = PTR_ERR(sbridge->vdd);
+		if (ret == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		sbridge->vdd = NULL;
+		dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+	}
+
+	sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
+						  GPIOD_OUT_LOW);
+	if (IS_ERR(sbridge->enable)) {
+		if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
+		return PTR_ERR(sbridge->enable);
+	}
+
+	sbridge->ddc = simple_bridge_retrieve_ddc(&pdev->dev);
+	if (IS_ERR(sbridge->ddc)) {
+		if (PTR_ERR(sbridge->ddc) == -ENODEV) {
+			dev_dbg(&pdev->dev,
+				"No i2c bus specified. Disabling EDID readout\n");
+			sbridge->ddc = NULL;
+		} else {
+			dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+			return PTR_ERR(sbridge->ddc);
+		}
+	}
+
+	sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
+	sbridge->bridge.of_node = pdev->dev.of_node;
+	sbridge->bridge.timings = sbridge->info->timings;
+
+	drm_bridge_add(&sbridge->bridge);
+
+	return 0;
+}
+
+static int simple_bridge_remove(struct platform_device *pdev)
+{
+	struct simple_bridge *sbridge = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&sbridge->bridge);
+
+	if (sbridge->ddc)
+		i2c_put_adapter(sbridge->ddc);
+
+	return 0;
+}
+
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons.
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have different timings and would need a
+ * new timings set if used.
+ */
+static const struct drm_bridge_timings default_bridge_timings = {
+	/* Timing specifications, datasheet page 7 */
+	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+	.setup_time_ps = 500,
+	.hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
+	/* From timing diagram, datasheet page 9 */
+	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+	/* From datasheet, page 12 */
+	.setup_time_ps = 3000,
+	/* I guess this means latched input */
+	.hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
+	/* From timing diagram, datasheet page 14 */
+	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+	/* From datasheet, page 16 */
+	.setup_time_ps = 2000,
+	.hold_time_ps = 500,
+};
+
+static const struct of_device_id simple_bridge_match[] = {
+	{
+		.compatible = "dumb-vga-dac",
+		.data = &(const struct simple_bridge_info) {
+			.connector_type = DRM_MODE_CONNECTOR_VGA,
+		},
+	}, {
+		.compatible = "adi,adv7123",
+		.data = &(const struct simple_bridge_info) {
+			.timings = &default_bridge_timings,
+			.connector_type = DRM_MODE_CONNECTOR_VGA,
+		},
+	}, {
+		.compatible = "ti,opa362",
+		.data = &(const struct simple_bridge_info) {
+			.connector_type = DRM_MODE_CONNECTOR_Composite,
+		},
+	}, {
+		.compatible = "ti,ths8135",
+		.data = &(const struct simple_bridge_info) {
+			.timings = &ti_ths8135_bridge_timings,
+			.connector_type = DRM_MODE_CONNECTOR_VGA,
+		},
+	}, {
+		.compatible = "ti,ths8134",
+		.data = &(const struct simple_bridge_info) {
+			.timings = &ti_ths8134_bridge_timings,
+			.connector_type = DRM_MODE_CONNECTOR_VGA,
+		},
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, simple_bridge_match);
+
+static struct platform_driver simple_bridge_driver = {
+	.probe	= simple_bridge_probe,
+	.remove	= simple_bridge_remove,
+	.driver		= {
+		.name		= "simple-bridge",
+		.of_match_table	= simple_bridge_match,
+	},
+};
+module_platform_driver(simple_bridge_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Simple DRM bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 24965e5..383b107 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1820,13 +1820,32 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
 	unsigned int vdisplay, hdisplay;
 
-	vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
+	vmode->mpixelclock = mode->clock * 1000;
 
 	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
 
+	vmode->mtmdsclock = vmode->mpixelclock;
+
+	if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+		switch (hdmi_bus_fmt_color_depth(
+				hdmi->hdmi_data.enc_out_bus_format)) {
+		case 16:
+			vmode->mtmdsclock = vmode->mpixelclock * 2;
+			break;
+		case 12:
+			vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+			break;
+		case 10:
+			vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+			break;
+		}
+	}
+
 	if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
 		vmode->mtmdsclock /= 2;
 
+	dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
+
 	/* Set up HDMI_FC_INVIDCONF */
 	inv_val = (hdmi->hdmi_data.hdcp_enable ||
 		   (dw_hdmi_support_scdc(hdmi) &&
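
The deep-colour scaling above makes the TMDS clock diverge from the pixel
clock for anything but 8-bit output (YUV422 is exempt because it carries
12-bit components at the pixel rate). A worked example for 1080p60 at
12 bpc RGB:

	/* mode->clock = 148500 (kHz) */
	unsigned int mpixelclock = 148500 * 1000;	/* 148.50 MHz */
	unsigned int mtmdsclock = mpixelclock * 3 / 2;	/* 222.75 MHz */
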
@@ -2084,11 +2103,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
 	hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
 
-	/* TOFIX: Get input format from plat data or fallback to RGB888 */
 	if (hdmi->plat_data->input_bus_format)
 		hdmi->hdmi_data.enc_in_bus_format =
 			hdmi->plat_data->input_bus_format;
-	else
+	else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
 		hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
 	/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2098,8 +2116,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 	else
 		hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
 
-	/* TOFIX: Default to RGB888 output format */
-	hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+	if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+		hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 
 	hdmi->hdmi_data.pix_repet_factor = 0;
 	hdmi->hdmi_data.hdcp_enable = 0;
@@ -2377,7 +2395,279 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
 	.atomic_check = dw_hdmi_connector_atomic_check,
 };
 
-static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+/*
+ * Possible output formats:
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 11 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS	11
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					unsigned int *num_output_fmts)
+{
+	struct drm_connector *conn = conn_state->connector;
+	struct drm_display_info *info = &conn->display_info;
+	struct drm_display_mode *mode = &crtc_state->mode;
+	u8 max_bpc = conn_state->max_requested_bpc;
+	bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+			     (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+	u32 *output_fmts;
+	unsigned int i = 0;
+
+	*num_output_fmts = 0;
+
+	output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+			      GFP_KERNEL);
+	if (!output_fmts)
+		return NULL;
+
+	/* If dw-hdmi is the only bridge, avoid negotiating with ourselves */
+	if (list_is_singular(&bridge->encoder->bridge_chain)) {
+		*num_output_fmts = 1;
+		output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+		return output_fmts;
+	}
+
+	/*
+	 * If the current mode enforces 4:2:0, force the output bus format
+	 * to 4:2:0 and do not add the YUV422/444/RGB formats.
+	 */
+	if (conn->ycbcr_420_allowed &&
+	    (drm_mode_is_420_only(info, mode) ||
+	     (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) {
+
+		/* Order bus formats from 16bit to 8bit if supported */
+		if (max_bpc >= 16 && info->bpc == 16 &&
+		    (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+			output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+		if (max_bpc >= 12 && info->bpc >= 12 &&
+		    (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+			output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+		if (max_bpc >= 10 && info->bpc >= 10 &&
+		    (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+			output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+		/* Default 8bit fallback */
+		output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+		*num_output_fmts = i;
+
+		return output_fmts;
+	}
+
+	/*
+	 * Order bus formats from 16bit to 8bit and from YUV422 to RGB
+	 * if supported. In any case, the default RGB888 format is added.
+	 */
+
+	if (max_bpc >= 16 && info->bpc == 16) {
+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+			output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+
+		output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+	}
+
+	if (max_bpc >= 12 && info->bpc >= 12) {
+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+			output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+
+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+			output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+
+		output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+	}
+
+	if (max_bpc >= 10 && info->bpc >= 10) {
+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+			output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+
+		if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+			output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+
+		output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+	}
+
+	if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+		output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+
+	if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+		output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+
+	/* Default 8bit RGB fallback */
+	output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
+	*num_output_fmts = i;
+
+	return output_fmts;
+}
+
+/*
+ * Possible input formats:
+ * - MEDIA_BUS_FMT_RGB888_1X24
+ * - MEDIA_BUS_FMT_YUV8_1X24
+ * - MEDIA_BUS_FMT_UYVY8_1X16
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ * - MEDIA_BUS_FMT_RGB101010_1X30
+ * - MEDIA_BUS_FMT_YUV10_1X30
+ * - MEDIA_BUS_FMT_UYVY10_1X20
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ * - MEDIA_BUS_FMT_RGB121212_1X36
+ * - MEDIA_BUS_FMT_YUV12_1X36
+ * - MEDIA_BUS_FMT_UYVY12_1X24
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ * - MEDIA_BUS_FMT_RGB161616_1X48
+ * - MEDIA_BUS_FMT_YUV16_1X48
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ */
+
+/* Can return a maximum of 3 possible input formats for an output format */
+#define MAX_INPUT_SEL_FORMATS	3
+
+static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts)
+{
+	u32 *input_fmts;
+	unsigned int i = 0;
+
+	*num_input_fmts = 0;
+
+	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+			     GFP_KERNEL);
+	if (!input_fmts)
+		return NULL;
+
+	switch (output_fmt) {
+	/* If MEDIA_BUS_FMT_FIXED is tested, return the default bus format */
+	case MEDIA_BUS_FMT_FIXED:
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+		break;
+	/* 8bit */
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+		break;
+	case MEDIA_BUS_FMT_YUV8_1X24:
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+		break;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+		break;
+
+	/* 10bit */
+	case MEDIA_BUS_FMT_RGB101010_1X30:
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+		break;
+	case MEDIA_BUS_FMT_YUV10_1X30:
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+		break;
+	case MEDIA_BUS_FMT_UYVY10_1X20:
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+		break;
+
+	/* 12bit */
+	case MEDIA_BUS_FMT_RGB121212_1X36:
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+		break;
+	case MEDIA_BUS_FMT_YUV12_1X36:
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+		break;
+	case MEDIA_BUS_FMT_UYVY12_1X24:
+		input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+		break;
+
+	/* 16bit */
+	case MEDIA_BUS_FMT_RGB161616_1X48:
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+		break;
+	case MEDIA_BUS_FMT_YUV16_1X48:
+		input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+		input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+		break;
+
+	/* YUV 4:2:0 */
+	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+	case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+	case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+		input_fmts[i++] = output_fmt;
+		break;
+	}
+
+	*num_input_fmts = i;
+
+	if (*num_input_fmts == 0) {
+		kfree(input_fmts);
+		input_fmts = NULL;
+	}
+
+	return input_fmts;
+}
+
+static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
+				       struct drm_bridge_state *bridge_state,
+				       struct drm_crtc_state *crtc_state,
+				       struct drm_connector_state *conn_state)
+{
+	struct dw_hdmi *hdmi = bridge->driver_private;
+
+	hdmi->hdmi_data.enc_out_bus_format =
+			bridge_state->output_bus_cfg.format;
+
+	hdmi->hdmi_data.enc_in_bus_format =
+			bridge_state->input_bus_cfg.format;
+
+	dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
+		bridge_state->input_bus_cfg.format,
+		bridge_state->output_bus_cfg.format);
+
+	return 0;
+}
+
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 	struct drm_encoder *encoder = bridge->encoder;
@@ -2385,6 +2675,11 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
 	struct cec_connector_info conn_info;
 	struct cec_notifier *notifier;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	connector->interlace_allowed = 1;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -2395,6 +2690,14 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
 				    DRM_MODE_CONNECTOR_HDMIA,
 				    hdmi->ddc);
 
+	/*
+	 * drm_connector_attach_max_bpc_property() requires the
+	 * connector to have a state.
+	 */
+	drm_atomic_helper_connector_reset(connector);
+
+	drm_connector_attach_max_bpc_property(connector, 8, 16);
+
 	if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
 		drm_object_attach_property(&connector->base,
 			connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -2479,8 +2782,14 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
 }
 
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
 	.attach = dw_hdmi_bridge_attach,
 	.detach = dw_hdmi_bridge_detach,
+	.atomic_check = dw_hdmi_bridge_atomic_check,
+	.atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts,
+	.atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts,
 	.enable = dw_hdmi_bridge_enable,
 	.disable = dw_hdmi_bridge_disable,
 	.mode_set = dw_hdmi_bridge_mode_set,
@@ -2949,6 +3258,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
 	hdmi->bridge.of_node = pdev->dev.of_node;
 #endif
 
+	if (hdmi->version >= 0x200a)
+		hdmi->connector.ycbcr_420_allowed =
+			hdmi->plat_data->ycbcr_420_allowed;
+	else
+		hdmi->connector.ycbcr_420_allowed = false;
+
 	memset(&pdevinfo, 0, sizeof(pdevinfo));
 	pdevinfo.parent = dev;
 	pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3082,7 +3397,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
 	if (IS_ERR(hdmi))
 		return hdmi;
 
-	ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+	ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
 	if (ret) {
 		dw_hdmi_remove(hdmi);
 		DRM_ERROR("Failed to initialize bridge with drm\n");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b18351b..5ef0f15 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -824,7 +824,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
 	 * This needs to be fixed in the drm_bridge framework and the API
 	 * needs to be updated to manage our own call chains...
 	 */
-	dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+	if (dsi->panel_bridge->funcs->post_disable)
+		dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
 
 	if (phy_ops->power_off)
 		phy_ops->power_off(dsi->plat_data->priv_data);
@@ -935,7 +936,8 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
 	return mode_status;
 }
 
-static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+				     enum drm_bridge_attach_flags flags)
 {
 	struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
 
@@ -948,7 +950,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
 	bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
 
 	/* Attach the panel-bridge to the dsi bridge */
-	return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+	return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+				 flags);
 }
 
 static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
@@ -1119,7 +1122,7 @@ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
 {
 	int ret;
 
-	ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+	ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
 	if (ret) {
 		DRM_ERROR("Failed to initialize bridge with drm\n");
 		return ret;
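The extra argument threaded through here matters for chains: whatever flags
the first attach receives must be forwarded, as dw-mipi-dsi does above, so
the last bridge knows whether it may create a connector. A hedged sketch of
an encoder-side caller that owns its own connector (hypothetical function):

    #include <drm/drm_bridge.h>

    static int my_encoder_attach_chain(struct drm_encoder *encoder,
                                       struct drm_bridge *first_bridge)
    {
            /* The encoder driver creates the drm_connector itself, so
             * ask the bridge chain not to create one. */
            return drm_bridge_attach(encoder, first_bridge, NULL,
                                     DRM_BRIDGE_ATTACH_NO_CONNECTOR);
    }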
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 96207fc..5ac1430 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -349,12 +349,18 @@ static void tc358764_enable(struct drm_bridge *bridge)
 		dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
 }
 
-static int tc358764_attach(struct drm_bridge *bridge)
+static int tc358764_attach(struct drm_bridge *bridge,
+			   enum drm_bridge_attach_flags flags)
 {
 	struct tc358764 *ctx = bridge_to_tc358764(bridge);
 	struct drm_device *drm = bridge->dev;
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
 	ret = drm_connector_init(drm, &ctx->connector,
 				 &tc358764_connector_funcs,
@@ -369,7 +375,6 @@ static int tc358764_attach(struct drm_bridge *bridge)
 	drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
 	drm_panel_attach(ctx->panel, &ctx->connector);
 	ctx->connector.funcs->reset(&ctx->connector);
-	drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
 	drm_connector_register(&ctx->connector);
 
 	return 0;
@@ -378,10 +383,8 @@ static int tc358764_attach(struct drm_bridge *bridge)
 static void tc358764_detach(struct drm_bridge *bridge)
 {
 	struct tc358764 *ctx = bridge_to_tc358764(bridge);
-	struct drm_device *drm = bridge->dev;
 
 	drm_connector_unregister(&ctx->connector);
-	drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
 	drm_panel_detach(ctx->panel);
 	ctx->panel = NULL;
 	drm_connector_put(&ctx->connector);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index fbdb42d..e4c0ea0 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -31,6 +31,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
 
 /* Registers */
@@ -1403,13 +1404,19 @@ static const struct drm_connector_funcs tc_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static int tc_bridge_attach(struct drm_bridge *bridge)
+static int tc_bridge_attach(struct drm_bridge *bridge,
+			    enum drm_bridge_attach_flags flags)
 {
 	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
 	struct tc_data *tc = bridge_to_tc(bridge);
 	struct drm_device *drm = bridge->dev;
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	/* Create DP/eDP connector */
 	drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
 	ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
new file mode 100644
index 0000000..1b39e8d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* Global (16-bit addressable) */
+#define TC358768_CHIPID			0x0000
+#define TC358768_SYSCTL			0x0002
+#define TC358768_CONFCTL		0x0004
+#define TC358768_VSDLY			0x0006
+#define TC358768_DATAFMT		0x0008
+#define TC358768_GPIOEN			0x000E
+#define TC358768_GPIODIR		0x0010
+#define TC358768_GPIOIN			0x0012
+#define TC358768_GPIOOUT		0x0014
+#define TC358768_PLLCTL0		0x0016
+#define TC358768_PLLCTL1		0x0018
+#define TC358768_CMDBYTE		0x0022
+#define TC358768_PP_MISC		0x0032
+#define TC358768_DSITX_DT		0x0050
+#define TC358768_FIFOSTATUS		0x00F8
+
+/* Debug (16-bit addressable) */
+#define TC358768_VBUFCTRL		0x00E0
+#define TC358768_DBG_WIDTH		0x00E2
+#define TC358768_DBG_VBLANK		0x00E4
+#define TC358768_DBG_DATA		0x00E8
+
+/* TX PHY (32-bit addressable) */
+#define TC358768_CLW_DPHYCONTTX		0x0100
+#define TC358768_D0W_DPHYCONTTX		0x0104
+#define TC358768_D1W_DPHYCONTTX		0x0108
+#define TC358768_D2W_DPHYCONTTX		0x010C
+#define TC358768_D3W_DPHYCONTTX		0x0110
+#define TC358768_CLW_CNTRL		0x0140
+#define TC358768_D0W_CNTRL		0x0144
+#define TC358768_D1W_CNTRL		0x0148
+#define TC358768_D2W_CNTRL		0x014C
+#define TC358768_D3W_CNTRL		0x0150
+
+/* TX PPI (32-bit addressable) */
+#define TC358768_STARTCNTRL		0x0204
+#define TC358768_DSITXSTATUS		0x0208
+#define TC358768_LINEINITCNT		0x0210
+#define TC358768_LPTXTIMECNT		0x0214
+#define TC358768_TCLK_HEADERCNT		0x0218
+#define TC358768_TCLK_TRAILCNT		0x021C
+#define TC358768_THS_HEADERCNT		0x0220
+#define TC358768_TWAKEUP		0x0224
+#define TC358768_TCLK_POSTCNT		0x0228
+#define TC358768_THS_TRAILCNT		0x022C
+#define TC358768_HSTXVREGCNT		0x0230
+#define TC358768_HSTXVREGEN		0x0234
+#define TC358768_TXOPTIONCNTRL		0x0238
+#define TC358768_BTACNTRL1		0x023C
+
+/* TX CTRL (32-bit addressable) */
+#define TC358768_DSI_CONTROL		0x040C
+#define TC358768_DSI_STATUS		0x0410
+#define TC358768_DSI_INT		0x0414
+#define TC358768_DSI_INT_ENA		0x0418
+#define TC358768_DSICMD_RDFIFO		0x0430
+#define TC358768_DSI_ACKERR		0x0434
+#define TC358768_DSI_ACKERR_INTENA	0x0438
+#define TC358768_DSI_ACKERR_HALT	0x043c
+#define TC358768_DSI_RXERR		0x0440
+#define TC358768_DSI_RXERR_INTENA	0x0444
+#define TC358768_DSI_RXERR_HALT		0x0448
+#define TC358768_DSI_ERR		0x044C
+#define TC358768_DSI_ERR_INTENA		0x0450
+#define TC358768_DSI_ERR_HALT		0x0454
+#define TC358768_DSI_CONFW		0x0500
+#define TC358768_DSI_LPCMD		0x0500
+#define TC358768_DSI_RESET		0x0504
+#define TC358768_DSI_INT_CLR		0x050C
+#define TC358768_DSI_START		0x0518
+
+/* DSITX CTRL (16-bit addressable) */
+#define TC358768_DSICMD_TX		0x0600
+#define TC358768_DSICMD_TYPE		0x0602
+#define TC358768_DSICMD_WC		0x0604
+#define TC358768_DSICMD_WD0		0x0610
+#define TC358768_DSICMD_WD1		0x0612
+#define TC358768_DSICMD_WD2		0x0614
+#define TC358768_DSICMD_WD3		0x0616
+#define TC358768_DSI_EVENT		0x0620
+#define TC358768_DSI_VSW		0x0622
+#define TC358768_DSI_VBPR		0x0624
+#define TC358768_DSI_VACT		0x0626
+#define TC358768_DSI_HSW		0x0628
+#define TC358768_DSI_HBPR		0x062A
+#define TC358768_DSI_HACT		0x062C
+
+/* TC358768_DSI_CONTROL (0x040C) register */
+#define TC358768_DSI_CONTROL_DIS_MODE	BIT(15)
+#define TC358768_DSI_CONTROL_TXMD	BIT(7)
+#define TC358768_DSI_CONTROL_HSCKMD	BIT(5)
+#define TC358768_DSI_CONTROL_EOTDIS	BIT(0)
+
+/* TC358768_DSI_CONFW (0x0500) register */
+#define TC358768_DSI_CONFW_MODE_SET	(5 << 29)
+#define TC358768_DSI_CONFW_MODE_CLR	(6 << 29)
+#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL	(0x3 << 24)
+
+static const char * const tc358768_supplies[] = {
+	"vddc", "vddmipi", "vddio"
+};
+
+struct tc358768_dsi_output {
+	struct mipi_dsi_device *dev;
+	struct drm_panel *panel;
+	struct drm_bridge *bridge;
+};
+
+struct tc358768_priv {
+	struct device *dev;
+	struct regmap *regmap;
+	struct gpio_desc *reset_gpio;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)];
+	struct clk *refclk;
+	bool enabled;
+	int error;
+
+	struct mipi_dsi_host dsi_host;
+	struct drm_bridge bridge;
+	struct tc358768_dsi_output output;
+
+	u32 pd_lines; /* number of Parallel Port Input Data Lines */
+	u32 dsi_lanes; /* number of DSI Lanes */
+
+	/* Parameters for PLL programming */
+	u32 fbd;	/* PLL feedback divider */
+	u32 prd;	/* PLL input divider */
+	u32 frs;	/* PLL Frequency range for HSCK (post divider) */
+
+	u32 dsiclk;	/* pll_clk / 2 */
+};
+
+static inline struct tc358768_priv *
+dsi_host_to_tc358768(struct mipi_dsi_host *host)
+{
+	return container_of(host, struct tc358768_priv, dsi_host);
+}
+
+static inline struct tc358768_priv *
+bridge_to_tc358768(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct tc358768_priv, bridge);
+}
+
+static int tc358768_clear_error(struct tc358768_priv *priv)
+{
+	int ret = priv->error;
+
+	priv->error = 0;
+	return ret;
+}
+
+static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
+{
+	size_t count = 2;
+
+	if (priv->error)
+		return;
+
+	/* 16-bit register? */
+	if (reg < 0x100 || reg >= 0x600)
+		count = 1;
+
+	priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+}
+
+static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
+{
+	size_t count = 2;
+
+	if (priv->error)
+		return;
+
+	/* 16-bit register? */
+	if (reg < 0x100 || reg >= 0x600) {
+		*val = 0;
+		count = 1;
+	}
+
+	priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
+}
+
+static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+				 u32 val)
+{
+	u32 tmp, orig;
+
+	tc358768_read(priv, reg, &orig);
+	tmp = orig & ~mask;
+	tmp |= val & mask;
+	if (tmp != orig)
+		tc358768_write(priv, reg, tmp);
+}
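The three helpers above use a latched-error style: the first regmap failure
is stored in priv->error, later accesses become no-ops, and a whole register
sequence is validated once through tc358768_clear_error(). Registers below
0x100 and from 0x600 up are 16-bit; everything in between is 32-bit and goes
out as two 16-bit words (count = 2). A short illustration using the driver's
own helpers (illustrative sequence, not part of the patch):

    /* Program one 16-bit and one 32-bit register, then collect the
     * first error, if any, in a single place. */
    static int tc358768_example_sequence(struct tc358768_priv *priv)
    {
            tc358768_write(priv, TC358768_VSDLY, 1);        /* 16-bit */
            tc358768_write(priv, TC358768_LINEINITCNT, 5);  /* 32-bit */

            return tc358768_clear_error(priv);
    }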
+
+static int tc358768_sw_reset(struct tc358768_priv *priv)
+{
+	/* Assert Reset */
+	tc358768_write(priv, TC358768_SYSCTL, 1);
+	/* Release Reset, Exit Sleep */
+	tc358768_write(priv, TC358768_SYSCTL, 0);
+
+	return tc358768_clear_error(priv);
+}
+
+static void tc358768_hw_enable(struct tc358768_priv *priv)
+{
+	int ret;
+
+	if (priv->enabled)
+		return;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+	if (ret < 0)
+		dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+	if (priv->reset_gpio)
+		usleep_range(200, 300);
+
+	/*
+	 * The RESX is active low (GPIO_ACTIVE_LOW).
+	 * DEASSERT (value = 0) the reset_gpio to enable the chip
+	 */
+	gpiod_set_value_cansleep(priv->reset_gpio, 0);
+
+	/* wait for encoder clocks to stabilize */
+	usleep_range(1000, 2000);
+
+	priv->enabled = true;
+}
+
+static void tc358768_hw_disable(struct tc358768_priv *priv)
+{
+	int ret;
+
+	if (!priv->enabled)
+		return;
+
+	/*
+	 * The RESX is active low (GPIO_ACTIVE_LOW).
+	 * ASSERT (value = 1) the reset_gpio to disable the chip
+	 */
+	gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+				     priv->supplies);
+	if (ret < 0)
+		dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+	priv->enabled = false;
+}
+
+static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+{
+	return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
+}
+
+static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+{
+	return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
+}
+
+static int tc358768_calc_pll(struct tc358768_priv *priv,
+			     const struct drm_display_mode *mode,
+			     bool verify_only)
+{
+	const u32 frs_limits[] = {
+		1000000000,
+		500000000,
+		250000000,
+		125000000,
+		62500000
+	};
+	unsigned long refclk;
+	u32 prd, target_pll, i, max_pll, min_pll;
+	u32 frs, best_diff, best_pll, best_prd, best_fbd;
+
+	target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
+
+	/* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
+
+	for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
+		if (target_pll >= frs_limits[i])
+			break;
+
+	if (i == ARRAY_SIZE(frs_limits) || i == 0)
+		return -EINVAL;
+
+	frs = i - 1;
+	max_pll = frs_limits[i - 1];
+	min_pll = frs_limits[i];
+
+	refclk = clk_get_rate(priv->refclk);
+
+	best_diff = UINT_MAX;
+	best_pll = 0;
+	best_prd = 0;
+	best_fbd = 0;
+
+	for (prd = 0; prd < 16; ++prd) {
+		u32 divisor = (prd + 1) * (1 << frs);
+		u32 fbd;
+
+		for (fbd = 0; fbd < 512; ++fbd) {
+			u32 pll, diff;
+
+			pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+			if (pll >= max_pll || pll < min_pll)
+				continue;
+
+			diff = max(pll, target_pll) - min(pll, target_pll);
+
+			if (diff < best_diff) {
+				best_diff = diff;
+				best_pll = pll;
+				best_prd = prd;
+				best_fbd = fbd;
+
+				if (best_diff == 0)
+					goto found;
+			}
+		}
+	}
+
+	if (best_diff == UINT_MAX) {
+		dev_err(priv->dev, "could not find suitable PLL setup\n");
+		return -EINVAL;
+	}
+
+found:
+	if (verify_only)
+		return 0;
+
+	priv->fbd = best_fbd;
+	priv->prd = best_prd;
+	priv->frs = frs;
+	priv->dsiclk = best_pll / 2;
+
+	return 0;
+}
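Worked numbers for the search above, under hypothetical conditions: with 24
parallel input lines on 4 DSI lanes, a 99 MHz pixel clock needs pll_clk =
99 MHz * 24 / 4 = 594 MHz (tc358768_pclk_to_pll()). That target falls in the
500 MHz - 1 GHz band, so FRS = 0, and with a 26 MHz reference clock the
exhaustive search hits it exactly:

    /*
     * pll_clk = RefClk * (FBD + 1) / ((PRD + 1) * 2^FRS)
     *
     * PRD = 12, FBD = 296:  26 MHz * 297 / 13 = 594 MHz  (diff = 0,
     * so the loop exits early through the "found" label), and
     * dsiclk = 594 / 2 = 297 MHz.
     */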
+
+static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+				    struct mipi_dsi_device *dev)
+{
+	struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+	struct drm_bridge *bridge;
+	struct drm_panel *panel;
+	struct device_node *ep;
+	int ret;
+
+	if (dev->lanes > 4) {
+		dev_err(priv->dev, "unsupported number of data lanes(%u)\n",
+			dev->lanes);
+		return -EINVAL;
+	}
+
+	/*
+	 * tc358768 supports both Video and Pulse mode, but the driver
+	 * currently only implements Video (event) mode.
+	 */
+	if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+		dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+		return -ENOTSUPP;
+	}
+
+	/*
+	 * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
+	 * RGB888 is verified.
+	 */
+	if (dev->format != MIPI_DSI_FMT_RGB888) {
+		dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
+		return -ENOTSUPP;
+	}
+
+	ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
+					  &bridge);
+	if (ret)
+		return ret;
+
+	if (panel) {
+		bridge = drm_panel_bridge_add_typed(panel,
+						    DRM_MODE_CONNECTOR_DSI);
+		if (IS_ERR(bridge))
+			return PTR_ERR(bridge);
+	}
+
+	priv->output.dev = dev;
+	priv->output.bridge = bridge;
+	priv->output.panel = panel;
+
+	priv->dsi_lanes = dev->lanes;
+
+	/* get input ep (port0/endpoint0) */
+	ret = -EINVAL;
+	ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+	if (ep) {
+		ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+
+		of_node_put(ep);
+	}
+
+	if (ret)
+		priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+	drm_bridge_add(&priv->bridge);
+
+	return 0;
+}
+
+static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
+				    struct mipi_dsi_device *dev)
+{
+	struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+
+	drm_bridge_remove(&priv->bridge);
+	if (priv->output.panel)
+		drm_panel_bridge_remove(priv->output.bridge);
+
+	return 0;
+}
+
+static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+					  const struct mipi_dsi_msg *msg)
+{
+	struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+	struct mipi_dsi_packet packet;
+	int ret;
+
+	if (!priv->enabled) {
+		dev_err(priv->dev, "Bridge is not enabled\n");
+		return -ENODEV;
+	}
+
+	if (msg->rx_len) {
+		dev_warn(priv->dev, "MIPI rx is not supported\n");
+		return -ENOTSUPP;
+	}
+
+	if (msg->tx_len > 8) {
+		dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n");
+		return -ENOTSUPP;
+	}
+
+	ret = mipi_dsi_create_packet(&packet, msg);
+	if (ret)
+		return ret;
+
+	if (mipi_dsi_packet_format_is_short(msg->type)) {
+		tc358768_write(priv, TC358768_DSICMD_TYPE,
+			       (0x10 << 8) | (packet.header[0] & 0x3f));
+		tc358768_write(priv, TC358768_DSICMD_WC, 0);
+		tc358768_write(priv, TC358768_DSICMD_WD0,
+			       (packet.header[2] << 8) | packet.header[1]);
+	} else {
+		int i;
+
+		tc358768_write(priv, TC358768_DSICMD_TYPE,
+			       (0x40 << 8) | (packet.header[0] & 0x3f));
+		tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length);
+		for (i = 0; i < packet.payload_length; i += 2) {
+			u16 val = packet.payload[i];
+
+			if (i + 1 < packet.payload_length)
+				val |= packet.payload[i + 1] << 8;
+
+			tc358768_write(priv, TC358768_DSICMD_WD0 + i, val);
+		}
+	}
+
+	/* start transfer */
+	tc358768_write(priv, TC358768_DSICMD_TX, 1);
+
+	ret = tc358768_clear_error(priv);
+	if (ret)
+		dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+	else
+		ret = packet.size;
+
+	return ret;
+}
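From a panel driver's point of view these host ops are reached through the
standard MIPI DSI helpers; a parameterless DCS command, for instance, arrives
in tc358768_dsi_host_transfer() as a short packet whose header bytes end up
in TC358768_DSICMD_WD0. A minimal sketch (hypothetical panel code):

    #include <drm/drm_mipi_dsi.h>

    /* Travels through the host's .transfer op as a short write packet. */
    static int example_panel_enable(struct mipi_dsi_device *dsi)
    {
            return mipi_dsi_dcs_set_display_on(dsi);
    }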
+
+static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
+	.attach = tc358768_dsi_host_attach,
+	.detach = tc358768_dsi_host_detach,
+	.transfer = tc358768_dsi_host_transfer,
+};
+
+static int tc358768_bridge_attach(struct drm_bridge *bridge,
+				  enum drm_bridge_attach_flags flags)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+	if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+		dev_err(priv->dev, "needs atomic updates support\n");
+		return -ENOTSUPP;
+	}
+
+	return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+				 flags);
+}
+
+static enum drm_mode_status
+tc358768_bridge_mode_valid(struct drm_bridge *bridge,
+			   const struct drm_display_mode *mode)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+	if (tc358768_calc_pll(priv, mode, true))
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static void tc358768_bridge_disable(struct drm_bridge *bridge)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+	int ret;
+
+	/* set FrmStop */
+	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));
+
+	/* wait at least for one frame */
+	msleep(50);
+
+	/* clear PP_en */
+	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);
+
+	/* set RstPtr */
+	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));
+
+	ret = tc358768_clear_error(priv);
+	if (ret)
+		dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+}
+
+static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+	tc358768_hw_disable(priv);
+}
+
+static int tc358768_setup_pll(struct tc358768_priv *priv,
+			      const struct drm_display_mode *mode)
+{
+	u32 fbd, prd, frs;
+	int ret;
+
+	ret = tc358768_calc_pll(priv, mode, false);
+	if (ret) {
+		dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
+		return ret;
+	}
+
+	fbd = priv->fbd;
+	prd = priv->prd;
+	frs = priv->frs;
+
+	dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+		clk_get_rate(priv->refclk), fbd, prd, frs);
+	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+		priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+	dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+		tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+		mode->clock * 1000);
+
+	/* PRD[15:12] FBD[8:0] */
+	tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+
+	/* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+	tc358768_write(priv, TC358768_PLLCTL1,
+		       (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));
+
+	/* wait for lock */
+	usleep_range(1000, 2000);
+
+	/* FRS[11:10] LBWS[9:8] CKEN[4] PLL_CKEN[4] RESETB[1] EN[0] */
+	tc358768_write(priv, TC358768_PLLCTL1,
+		       (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));
+
+	return tc358768_clear_error(priv);
+}
+
+#define TC358768_PRECISION	1000
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+{
+	return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+}
+
+static u32 tc358768_to_ns(u32 nsk)
+{
+	return (nsk / TC358768_PRECISION);
+}
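These helpers keep time in units of 1/TC358768_PRECISION ns ("nsk"), i.e.
with three fractional decimal digits. A worked example with a hypothetical
100 MHz DSI byte clock:

    /*
     * period = 10 ns  =>  dsibclk_nsk = 10 * 1000 = 10000
     *
     * tc358768_ns_to_cnt(50, 10000) = (50 * 1000 + 10000) / 10000 = 6
     *
     * so a 50 ns LP-TX time takes 6 byte clocks (rounded up, with one
     * cycle of margin), and LPTXTIMECNT below is programmed as 6 - 1 = 5.
     */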
+
+static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+	struct mipi_dsi_device *dsi_dev = priv->output.dev;
+	u32 val, val2, lptxcnt, hact, data_type;
+	const struct drm_display_mode *mode;
+	u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+	u32 dsiclk, dsibclk;
+	int ret, i;
+
+	tc358768_hw_enable(priv);
+
+	ret = tc358768_sw_reset(priv);
+	if (ret) {
+		dev_err(priv->dev, "Software reset failed: %d\n", ret);
+		tc358768_hw_disable(priv);
+		return;
+	}
+
+	mode = &bridge->encoder->crtc->state->adjusted_mode;
+	ret = tc358768_setup_pll(priv, mode);
+	if (ret) {
+		dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+		tc358768_hw_disable(priv);
+		return;
+	}
+
+	dsiclk = priv->dsiclk;
+	dsibclk = dsiclk / 4;
+
+	/* Data Format Control Register */
+	val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+	switch (dsi_dev->format) {
+	case MIPI_DSI_FMT_RGB888:
+		val |= (0x3 << 4);
+		hact = mode->hdisplay * 3;
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		val |= (0x4 << 4);
+		hact = mode->hdisplay * 3;
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+		break;
+
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		val |= (0x4 << 4) | BIT(3);
+		hact = mode->hdisplay * 18 / 8;
+		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+		break;
+
+	case MIPI_DSI_FMT_RGB565:
+		val |= (0x5 << 4);
+		hact = mode->hdisplay * 2;
+		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+		break;
+	default:
+		dev_err(priv->dev, "Invalid data format (%u)\n",
+			dsi_dev->format);
+		tc358768_hw_disable(priv);
+		return;
+	}
+
+	/* VSDly[9:0] */
+	tc358768_write(priv, TC358768_VSDLY, 1);
+
+	tc358768_write(priv, TC358768_DATAFMT, val);
+	tc358768_write(priv, TC358768_DSITX_DT, data_type);
+
+	/* Enable D-PHY (HiZ->LP11) */
+	tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
+	/* Enable lanes */
+	for (i = 0; i < dsi_dev->lanes; i++)
+		tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+	/* DSI Timings */
+	dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+				  dsibclk);
+	dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+	ui_nsk = dsiclk_nsk / 2;
+	phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+	dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+	dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+	dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+	dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
+
+	/* LP11 > 100us for D-PHY Rx Init */
+	val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+	dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+	/* LPTimeCnt > 50ns */
+	val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+	lptxcnt = val;
+	dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+	/* 38ns < TCLK_PREPARE < 95ns */
+	val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+	/* TCLK_PREPARE + TCLK_ZERO > 300ns */
+	val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+				  dsibclk_nsk);
+	val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
+	dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+	/* TCLK_TRAIL > 60ns + 3*UI */
+	val = 60 + tc358768_to_ns(3 * ui_nsk);
+	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
+	dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+	/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+	val = 50 + tc358768_to_ns(4 * ui_nsk);
+	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+	/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
+	val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+	val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
+	dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+	/* TWAKEUP > 1ms in lptxcnt steps */
+	val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+	val = val / (lptxcnt + 1) - 1;
+	dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+	tc358768_write(priv, TC358768_TWAKEUP, val);
+
+	/* TCLK_POSTCNT > 60ns + 52*UI */
+	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+				 dsibclk_nsk) - 3;
+	dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+	/* 60ns + 4*UI < THS_TRAIL < 105ns + 12*UI */
+	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+				 dsibclk_nsk) - 5;
+	dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+	tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+	val = BIT(0);
+	for (i = 0; i < dsi_dev->lanes; i++)
+		val |= BIT(i + 1);
+	tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+	if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+
+	/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+	val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+	val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+				  dsibclk_nsk) - 2;
+	val |= val2 << 16;
+	dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+	tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+	/* START[0] */
+	tc358768_write(priv, TC358768_STARTCNTRL, 1);
+
+	/* Set event mode */
+	tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+	/* vsw (+ vbp) */
+	tc358768_write(priv, TC358768_DSI_VSW,
+		       mode->vtotal - mode->vsync_start);
+	/* vbp (not used in event mode) */
+	tc358768_write(priv, TC358768_DSI_VBPR, 0);
+	/* vact */
+	tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+	/* (hsw + hbp) * byteclk * ndl / pclk */
+	val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+			   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+			   mode->clock * 1000);
+	tc358768_write(priv, TC358768_DSI_HSW, val);
+	/* hbp (not used in event mode) */
+	tc358768_write(priv, TC358768_DSI_HBPR, 0);
+	/* hact (bytes) */
+	tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+	/* VSYNC polarity */
+	if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+		tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+	/* HSYNC polarity */
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+
+	/* Start DSI Tx */
+	tc358768_write(priv, TC358768_DSI_START, 0x1);
+
+	/* Configure DSI_Control register */
+	val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+	val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
+	       0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
+	tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+	val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+	val |= (dsi_dev->lanes - 1) << 1;
+
+	if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+		val |= TC358768_DSI_CONTROL_TXMD;
+
+	if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		val |= TC358768_DSI_CONTROL_HSCKMD;
+
+	if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+		val |= TC358768_DSI_CONTROL_EOTDIS;
+
+	tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+	val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+	val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
+	tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+	ret = tc358768_clear_error(priv);
+	if (ret) {
+		dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+		tc358768_bridge_disable(bridge);
+		tc358768_bridge_post_disable(bridge);
+	}
+}
+
+static void tc358768_bridge_enable(struct drm_bridge *bridge)
+{
+	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+	int ret;
+
+	if (!priv->enabled) {
+		dev_err(priv->dev, "Bridge is not enabled\n");
+		return;
+	}
+
+	/* clear FrmStop and RstPtr */
+	tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);
+
+	/* set PP_en */
+	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
+
+	ret = tc358768_clear_error(priv);
+	if (ret) {
+		dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
+		tc358768_bridge_disable(bridge);
+		tc358768_bridge_post_disable(bridge);
+	}
+}
+
+static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+	.attach = tc358768_bridge_attach,
+	.mode_valid = tc358768_bridge_mode_valid,
+	.pre_enable = tc358768_bridge_pre_enable,
+	.enable = tc358768_bridge_enable,
+	.disable = tc358768_bridge_disable,
+	.post_disable = tc358768_bridge_post_disable,
+};
+
+static const struct drm_bridge_timings default_tc358768_timings = {
+	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+		 | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+		 | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static bool tc358768_is_reserved_reg(unsigned int reg)
+{
+	switch (reg) {
+	case 0x114 ... 0x13f:
+	case 0x200:
+	case 0x20c:
+	case 0x400 ... 0x408:
+	case 0x41c ... 0x42f:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
+{
+	if (tc358768_is_reserved_reg(reg))
+		return false;
+
+	switch (reg) {
+	case TC358768_CHIPID:
+	case TC358768_FIFOSTATUS:
+	case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
+	case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
+	case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
+		return false;
+	default:
+		return true;
+	}
+}
+
+static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
+{
+	if (tc358768_is_reserved_reg(reg))
+		return false;
+
+	switch (reg) {
+	case TC358768_STARTCNTRL:
+	case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
+	case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
+	case TC358768_DSI_START ... (TC358768_DSI_START + 2):
+	case TC358768_DBG_DATA:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static const struct regmap_config tc358768_regmap_config = {
+	.name = "tc358768",
+	.reg_bits = 16,
+	.val_bits = 16,
+	.max_register = TC358768_DSI_HACT,
+	.cache_type = REGCACHE_NONE,
+	.writeable_reg = tc358768_writeable_reg,
+	.readable_reg = tc358768_readable_reg,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct i2c_device_id tc358768_i2c_ids[] = {
+	{ "tc358768", 0 },
+	{ "tc358778", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
+
+static const struct of_device_id tc358768_of_ids[] = {
+	{ .compatible = "toshiba,tc358768", },
+	{ .compatible = "toshiba,tc358778", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tc358768_of_ids);
+
+static int tc358768_get_regulators(struct tc358768_priv *priv)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
+		priv->supplies[i].supply = tc358768_supplies[i];
+
+	ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
+				      priv->supplies);
+	if (ret < 0)
+		dev_err(priv->dev, "failed to get regulators: %d\n", ret);
+
+	return ret;
+}
+
+static int tc358768_i2c_probe(struct i2c_client *client,
+			      const struct i2c_device_id *id)
+{
+	struct tc358768_priv *priv;
+	struct device *dev = &client->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	if (!np)
+		return -ENODEV;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, priv);
+	priv->dev = dev;
+
+	ret = tc358768_get_regulators(priv);
+	if (ret)
+		return ret;
+
+	priv->refclk = devm_clk_get(dev, "refclk");
+	if (IS_ERR(priv->refclk))
+		return PTR_ERR(priv->refclk);
+
+	/*
+	 * RESX is active low. To disable the tc358768 initially (keep it in
+	 * reset), the GPIO line must be LOW; that is the ASSERTED state of
+	 * GPIO_ACTIVE_LOW (GPIOD_OUT_HIGH == ASSERTED).
+	 */
+	priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(priv->reset_gpio))
+		return PTR_ERR(priv->reset_gpio);
+
+	priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
+	if (IS_ERR(priv->regmap)) {
+		dev_err(dev, "Failed to init regmap\n");
+		return PTR_ERR(priv->regmap);
+	}
+
+	priv->dsi_host.dev = dev;
+	priv->dsi_host.ops = &tc358768_dsi_host_ops;
+
+	priv->bridge.funcs = &tc358768_bridge_funcs;
+	priv->bridge.timings = &default_tc358768_timings;
+	priv->bridge.of_node = np;
+
+	i2c_set_clientdata(client, priv);
+
+	return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static int tc358768_i2c_remove(struct i2c_client *client)
+{
+	struct tc358768_priv *priv = i2c_get_clientdata(client);
+
+	mipi_dsi_host_unregister(&priv->dsi_host);
+
+	return 0;
+}
+
+static struct i2c_driver tc358768_driver = {
+	.driver = {
+		.name = "tc358768",
+		.of_match_table = tc358768_of_ids,
+	},
+	.id_table = tc358768_i2c_ids,
+	.probe = tc358768_i2c_probe,
+	.remove	= tc358768_i2c_remove,
+};
+module_i2c_driver(tc358768_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 3d74129b..97d8129 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -42,11 +42,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
 	return container_of(bridge, struct thc63_dev, bridge);
 }
 
-static int thc63_attach(struct drm_bridge *bridge)
+static int thc63_attach(struct drm_bridge *bridge,
+			enum drm_bridge_attach_flags flags)
 {
 	struct thc63_dev *thc63 = to_thc63(bridge);
 
-	return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+	return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
 }
 
 static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9a2dd98..6ad688b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -51,6 +51,7 @@
 #define SN_ENH_FRAME_REG			0x5A
 #define  VSTREAM_ENABLE				BIT(3)
 #define SN_DATA_FORMAT_REG			0x5B
+#define  BPP_18_RGB				BIT(0)
 #define SN_HPD_DISABLE_REG			0x5C
 #define  HPD_DISABLE				BIT(0)
 #define SN_AUX_WDATA_REG(x)			(0x64 + (x))
@@ -100,6 +101,7 @@ struct ti_sn_bridge {
 	struct drm_panel		*panel;
 	struct gpio_desc		*enable_gpio;
 	struct regulator_bulk_data	supplies[SN_REGULATOR_SUPPLY_NUM];
+	int				dp_lanes;
 };
 
 static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -264,7 +266,8 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
 				       pdata->supplies);
 }
 
-static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+			       enum drm_bridge_attach_flags flags)
 {
 	int ret, val;
 	struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@@ -275,6 +278,11 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
 						   .node = NULL,
 						 };
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	ret = drm_connector_init(bridge->dev, &pdata->connector,
 				 &ti_sn_bridge_connector_funcs,
 				 DRM_MODE_CONNECTOR_eDP);
@@ -312,7 +320,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
 		goto err_dsi_host;
 	}
 
-	/* TODO: setting to 4 lanes always for now */
+	/* TODO: setting to 4 MIPI lanes always for now */
 	dsi->lanes = 4;
 	dsi->format = MIPI_DSI_FMT_RGB888;
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
@@ -417,19 +425,10 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
 			   REFCLK_FREQ(i));
 }
 
-/**
- * LUT index corresponds to register value and
- * LUT values corresponds to dp data rate supported
- * by the bridge in Mbps unit.
- */
-static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
-	0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
-};
-
-static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
 {
-	unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
-	unsigned int val, i;
+	unsigned int bit_rate_mhz, clk_freq_mhz;
+	unsigned int val;
 	struct drm_display_mode *mode =
 		&pdata->bridge.encoder->crtc->state->adjusted_mode;
 
@@ -442,16 +441,125 @@ static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
 	val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
 		(((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
 	regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+}
 
-	/* set DP data rate */
-	dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
-							DP_CLK_FUDGE_DEN;
-	for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+{
+	if (pdata->connector.display_info.bpc <= 6)
+		return 18;
+	else
+		return 24;
+}
+
+/*
+ * The LUT index corresponds to the register value, and the LUT
+ * values correspond to the DP data rates supported by the bridge,
+ * in Mbps.
+ */
+static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
+	0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
+};
+
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
+{
+	unsigned int bit_rate_khz, dp_rate_mhz;
+	unsigned int i;
+	struct drm_display_mode *mode =
+		&pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+	/* Calculate minimum bit rate based on our pixel clock. */
+	bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
+
+	/* Calculate minimum DP data rate, allowing 80% efficiency per DP spec */
+	dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
+				   1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
+
+	for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
 		if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
 			break;
 
-	regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
-			   DP_DATARATE_MASK, DP_DATARATE(i));
+	return i;
+}
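A worked pass through the math above, assuming DP_CLK_FUDGE_NUM/DEN encode
the 10/8 overhead of 8b/10b channel coding: 1080p60 (148.5 MHz pixel clock)
at 24 bpp over 4 lanes gives

    /*
     * bit_rate_khz = 148500 * 24                         = 3564000
     * dp_rate_mhz  = ceil(3564000 * 10 / (1000 * 4 * 8)) = 1114
     *
     * The first LUT entry above 1114 Mbps is 1620 at index 1, so link
     * training starts there and only climbs if training fails.
     */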
+
+static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+					  bool rate_valid[])
+{
+	unsigned int rate_per_200khz;
+	unsigned int rate_mhz;
+	u8 dpcd_val;
+	int ret;
+	int i, j;
+
+	ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
+	if (ret != 1) {
+		DRM_DEV_ERROR(pdata->dev,
+			      "Can't read eDP rev (%d), assuming 1.1\n", ret);
+		dpcd_val = DP_EDP_11;
+	}
+
+	if (dpcd_val >= DP_EDP_14) {
+		/* eDP 1.4 devices must provide a custom table */
+		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+		ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
+				       sink_rates, sizeof(sink_rates));
+
+		if (ret != sizeof(sink_rates)) {
+			DRM_DEV_ERROR(pdata->dev,
+				"Can't read supported rate table (%d)\n", ret);
+
+			/* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
+			memset(sink_rates, 0, sizeof(sink_rates));
+		}
+
+		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+			rate_per_200khz = le16_to_cpu(sink_rates[i]);
+
+			if (!rate_per_200khz)
+				break;
+
+			rate_mhz = rate_per_200khz * 200 / 1000;
+			for (j = 0;
+			     j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+			     j++) {
+				if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
+					rate_valid[j] = true;
+			}
+		}
+
+		for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
+			if (rate_valid[i])
+				return;
+		}
+		DRM_DEV_ERROR(pdata->dev,
+			      "No matching eDP rates in table; falling back\n");
+	}
+
+	/* On older versions best we can do is use DP_MAX_LINK_RATE */
+	ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
+	if (ret != 1) {
+		DRM_DEV_ERROR(pdata->dev,
+			      "Can't read max rate (%d); assuming 5.4 GHz\n",
+			      ret);
+		dpcd_val = DP_LINK_BW_5_4;
+	}
+
+	switch (dpcd_val) {
+	default:
+		DRM_DEV_ERROR(pdata->dev,
+			      "Unexpected max rate (%#x); assuming 5.4 GHz\n",
+			      (int)dpcd_val);
+		/* fall through */
+	case DP_LINK_BW_5_4:
+		rate_valid[7] = true;
+		/* fall through */
+	case DP_LINK_BW_2_7:
+		rate_valid[4] = true;
+		/* fall through */
+	case DP_LINK_BW_1_62:
+		rate_valid[1] = true;
+		break;
+	}
 }
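For reference, the eDP 1.4 rate table read above stores each supported link
rate in units of 200 kHz; a worked example entry:

    /*
     * sink_rates[] entry 13500 (0x34bc)  =>  13500 * 200 / 1000 = 2700 MHz,
     * which matches ti_sn_bridge_dp_rate_lut[4] and sets rate_valid[4].
     */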
 
 static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
@@ -493,24 +601,30 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
 	usleep_range(10000, 10500); /* 10ms delay recommended by spec */
 }
 
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
 {
-	struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+	u8 data;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
+	if (ret != 1) {
+		DRM_DEV_ERROR(pdata->dev,
+			      "Can't read lane count (%d); assuming 4\n", ret);
+		return 4;
+	}
+
+	return data & DP_LANE_COUNT_MASK;
+}
+
+static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+			       const char **last_err_str)
+{
 	unsigned int val;
 	int ret;
 
-	/* DSI_A lane config */
-	val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
-	regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
-			   CHA_DSI_LANES_MASK, val);
-
-	/* DP lane config */
-	val = DP_NUM_LANES(pdata->dsi->lanes - 1);
-	regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
-			   val);
-
-	/* set dsi/dp clk frequency value */
-	ti_sn_bridge_set_dsi_dp_rate(pdata);
+	/* set dp clk frequency value */
+	regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+			   DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));
 
 	/* enable DP PLL */
 	regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
@@ -519,10 +633,62 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
 				       val & DPPLL_SRC_DP_PLL_LOCK, 1000,
 				       50 * 1000);
 	if (ret) {
-		DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
-		return;
+		*last_err_str = "DP_PLL_LOCK polling failed";
+		goto exit;
 	}
 
+	/* Semi auto link training mode */
+	regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+	ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+				       val == ML_TX_MAIN_LINK_OFF ||
+				       val == ML_TX_NORMAL_MODE, 1000,
+				       500 * 1000);
+	if (ret) {
+		*last_err_str = "Training complete polling failed";
+	} else if (val == ML_TX_MAIN_LINK_OFF) {
+		*last_err_str = "Link training failed, link is off";
+		ret = -EIO;
+	}
+
+exit:
+	/* Disable the PLL if we failed */
+	if (ret)
+		regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+	return ret;
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+	struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+	bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
+	const char *last_err_str = "No supported DP rate";
+	int dp_rate_idx;
+	unsigned int val;
+	int ret = -EINVAL;
+
+	/*
+	 * Run with the maximum number of lanes that the DP sink supports.
+	 *
+	 * Depending on use cases, we might want to revisit this later because:
+	 * - It's plausible that someone may have run fewer lines to the
+	 *   sink than the sink actually supports, assuming that the lines
+	 *   will just be driven at a higher rate.
+	 * - The DP spec seems to indicate that it's more important to minimize
+	 *   the number of lanes than the link rate.
+	 *
+	 * If we do revisit, it would be important to measure the power impact.
+	 */
+	pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+
+	/* DSI_A lane config */
+	val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+	regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+			   CHA_DSI_LANES_MASK, val);
+
+	/* set dsi clk frequency value */
+	ti_sn_bridge_set_dsi_rate(pdata);
+
 	/**
 	 * The SN65DSI86 only supports ASSR Display Authentication method and
 	 * this method is enabled by default. An eDP panel must support this
@@ -532,17 +698,30 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
 	drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
 			   DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
 
-	/* Semi auto link training mode */
-	regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
-	ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
-				       val == ML_TX_MAIN_LINK_OFF ||
-				       val == ML_TX_NORMAL_MODE, 1000,
-				       500 * 1000);
+	/* Set the DP output format (18 bpp or 24 bpp) */
+	val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+	regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
+
+	/* DP lane config */
+	val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
+	regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+			   val);
+
+	ti_sn_bridge_read_valid_rates(pdata, rate_valid);
+
+	/* Train until we run out of rates */
+	for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+	     dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+	     dp_rate_idx++) {
+		if (!rate_valid[dp_rate_idx])
+			continue;
+
+		ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
+		if (!ret)
+			break;
+	}
 	if (ret) {
-		DRM_ERROR("Training complete polling failed (%d)\n", ret);
-		return;
-	} else if (val == ML_TX_MAIN_LINK_OFF) {
-		DRM_ERROR("Link training failed, link is off\n");
+		DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
 		return;
 	}
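An example walk of the training loop above, with hypothetical sink
capabilities:

    /*
     * rate_valid: [1] 1620  [4] 2700  [7] 5400   (from the DPCD reads)
     * minimum index: 1                           (mode needs ~1114 Mbps/lane)
     *
     * attempt 1: index 1, 1620 Mbps -> training fails, PLL disabled
     * attempt 2: index 4, 2700 Mbps -> link comes up; done at the lowest
     *                                  rate that actually works
     */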
 
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index f195a47..e3eb636 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -4,14 +4,12 @@
  * Author: Jyri Sarha <jsarha@ti.com>
  */
 
-#include <linux/delay.h>
-#include <linux/fwnode.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
-#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
 #include <linux/platform_device.h>
+#include <linux/workqueue.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
@@ -24,16 +22,13 @@
 struct tfp410 {
 	struct drm_bridge	bridge;
 	struct drm_connector	connector;
-	unsigned int		connector_type;
 
 	u32			bus_format;
-	struct i2c_adapter	*ddc;
-	struct gpio_desc	*hpd;
-	int			hpd_irq;
 	struct delayed_work	hpd_work;
 	struct gpio_desc	*powerdown;
 
 	struct drm_bridge_timings timings;
+	struct drm_bridge	*next_bridge;
 
 	struct device *dev;
 };
@@ -56,13 +51,18 @@ static int tfp410_get_modes(struct drm_connector *connector)
 	struct edid *edid;
 	int ret;
 
-	if (!dvi->ddc)
-		goto fallback;
+	edid = drm_bridge_get_edid(dvi->next_bridge, connector);
+	if (IS_ERR_OR_NULL(edid)) {
+		if (edid != ERR_PTR(-ENOTSUPP))
+			DRM_INFO("EDID read failed. Fallback to standard modes\n");
 
-	edid = drm_get_edid(connector, dvi->ddc);
-	if (!edid) {
-		DRM_INFO("EDID read failed. Fallback to standard modes\n");
-		goto fallback;
+		/*
+		 * No EDID: fall back to the XGA standard modes and prefer a
+		 * mode that pretty much anything can handle.
+		 */
+		ret = drm_add_modes_noedid(connector, 1920, 1200);
+		drm_set_preferred_mode(connector, 1024, 768);
+		return ret;
 	}
 
 	drm_connector_update_edid_property(connector, edid);
@@ -72,15 +72,6 @@ static int tfp410_get_modes(struct drm_connector *connector)
 	kfree(edid);
 
 	return ret;
-
-fallback:
-	/* No EDID, fallback on the XGA standard modes */
-	ret = drm_add_modes_noedid(connector, 1920, 1200);
-
-	/* And prefer a mode pretty much anything can handle */
-	drm_set_preferred_mode(connector, 1024, 768);
-
-	return ret;
 }
 
 static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
@@ -92,21 +83,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
 {
 	struct tfp410 *dvi = drm_connector_to_tfp410(connector);
 
-	if (dvi->hpd) {
-		if (gpiod_get_value_cansleep(dvi->hpd))
-			return connector_status_connected;
-		else
-			return connector_status_disconnected;
-	}
-
-	if (dvi->ddc) {
-		if (drm_probe_ddc(dvi->ddc))
-			return connector_status_connected;
-		else
-			return connector_status_disconnected;
-	}
-
-	return connector_status_unknown;
+	return drm_bridge_detect(dvi->next_bridge);
 }
 
 static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -118,27 +95,60 @@ static const struct drm_connector_funcs tfp410_con_funcs = {
 	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
 };
 
-static int tfp410_attach(struct drm_bridge *bridge)
+static void tfp410_hpd_work_func(struct work_struct *work)
+{
+	struct tfp410 *dvi;
+
+	dvi = container_of(work, struct tfp410, hpd_work.work);
+
+	if (dvi->bridge.dev)
+		drm_helper_hpd_irq_event(dvi->bridge.dev);
+}
+
+static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
+{
+	struct tfp410 *dvi = arg;
+
+	mod_delayed_work(system_wq, &dvi->hpd_work,
+			 msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+}
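mod_delayed_work() restarts the timer on every call, so a burst of hot-plug
transitions collapses into a single drm_helper_hpd_irq_event() once the line
has been quiet for HOTPLUG_DEBOUNCE_MS. The same debounce idiom in isolation
(hypothetical names; assumes INIT_DELAYED_WORK() ran at setup time):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    #define MY_DEBOUNCE_MS	1100

    static struct delayed_work my_debounce_work;

    /* Called from a noisy event source: only the last event of a burst
     * survives; the work body runs MY_DEBOUNCE_MS after things settle. */
    static void my_noisy_event(void)
    {
            mod_delayed_work(system_wq, &my_debounce_work,
                             msecs_to_jiffies(MY_DEBOUNCE_MS));
    }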
+
+static int tfp410_attach(struct drm_bridge *bridge,
+			 enum drm_bridge_attach_flags flags)
 {
 	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
 	int ret;
 
+	ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+	if (ret < 0)
+		return ret;
+
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+		return 0;
+
 	if (!bridge->encoder) {
 		dev_err(dvi->dev, "Missing encoder\n");
 		return -ENODEV;
 	}
 
-	if (dvi->hpd_irq >= 0)
+	if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
 		dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
 	else
 		dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
+	if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+		INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
+		drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
+				      dvi);
+	}
+
 	drm_connector_helper_add(&dvi->connector,
 				 &tfp410_con_helper_funcs);
 	ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
 					  &tfp410_con_funcs,
-					  dvi->connector_type,
-					  dvi->ddc);
+					  dvi->next_bridge->type,
+					  dvi->next_bridge->ddc);
 	if (ret) {
 		dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
 			ret);
@@ -148,12 +158,21 @@ static int tfp410_attach(struct drm_bridge *bridge)
 	drm_display_info_set_bus_formats(&dvi->connector.display_info,
 					 &dvi->bus_format, 1);
 
-	drm_connector_attach_encoder(&dvi->connector,
-					  bridge->encoder);
+	drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
 
 	return 0;
 }
 
+static void tfp410_detach(struct drm_bridge *bridge)
+{
+	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+	if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+		drm_bridge_hpd_disable(dvi->next_bridge);
+		cancel_delayed_work_sync(&dvi->hpd_work);
+	}
+}
+
 static void tfp410_enable(struct drm_bridge *bridge)
 {
 	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@ -168,32 +187,26 @@ static void tfp410_disable(struct drm_bridge *bridge)
 	gpiod_set_value_cansleep(dvi->powerdown, 1);
 }
 
+static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
+					      const struct drm_display_mode *mode)
+{
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
 static const struct drm_bridge_funcs tfp410_bridge_funcs = {
 	.attach		= tfp410_attach,
+	.detach		= tfp410_detach,
 	.enable		= tfp410_enable,
 	.disable	= tfp410_disable,
+	.mode_valid	= tfp410_mode_valid,
 };
 
-static void tfp410_hpd_work_func(struct work_struct *work)
-{
-	struct tfp410 *dvi;
-
-	dvi = container_of(work, struct tfp410, hpd_work.work);
-
-	if (dvi->bridge.dev)
-		drm_helper_hpd_irq_event(dvi->bridge.dev);
-}
-
-static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
-{
-	struct tfp410 *dvi = arg;
-
-	mod_delayed_work(system_wq, &dvi->hpd_work,
-			msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
-
-	return IRQ_HANDLED;
-}
-
 static const struct drm_bridge_timings tfp410_default_timings = {
 	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
 			 | DRM_BUS_FLAG_DE_HIGH,
@@ -271,51 +284,9 @@ static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
 	return 0;
 }
 
-static int tfp410_get_connector_properties(struct tfp410 *dvi)
-{
-	struct device_node *connector_node, *ddc_phandle;
-	int ret = 0;
-
-	/* port@1 is the connector node */
-	connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
-	if (!connector_node)
-		return -ENODEV;
-
-	if (of_device_is_compatible(connector_node, "hdmi-connector"))
-		dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-	else
-		dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
-
-	dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
-					  "hpd", 0, GPIOD_IN, "hpd");
-	if (IS_ERR(dvi->hpd)) {
-		ret = PTR_ERR(dvi->hpd);
-		dvi->hpd = NULL;
-		if (ret == -ENOENT)
-			ret = 0;
-		else
-			goto fail;
-	}
-
-	ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
-	if (!ddc_phandle)
-		goto fail;
-
-	dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
-	if (dvi->ddc)
-		dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
-	else
-		ret = -EPROBE_DEFER;
-
-	of_node_put(ddc_phandle);
-
-fail:
-	of_node_put(connector_node);
-	return ret;
-}
-
 static int tfp410_init(struct device *dev, bool i2c)
 {
+	struct device_node *node;
 	struct tfp410 *dvi;
 	int ret;
 
@@ -327,21 +298,31 @@ static int tfp410_init(struct device *dev, bool i2c)
 	dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
 	if (!dvi)
 		return -ENOMEM;
+
+	dvi->dev = dev;
 	dev_set_drvdata(dev, dvi);
 
 	dvi->bridge.funcs = &tfp410_bridge_funcs;
 	dvi->bridge.of_node = dev->of_node;
 	dvi->bridge.timings = &dvi->timings;
-	dvi->dev = dev;
+	dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
 
 	ret = tfp410_parse_timings(dvi, i2c);
 	if (ret)
-		goto fail;
+		return ret;
 
-	ret = tfp410_get_connector_properties(dvi);
-	if (ret)
-		goto fail;
+	/* Get the next bridge, connected to port@1. */
+	node = of_graph_get_remote_node(dev->of_node, 1, -1);
+	if (!node)
+		return -ENODEV;
 
+	dvi->next_bridge = of_drm_find_bridge(node);
+	of_node_put(node);
+
+	if (!dvi->next_bridge)
+		return -EPROBE_DEFER;
+
+	/* Get the powerdown GPIO. */
 	dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
 						 GPIOD_OUT_HIGH);
 	if (IS_ERR(dvi->powerdown)) {
@@ -349,48 +330,18 @@ static int tfp410_init(struct device *dev, bool i2c)
 		return PTR_ERR(dvi->powerdown);
 	}
 
-	if (dvi->hpd)
-		dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
-	else
-		dvi->hpd_irq = -ENXIO;
-
-	if (dvi->hpd_irq >= 0) {
-		INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
-
-		ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
-			NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
-			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-			"hdmi-hpd", dvi);
-		if (ret) {
-			DRM_ERROR("failed to register hpd interrupt\n");
-			goto fail;
-		}
-	}
-
+	/* Register the DRM bridge. */
 	drm_bridge_add(&dvi->bridge);
 
 	return 0;
-fail:
-	i2c_put_adapter(dvi->ddc);
-	if (dvi->hpd)
-		gpiod_put(dvi->hpd);
-	return ret;
 }
 
 static int tfp410_fini(struct device *dev)
 {
 	struct tfp410 *dvi = dev_get_drvdata(dev);
 
-	if (dvi->hpd_irq >= 0)
-		cancel_delayed_work_sync(&dvi->hpd_work);
-
 	drm_bridge_remove(&dvi->bridge);
 
-	if (dvi->ddc)
-		i2c_put_adapter(dvi->ddc);
-	if (dvi->hpd)
-		gpiod_put(dvi->hpd);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
new file mode 100644
index 0000000..514cbf0
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific encoder-opa362 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+
+struct tpd12s015_device {
+	struct drm_bridge bridge;
+
+	struct gpio_desc *ct_cp_hpd_gpio;
+	struct gpio_desc *ls_oe_gpio;
+	struct gpio_desc *hpd_gpio;
+	int hpd_irq;
+
+	struct drm_bridge *next_bridge;
+};
+
+static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
+{
+	return container_of(bridge, struct tpd12s015_device, bridge);
+}
+
+static int tpd12s015_attach(struct drm_bridge *bridge,
+			    enum drm_bridge_attach_flags flags)
+{
+	struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+	int ret;
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+				bridge, flags);
+	if (ret < 0)
+		return ret;
+
+	gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1);
+
+	/* DC-DC converter needs at most 300us to get to 90% of 5V. */
+	usleep_range(300, 1000);
+
+	return 0;
+}
+
+static void tpd12s015_detach(struct drm_bridge *bridge)
+{
+	struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+	gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0);
+}
+
+static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
+{
+	struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+	if (gpiod_get_value_cansleep(tpd->hpd_gpio))
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
+{
+	struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+	gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1);
+}
+
+static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
+{
+	struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+	gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0);
+}
+
+static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
+	.attach			= tpd12s015_attach,
+	.detach			= tpd12s015_detach,
+	.detect			= tpd12s015_detect,
+	.hpd_enable		= tpd12s015_hpd_enable,
+	.hpd_disable		= tpd12s015_hpd_disable,
+};
+
+static irqreturn_t tpd12s015_hpd_isr(int irq, void *data)
+{
+	struct tpd12s015_device *tpd = data;
+	struct drm_bridge *bridge = &tpd->bridge;
+
+	drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge));
+
+	return IRQ_HANDLED;
+}
+
+static int tpd12s015_probe(struct platform_device *pdev)
+{
+	struct tpd12s015_device *tpd;
+	struct device_node *node;
+	struct gpio_desc *gpio;
+	int ret;
+
+	tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
+	if (!tpd)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, tpd);
+
+	tpd->bridge.funcs = &tpd12s015_bridge_funcs;
+	tpd->bridge.of_node = pdev->dev.of_node;
+	tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+	tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
+
+	/* Get the next bridge, connected to port@1. */
+	node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+	if (!node)
+		return -ENODEV;
+
+	tpd->next_bridge = of_drm_find_bridge(node);
+	of_node_put(node);
+
+	if (!tpd->next_bridge)
+		return -EPROBE_DEFER;
+
+	/* Get the control and HPD GPIOs. */
+	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+					     GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	tpd->ct_cp_hpd_gpio = gpio;
+
+	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+					     GPIOD_OUT_LOW);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	tpd->ls_oe_gpio = gpio;
+
+	gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN);
+	if (IS_ERR(gpio))
+		return PTR_ERR(gpio);
+
+	tpd->hpd_gpio = gpio;
+
+	/* Register the IRQ if the HPD GPIO is IRQ-capable. */
+	tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
+	if (tpd->hpd_irq > 0) {
+		ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
+						tpd12s015_hpd_isr,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING |
+						IRQF_ONESHOT,
+						"tpd12s015 hpd", tpd);
+		if (ret)
+			return ret;
+
+		tpd->bridge.ops |= DRM_BRIDGE_OP_HPD;
+	}
+
+	/* Register the DRM bridge. */
+	drm_bridge_add(&tpd->bridge);
+
+	return 0;
+}
+
+static int __exit tpd12s015_remove(struct platform_device *pdev)
+{
+	struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+	drm_bridge_remove(&tpd->bridge);
+
+	return 0;
+}
+
+static const struct of_device_id tpd12s015_of_match[] = {
+	{ .compatible = "ti,tpd12s015", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+static struct platform_driver tpd12s015_driver = {
+	.probe	= tpd12s015_probe,
+	.remove	= __exit_p(tpd12s015_remove),
+	.driver	= {
+		.name	= "tpd12s015",
+		.of_match_table = tpd12s015_of_match,
+	},
+};
+
+module_platform_driver(tpd12s015_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 248c9f7..d2ff63c 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -38,7 +38,6 @@
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
 
 #define DRIVER_NAME "cirrus"
 #define DRIVER_DESC "qemu cirrus vga"
@@ -152,9 +151,13 @@ static int cirrus_pitch(struct drm_framebuffer *fb)
 
 static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
 {
+	int idx;
 	u32 addr;
 	u8 tmp;
 
+	if (!drm_dev_enter(&cirrus->dev, &idx))
+		return;
+
 	addr = offset >> 2;
 	wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
 	wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -169,6 +172,8 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
 	tmp &= 0x7f;
 	tmp |= (addr >> 12) & 0x80;
 	wreg_crt(cirrus, 0x1d, tmp);
+
+	drm_dev_exit(idx);
 }
 
 static int cirrus_mode_set(struct cirrus_device *cirrus,
@@ -177,9 +182,12 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
 {
 	int hsyncstart, hsyncend, htotal, hdispend;
 	int vtotal, vdispend;
-	int tmp;
+	int tmp, idx;
 	int sr07 = 0, hdr = 0;
 
+	if (!drm_dev_enter(&cirrus->dev, &idx))
+		return -1;
+
 	htotal = mode->htotal / 8;
 	hsyncend = mode->hsync_end / 8;
 	hsyncstart = mode->hsync_start / 8;
@@ -265,6 +273,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
 		hdr = 0xc5;
 		break;
 	default:
+		drm_dev_exit(idx);
 		return -1;
 	}
 
@@ -293,6 +302,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
 
 	/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
 	outb(0x20, 0x3c0);
+
+	drm_dev_exit(idx);
 	return 0;
 }
 
@@ -301,10 +312,16 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
 {
 	struct cirrus_device *cirrus = fb->dev->dev_private;
 	void *vmap;
+	int idx, ret;
 
+	ret = -ENODEV;
+	if (!drm_dev_enter(&cirrus->dev, &idx))
+		goto out;
+
+	ret = -ENOMEM;
 	vmap = drm_gem_shmem_vmap(fb->obj[0]);
 	if (!vmap)
-		return -ENOMEM;
+		goto out_dev_exit;
 
 	if (cirrus->cpp == fb->format->cpp[0])
 		drm_fb_memcpy_dstclip(cirrus->vram,
@@ -324,7 +341,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
 		WARN_ON_ONCE("cpp mismatch");
 
 	drm_gem_shmem_vunmap(fb->obj[0], vmap);
-	return 0;
+	ret = 0;
+
+out_dev_exit:
+	drm_dev_exit(idx);
+out:
+	return ret;
 }
 
 static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
@@ -434,13 +456,6 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		crtc->state->event = NULL;
-		spin_unlock_irq(&crtc->dev->event_lock);
-	}
 }
 
 static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -510,6 +525,14 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
 
 /* ------------------------------------------------------------------ */
 
+static void cirrus_release(struct drm_device *dev)
+{
+	struct cirrus_device *cirrus = dev->dev_private;
+
+	drm_mode_config_cleanup(dev);
+	kfree(cirrus);
+}
+
 DEFINE_DRM_GEM_FOPS(cirrus_fops);
 
 static struct drm_driver cirrus_driver = {
@@ -523,6 +546,7 @@ static struct drm_driver cirrus_driver = {
 
 	.fops		 = &cirrus_fops,
 	DRM_GEM_SHMEM_DRIVER_OPS,
+	.release         = cirrus_release,
 };
 
 static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -606,12 +630,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct cirrus_device *cirrus = dev->dev_private;
 
-	drm_dev_unregister(dev);
-	drm_mode_config_cleanup(dev);
+	drm_dev_unplug(dev);
+	drm_atomic_helper_shutdown(dev);
 	iounmap(cirrus->mmio);
 	iounmap(cirrus->vram);
 	drm_dev_put(dev);
-	kfree(cirrus);
 	pci_release_regions(pdev);
 }
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d336915..9ccfbf2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_debugfs.h>
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
@@ -1018,6 +1019,122 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
 }
 
 /**
+ * drm_atomic_get_bridge_state - get bridge state
+ * @state: global atomic state object
+ * @bridge: bridge to get state object for
+ *
+ * This function returns the bridge state for the given bridge, allocating it
+ * if needed. It will also grab the relevant bridge lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted.
+ */
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+			    struct drm_bridge *bridge)
+{
+	struct drm_private_state *obj_state;
+
+	obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
+	if (IS_ERR(obj_state))
+		return ERR_CAST(obj_state);
+
+	return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_bridge_state);
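+
+/*
+ * A minimal usage sketch (the enclosing function, which must hold the global
+ * atomic state, is assumed and not shown): a bridge driver fetches its own
+ * state and propagates error pointers unchanged, so an EDEADLK from the w/w
+ * mutex code restarts the atomic sequence as described above.
+ *
+ *	struct drm_bridge_state *bridge_state;
+ *
+ *	bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ *	if (IS_ERR(bridge_state))
+ *		return PTR_ERR(bridge_state);
+ */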
+
+/**
+ * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the old bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+				struct drm_bridge *bridge)
+{
+	struct drm_private_state *obj_state;
+
+	obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
+	if (!obj_state)
+		return NULL;
+
+	return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
+
+/**
+ * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the new bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+				struct drm_bridge *bridge)
+{
+	struct drm_private_state *obj_state;
+
+	obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
+	if (!obj_state)
+		return NULL;
+
+	return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
+
+/**
+ * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
+ * @state: atomic state
+ * @encoder: DRM encoder
+ *
+ * This function adds all bridges attached to @encoder. This is needed to add
+ * bridge states to @state and make them available when
+ * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
+ * &drm_bridge_funcs.atomic_enable(), &drm_bridge_funcs.atomic_disable() and
+ * &drm_bridge_funcs.atomic_post_disable() are called.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+			       struct drm_encoder *encoder)
+{
+	struct drm_bridge_state *bridge_state;
+	struct drm_bridge *bridge;
+
+	if (!encoder)
+		return 0;
+
+	DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
+			 encoder->base.id, encoder->name, state);
+
+	drm_for_each_bridge_in_chain(encoder, bridge) {
+		/* Skip bridges that don't implement the atomic state hooks. */
+		if (!bridge->funcs->atomic_duplicate_state)
+			continue;
+
+		bridge_state = drm_atomic_get_bridge_state(state, bridge);
+		if (IS_ERR(bridge_state))
+			return PTR_ERR(bridge_state);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
+
+/**
  * drm_atomic_add_affected_connectors - add connectors for CRTC
  * @state: atomic state
  * @crtc: DRM CRTC
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4511c2e..85d163f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state)
 		funcs = encoder->helper_private;
 
 		bridge = drm_bridge_chain_get_first_bridge(encoder);
-		ret = drm_bridge_chain_mode_fixup(bridge,
-					&new_crtc_state->mode,
-					&new_crtc_state->adjusted_mode);
-		if (!ret) {
-			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
-			return -EINVAL;
+		ret = drm_atomic_bridge_chain_check(bridge,
+						    new_crtc_state,
+						    new_conn_state);
+		if (ret) {
+			DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+			return ret;
 		}
 
 		if (funcs && funcs->atomic_check) {
@@ -583,6 +583,7 @@ mode_valid(struct drm_atomic_state *state)
  * &drm_crtc_state.connectors_changed is set when a connector is added or
  * removed from the CRTC.  &drm_crtc_state.active_changed is set when
  * &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
  * See also: drm_atomic_crtc_needs_modeset()
  *
  * IMPORTANT:
@@ -649,6 +650,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 
 			return -EINVAL;
 		}
+
+		if (drm_dev_has_vblank(dev))
+			new_crtc_state->no_vblank = false;
+		else
+			new_crtc_state->no_vblank = true;
 	}
 
 	ret = handle_conflicting_encoders(state, false);
@@ -730,6 +736,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 			return ret;
 	}
 
+	/*
+	 * Iterate over all connectors again, and add all affected bridges to
+	 * the state.
+	 */
+	for_each_oldnew_connector_in_state(state, connector,
+					   old_connector_state,
+					   new_connector_state, i) {
+		struct drm_encoder *encoder;
+
+		encoder = old_connector_state->best_encoder;
+		ret = drm_atomic_add_encoder_bridges(state, encoder);
+		if (ret)
+			return ret;
+
+		encoder = new_connector_state->best_encoder;
+		ret = drm_atomic_add_encoder_bridges(state, encoder);
+		if (ret)
+			return ret;
+	}
+
 	ret = mode_valid(state);
 	if (ret)
 		return ret;
@@ -2215,7 +2241,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
  * when a job is queued, and any change to the pipeline that does not touch the
  * connector is leading to timeouts when calling
  * drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without a
+ * VBLANK interrupt.
  *
  * This is part of the atomic helper support for nonblocking commits, see
  * drm_atomic_helper_setup_commit() for an overview.
@@ -3508,3 +3536,44 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 	return ret;
 }
 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
+ *						  the input end of a bridge
+ * @bridge: bridge control structure
+ * @bridge_state: new bridge state
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ * @output_fmt: tested output bus format
+ * @num_input_fmts: will contain the size of the returned array
+ *
+ * This helper is a pluggable implementation of the
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
+ * modify the bus configuration between their input and their output. It
+ * returns an array of input formats with a single element set to @output_fmt.
+ *
+ * RETURNS:
+ * a valid format array of size @num_input_fmts, or NULL if the allocation
+ * failed
+ */
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts)
+{
+	u32 *input_fmts;
+
+	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+	if (!input_fmts) {
+		*num_input_fmts = 0;
+		return NULL;
+	}
+
+	*num_input_fmts = 1;
+	input_fmts[0] = output_fmt;
+	return input_fmts;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
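+
+/*
+ * A wiring sketch (my_bridge_funcs is a hypothetical driver table): a
+ * pass-through bridge plugs this helper straight into its operations. Note
+ * that implementing atomic_get_input_bus_fmts also requires the atomic state
+ * bookkeeping hooks.
+ *
+ *	static const struct drm_bridge_funcs my_bridge_funcs = {
+ *		...
+ *		.atomic_get_input_bus_fmts =
+ *			drm_atomic_helper_bridge_propagate_bus_fmt,
+ *	};
+ */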
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7cf3cf9..8fce6a1 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,6 +26,7 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_connector.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
@@ -551,3 +552,104 @@ void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj
 	memcpy(state, obj->state, sizeof(*state));
 }
 EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
+
+/**
+ * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
+ * @bridge: bridge object
+ * @state: atomic bridge state
+ *
+ * Copies atomic state from a bridge's current state and resets inferred values.
+ * This is useful for drivers that subclass the bridge state.
+ */
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+						struct drm_bridge_state *state)
+{
+	__drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
+							&state->base);
+	state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
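+
+/*
+ * A subclassing sketch (struct my_bridge_state and my_bridge_duplicate_state
+ * are hypothetical): the driver embeds the base state as its first member,
+ * lets this helper copy the base, then duplicates its own fields.
+ *
+ *	struct my_bridge_state {
+ *		struct drm_bridge_state base;
+ *		u32 fmt;
+ *	};
+ *
+ *	static struct drm_bridge_state *
+ *	my_bridge_duplicate_state(struct drm_bridge *bridge)
+ *	{
+ *		struct my_bridge_state *new;
+ *
+ *		new = kzalloc(sizeof(*new), GFP_KERNEL);
+ *		if (!new)
+ *			return NULL;
+ *
+ *		__drm_atomic_helper_bridge_duplicate_state(bridge, &new->base);
+ *		... copy driver-specific fields from the current state ...
+ *		return &new->base;
+ *	}
+ */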
+
+/**
+ * drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
+ * @bridge: bridge object
+ *
+ * Allocates a new bridge state and initializes it with the current bridge
+ * state values. This helper is meant to be used as a bridge
+ * &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
+ * subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
+{
+	struct drm_bridge_state *new;
+
+	if (WARN_ON(!bridge->base.state))
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (new)
+		__drm_atomic_helper_bridge_duplicate_state(bridge, new);
+
+	return new;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to destroy
+ *
+ * Destroys a bridge state previously created by
+ * &drm_atomic_helper_bridge_reset() or
+ * &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
+ * used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
+ * that don't subclass the bridge state.
+ */
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+					    struct drm_bridge_state *state)
+{
+	kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
+
+/**
+ * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
+ *					default
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to initialize
+ *
+ * Initializes the bridge state to default values. This is meant to be called
+ * by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
+ * the bridge state.
+ */
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+				      struct drm_bridge_state *state)
+{
+	memset(state, 0, sizeof(*state));
+	state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
+
+/**
+ * drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
+ *				      to its default
+ * @bridge: the bridge this state refers to
+ *
+ * Allocates the bridge state and initializes it to default values. This helper
+ * is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
+ * bridges that don't subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
+{
+	struct drm_bridge_state *bridge_state;
+
+	bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
+	if (!bridge_state)
+		return ERR_PTR(-ENOMEM);
+
+	__drm_atomic_helper_bridge_reset(bridge, bridge_state);
+	return bridge_state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
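+
+/*
+ * Bridges that don't subclass the state can use the three stock helpers
+ * together (a sketch; my_bridge_funcs is hypothetical):
+ *
+ *	static const struct drm_bridge_funcs my_bridge_funcs = {
+ *		.atomic_reset = drm_atomic_helper_bridge_reset,
+ *		.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ *		.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ *		...
+ *	};
+ */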
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd9..531b876 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -153,11 +153,6 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
 		return -ENOMEM;
 	}
 
-	if (dev->driver->master_create) {
-		ret = dev->driver->master_create(dev, fpriv->master);
-		if (ret)
-			goto out_err;
-	}
 	fpriv->is_master = 1;
 	fpriv->authenticated = 1;
 
@@ -332,9 +327,6 @@ static void drm_master_destroy(struct kref *kref)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_lease_destroy(master);
 
-	if (dev->driver->master_destroy)
-		dev->driver->master_destroy(dev, master);
-
 	drm_legacy_master_rmmaps(dev, master);
 
 	idr_destroy(&master->magic_map);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c2cf0c9..afdec8e 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 
+#include <drm/drm_atomic_state_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_encoder.h>
 
@@ -38,26 +39,56 @@
  * encoder chain.
  *
  * A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge::
+ * either connected to it directly, or through a chain of bridges::
  *
- *     encoder ---> bridge B ---> bridge A
+ *     [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
  *
- * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
- * bridge A.
+ * Here, the output of the encoder feeds to bridge A, which further feeds to
+ * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
+ * Chaining multiple bridges to the output of a bridge, or the same bridge to
+ * the output of different bridges, is not supported.
  *
- * The driver using the bridge is responsible to make the associations between
- * the encoder and bridges. Once these links are made, the bridges will
- * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in &drm_bridge_funcs.
+ * Display drivers are responsible for linking encoders with the first bridge
+ * in the chains. This is done by acquiring the appropriate bridge with
+ * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
+ * panel with drm_panel_bridge_add_typed() (or the managed version
+ * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
+ * attached to the encoder with a call to drm_bridge_attach().
  *
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * Bridges are responsible for linking themselves with the next bridge in the
+ * chain, if any. This is done the same way as for encoders, with the call to
+ * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
+ *
+ * Once these links are created, the bridges can participate along with encoder
+ * functions to perform mode validation and fixup (through
+ * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
+ * setting (through drm_bridge_chain_mode_set()), enable (through
+ * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
+ * and disable (through drm_atomic_bridge_chain_disable() and
+ * drm_atomic_bridge_chain_post_disable()). Those functions call the
+ * corresponding operations provided in &drm_bridge_funcs in sequence for all
+ * bridges in the chain.
+ *
+ * For display drivers that use the atomic helpers
+ * drm_atomic_helper_check_modeset(),
+ * drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
+ * commit check and commit tail handlers, or through the higher-level
+ * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
+ * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
+ * requires no intervention from the driver. For other drivers, the relevant
+ * DRM bridge chain functions shall be called manually.
+ *
+ * Bridges also participate in implementing the &drm_connector at the end of
+ * the bridge chain. Display drivers may use the drm_bridge_connector_init()
+ * helper to create the &drm_connector, or implement it manually on top of the
+ * connector-related operations exposed by the bridge (see the overview
+ * documentation of bridge operations for more details).
+ *
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
  * CRTCs, encoders or connectors and hence are not visible to userspace. They
  * just provide additional hooks to get the desired output at the end of the
  * encoder chain.
- *
- * Bridges can also be chained up using the &drm_bridge.chain_node field.
- *
- * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
  */
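+
+/*
+ * A minimal linking sketch (the DT port/endpoint lookup and error handling
+ * are driver-specific assumptions): a display driver resolves the first
+ * bridge and attaches it to its encoder, deferring probe until the bridge
+ * driver has registered itself.
+ *
+ *	bridge = of_drm_find_bridge(remote_node);
+ *	if (!bridge)
+ *		return -EPROBE_DEFER;
+ *
+ *	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
+ *	if (ret < 0)
+ *		return ret;
+ */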
 
 static DEFINE_MUTEX(bridge_lock);
@@ -70,6 +101,8 @@ static LIST_HEAD(bridge_list);
  */
 void drm_bridge_add(struct drm_bridge *bridge)
 {
+	mutex_init(&bridge->hpd_mutex);
+
 	mutex_lock(&bridge_lock);
 	list_add_tail(&bridge->list, &bridge_list);
 	mutex_unlock(&bridge_lock);
@@ -86,15 +119,43 @@ void drm_bridge_remove(struct drm_bridge *bridge)
 	mutex_lock(&bridge_lock);
 	list_del_init(&bridge->list);
 	mutex_unlock(&bridge_lock);
+
+	mutex_destroy(&bridge->hpd_mutex);
 }
 EXPORT_SYMBOL(drm_bridge_remove);
 
+static struct drm_private_state *
+drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
+{
+	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+	struct drm_bridge_state *state;
+
+	state = bridge->funcs->atomic_duplicate_state(bridge);
+	return state ? &state->base : NULL;
+}
+
+static void
+drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
+				     struct drm_private_state *s)
+{
+	struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
+	struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+
+	bridge->funcs->atomic_destroy_state(bridge, state);
+}
+
+static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
+	.atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
+	.atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
+};
+
 /**
  * drm_bridge_attach - attach the bridge to an encoder's chain
  *
  * @encoder: DRM encoder
  * @bridge: bridge to attach
  * @previous: previous bridge in the chain (optional)
+ * @flags: DRM_BRIDGE_ATTACH_* flags
  *
  * Called by a kms driver to link the bridge to an encoder's chain. The previous
  * argument specifies the previous bridge in the chain. If NULL, the bridge is
@@ -112,7 +173,8 @@ EXPORT_SYMBOL(drm_bridge_remove);
  * Zero on success, error code on failure
  */
 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
-		      struct drm_bridge *previous)
+		      struct drm_bridge *previous,
+		      enum drm_bridge_attach_flags flags)
 {
 	int ret;
 
@@ -134,16 +196,36 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
 		list_add(&bridge->chain_node, &encoder->bridge_chain);
 
 	if (bridge->funcs->attach) {
-		ret = bridge->funcs->attach(bridge);
-		if (ret < 0) {
-			list_del(&bridge->chain_node);
-			bridge->dev = NULL;
-			bridge->encoder = NULL;
-			return ret;
+		ret = bridge->funcs->attach(bridge, flags);
+		if (ret < 0)
+			goto err_reset_bridge;
+	}
+
+	if (bridge->funcs->atomic_reset) {
+		struct drm_bridge_state *state;
+
+		state = bridge->funcs->atomic_reset(bridge);
+		if (IS_ERR(state)) {
+			ret = PTR_ERR(state);
+			goto err_detach_bridge;
 		}
+
+		drm_atomic_private_obj_init(bridge->dev, &bridge->base,
+					    &state->base,
+					    &drm_bridge_priv_state_funcs);
 	}
 
 	return 0;
+
+err_detach_bridge:
+	if (bridge->funcs->detach)
+		bridge->funcs->detach(bridge);
+
+err_reset_bridge:
+	bridge->dev = NULL;
+	bridge->encoder = NULL;
+	list_del(&bridge->chain_node);
+	return ret;
 }
 EXPORT_SYMBOL(drm_bridge_attach);
 
@@ -155,6 +237,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
 	if (WARN_ON(!bridge->dev))
 		return;
 
+	if (bridge->funcs->atomic_reset)
+		drm_atomic_private_obj_fini(&bridge->base);
+
 	if (bridge->funcs->detach)
 		bridge->funcs->detach(bridge);
 
@@ -163,14 +248,92 @@ void drm_bridge_detach(struct drm_bridge *bridge)
 }
 
 /**
- * DOC: bridge callbacks
+ * DOC: bridge operations
  *
- * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
- * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific &drm_bridge_funcs op for all the bridges
- * during encoder configuration.
+ * Bridge drivers expose operations through the &drm_bridge_funcs structure.
+ * The DRM internals (atomic and CRTC helpers) use the helpers defined in
+ * drm_bridge.c to call bridge operations. Those operations are divided into
+ * three big categories to support different parts of the bridge usage.
  *
- * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
+ * - The encoder-related operations support control of the bridges in the
+ *   chain, and are roughly counterparts to the &drm_encoder_helper_funcs
+ *   operations. They are used by the legacy CRTC and the atomic modeset
+ *   helpers to perform mode validation, fixup and setting, and enable and
+ *   disable the bridge automatically.
+ *
+ *   The enable and disable operations are split in
+ *   &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
+ *   &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
+ *   finer-grained control.
+ *
+ *   Bridge drivers may implement the legacy version of those operations, or
+ *   the atomic version (prefixed with atomic\_), in which case they shall also
+ *   implement the atomic state bookkeeping operations
+ *   (&drm_bridge_funcs.atomic_duplicate_state,
+ *   &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
+ *   Mixing atomic and non-atomic versions of the operations is not supported.
+ *
+ * - The bus format negotiation operations
+ *   &drm_bridge_funcs.atomic_get_output_bus_fmts and
+ *   &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
+ *   negotiate the formats transmitted between bridges in the chain when
+ *   multiple formats are supported. Negotiation for formats is performed
+ *   transparently for display drivers by the atomic modeset helpers. Only
+ *   atomic versions of those operations exist, bridge drivers that need to
+ *   implement them shall thus also implement the atomic version of the
+ *   encoder-related operations. This feature is not supported by the legacy
+ *   CRTC helpers.
+ *
+ * - The connector-related operations support implementing a &drm_connector
+ *   based on a chain of bridges. DRM bridges traditionally create a
+ *   &drm_connector for bridges meant to be used at the end of the chain. This
+ *   puts additional burden on bridge drivers, especially for bridges that may
+ *   be used in the middle of a chain or at the end of it. Furthermore, it
+ *   requires all operations of the &drm_connector to be handled by a single
+ *   bridge, which doesn't always match the hardware architecture.
+ *
+ *   To simplify bridge drivers and make the connector implementation more
+ *   flexible, a new model allows bridges to unconditionally skip creation of
+ *   &drm_connector and instead expose &drm_bridge_funcs operations to support
+ *   an externally-implemented &drm_connector. Those operations are
+ *   &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
+ *   &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
+ *   &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
+ *   implemented, display drivers shall create a &drm_connector instance for
+ *   each chain of bridges, and implement those connector instances based on
+ *   the bridge connector operations.
+ *
+ *   Bridge drivers shall implement the connector-related operations for all
+ *   the features that the bridge hardware supports. For instance, if a bridge
+ *   supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
+ *   implemented. This however doesn't mean that the DDC lines are wired to the
+ *   bridge on a particular platform, as they could also be connected to an I2C
+ *   controller of the SoC. Support for the connector-related operations on the
+ *   running platform is reported through the &drm_bridge.ops flags. Bridge
+ *   drivers shall detect which operations they can support on the platform
+ *   (usually this information is provided by ACPI or DT), and set the
+ *   &drm_bridge.ops flags for all supported operations. A flag shall only be
+ *   set if the corresponding &drm_bridge_funcs operation is implemented, but
+ *   an implemented operation doesn't necessarily imply that the corresponding
+ *   flag will be set. Display drivers shall use the &drm_bridge.ops flags to
+ *   decide which bridge to delegate a connector operation to. This mechanism
+ *   allows providing a single static const &drm_bridge_funcs instance in
+ *   bridge drivers, improving security by storing function pointers in
+ *   read-only memory.
+ *
+ *   In order to ease transition, bridge drivers may support both the old and
+ *   new models by making connector creation optional and implementing the
+ *   connector-related bridge operations. Connector creation is then controlled
+ *   by the flags argument to the drm_bridge_attach() function. Display drivers
+ *   that support the new model and create connectors themselves shall set the
+ *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
+ *   connector creation. For intermediate bridges in the chain, the flag shall
+ *   be passed to the drm_bridge_attach() call for the downstream bridge.
+ *   Bridge drivers that implement only the new model shall return an error
+ *   from their &drm_bridge_funcs.attach handler when the
+ *   %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
+ *   should use the new model, and convert the bridge drivers they use if
+ *   needed, in order to gradually transition to the new model.
  */
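+
+/*
+ * A delegation sketch for the model described above (detect_bridge is a
+ * hypothetical driver variable): a display driver implementing its own
+ * connector walks the chain and remembers the last bridge that reports a
+ * given operation, here output detection.
+ *
+ *	drm_for_each_bridge_in_chain(encoder, bridge) {
+ *		if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ *			detect_bridge = bridge;
+ *	}
+ */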
 
 /**
@@ -409,10 +572,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
 
 	encoder = bridge->encoder;
 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
-		if (iter->funcs->atomic_disable)
-			iter->funcs->atomic_disable(iter, old_state);
-		else if (iter->funcs->disable)
+		if (iter->funcs->atomic_disable) {
+			struct drm_bridge_state *old_bridge_state;
+
+			old_bridge_state =
+				drm_atomic_get_old_bridge_state(old_state,
+								iter);
+			if (WARN_ON(!old_bridge_state))
+				return;
+
+			iter->funcs->atomic_disable(iter, old_bridge_state);
+		} else if (iter->funcs->disable) {
 			iter->funcs->disable(iter);
+		}
 
 		if (iter == bridge)
 			break;
@@ -443,10 +615,20 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
 
 	encoder = bridge->encoder;
 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
-		if (bridge->funcs->atomic_post_disable)
-			bridge->funcs->atomic_post_disable(bridge, old_state);
-		else if (bridge->funcs->post_disable)
+		if (bridge->funcs->atomic_post_disable) {
+			struct drm_bridge_state *old_bridge_state;
+
+			old_bridge_state =
+				drm_atomic_get_old_bridge_state(old_state,
+								bridge);
+			if (WARN_ON(!old_bridge_state))
+				return;
+
+			bridge->funcs->atomic_post_disable(bridge,
+							   old_bridge_state);
+		} else if (bridge->funcs->post_disable) {
 			bridge->funcs->post_disable(bridge);
+		}
 	}
 }
 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
@@ -475,10 +657,19 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
 
 	encoder = bridge->encoder;
 	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
-		if (iter->funcs->atomic_pre_enable)
-			iter->funcs->atomic_pre_enable(iter, old_state);
-		else if (iter->funcs->pre_enable)
+		if (iter->funcs->atomic_pre_enable) {
+			struct drm_bridge_state *old_bridge_state;
+
+			old_bridge_state =
+				drm_atomic_get_old_bridge_state(old_state,
+								iter);
+			if (WARN_ON(!old_bridge_state))
+				return;
+
+			iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+		} else if (iter->funcs->pre_enable) {
 			iter->funcs->pre_enable(iter);
+		}
 
 		if (iter == bridge)
 			break;
@@ -508,14 +699,498 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
 
 	encoder = bridge->encoder;
 	list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
-		if (bridge->funcs->atomic_enable)
-			bridge->funcs->atomic_enable(bridge, old_state);
-		else if (bridge->funcs->enable)
+		if (bridge->funcs->atomic_enable) {
+			struct drm_bridge_state *old_bridge_state;
+
+			old_bridge_state =
+				drm_atomic_get_old_bridge_state(old_state,
+								bridge);
+			if (WARN_ON(!old_bridge_state))
+				return;
+
+			bridge->funcs->atomic_enable(bridge, old_bridge_state);
+		} else if (bridge->funcs->enable) {
 			bridge->funcs->enable(bridge);
+		}
 	}
 }
 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
 
+static int drm_atomic_bridge_check(struct drm_bridge *bridge,
+				   struct drm_crtc_state *crtc_state,
+				   struct drm_connector_state *conn_state)
+{
+	if (bridge->funcs->atomic_check) {
+		struct drm_bridge_state *bridge_state;
+		int ret;
+
+		bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+							       bridge);
+		if (WARN_ON(!bridge_state))
+			return -EINVAL;
+
+		ret = bridge->funcs->atomic_check(bridge, bridge_state,
+						  crtc_state, conn_state);
+		if (ret)
+			return ret;
+	} else if (bridge->funcs->mode_fixup) {
+		if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
+					       &crtc_state->adjusted_mode))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
+				    struct drm_bridge *cur_bridge,
+				    struct drm_crtc_state *crtc_state,
+				    struct drm_connector_state *conn_state,
+				    u32 out_bus_fmt)
+{
+	struct drm_bridge_state *cur_state;
+	unsigned int num_in_bus_fmts, i;
+	struct drm_bridge *prev_bridge;
+	u32 *in_bus_fmts;
+	int ret;
+
+	prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
+	cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+						    cur_bridge);
+
+	/*
+	 * If bus format negotiation is not supported by this bridge, let's
+	 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
+	 * hope that it can handle this situation gracefully (by providing
+	 * appropriate default values).
+	 */
+	if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
+		if (cur_bridge != first_bridge) {
+			ret = select_bus_fmt_recursive(first_bridge,
+						       prev_bridge, crtc_state,
+						       conn_state,
+						       MEDIA_BUS_FMT_FIXED);
+			if (ret)
+				return ret;
+		}
+
+		/*
+		 * Driver does not implement the atomic state hooks, but that's
+		 * fine, as long as it does not access the bridge state.
+		 */
+		if (cur_state) {
+			cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
+			cur_state->output_bus_cfg.format = out_bus_fmt;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * If the driver implements ->atomic_get_input_bus_fmts() it
+	 * should also implement the atomic state hooks.
+	 */
+	if (WARN_ON(!cur_state))
+		return -EINVAL;
+
+	in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
+							cur_state,
+							crtc_state,
+							conn_state,
+							out_bus_fmt,
+							&num_in_bus_fmts);
+	if (!num_in_bus_fmts)
+		return -ENOTSUPP;
+	else if (!in_bus_fmts)
+		return -ENOMEM;
+
+	if (first_bridge == cur_bridge) {
+		cur_state->input_bus_cfg.format = in_bus_fmts[0];
+		cur_state->output_bus_cfg.format = out_bus_fmt;
+		kfree(in_bus_fmts);
+		return 0;
+	}
+
+	for (i = 0; i < num_in_bus_fmts; i++) {
+		ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
+					       crtc_state, conn_state,
+					       in_bus_fmts[i]);
+		if (ret != -ENOTSUPP)
+			break;
+	}
+
+	if (!ret) {
+		cur_state->input_bus_cfg.format = in_bus_fmts[i];
+		cur_state->output_bus_cfg.format = out_bus_fmt;
+	}
+
+	kfree(in_bus_fmts);
+	return ret;
+}
+
+/*
+ * This function is called by &drm_atomic_bridge_chain_check() just before
+ * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
+ * It performs bus format negotiation between bridge elements. The negotiation
+ * happens in reverse order, starting from the last element in the chain up to
+ * @bridge.
+ *
+ * Negotiation starts by retrieving supported output bus formats on the last
+ * bridge element and testing them one by one. The test is recursive, meaning
+ * that for each tested output format, the whole chain will be walked backward,
+ * and each element will have to choose an input bus format that can be
+ * transcoded to the requested output format. When a bridge element does not
+ * support transcoding into a specific output format, -ENOTSUPP is returned and
+ * the next bridge element will have to try a different format. If none of the
+ * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
+ *
+ * This implementation relies on
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
+ * input/output formats.
+ *
+ * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
+ * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
+ * tries a single format: &drm_connector.display_info.bus_formats[0] if
+ * available, MEDIA_BUS_FMT_FIXED otherwise.
+ *
+ * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
+ * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
+ * bridge element that lacks this hook and asks the previous element in the
+ * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
+ * to do in that case (fail if they want to enforce bus format negotiation, or
+ * provide a reasonable default if they need to support pipelines where not
+ * all elements support bus format negotiation).
+ */
+static int
+drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state)
+{
+	struct drm_connector *conn = conn_state->connector;
+	struct drm_encoder *encoder = bridge->encoder;
+	struct drm_bridge_state *last_bridge_state;
+	unsigned int i, num_out_bus_fmts;
+	struct drm_bridge *last_bridge;
+	u32 *out_bus_fmts;
+	int ret = 0;
+
+	last_bridge = list_last_entry(&encoder->bridge_chain,
+				      struct drm_bridge, chain_node);
+	last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+							    last_bridge);
+
+	if (last_bridge->funcs->atomic_get_output_bus_fmts) {
+		const struct drm_bridge_funcs *funcs = last_bridge->funcs;
+
+		/*
+		 * If the driver implements ->atomic_get_output_bus_fmts() it
+		 * should also implement the atomic state hooks.
+		 */
+		if (WARN_ON(!last_bridge_state))
+			return -EINVAL;
+
+		out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
+							last_bridge_state,
+							crtc_state,
+							conn_state,
+							&num_out_bus_fmts);
+		if (!num_out_bus_fmts)
+			return -ENOTSUPP;
+		else if (!out_bus_fmts)
+			return -ENOMEM;
+	} else {
+		num_out_bus_fmts = 1;
+		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+		if (!out_bus_fmts)
+			return -ENOMEM;
+
+		if (conn->display_info.num_bus_formats &&
+		    conn->display_info.bus_formats)
+			out_bus_fmts[0] = conn->display_info.bus_formats[0];
+		else
+			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+	}
+
+	for (i = 0; i < num_out_bus_fmts; i++) {
+		ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
+					       conn_state, out_bus_fmts[i]);
+		if (ret != -ENOTSUPP)
+			break;
+	}
+
+	kfree(out_bus_fmts);
+
+	return ret;
+}
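+
+/*
+ * A driver-side negotiation sketch (the bridge and its fixed RGB888 input
+ * are hypothetical): the returned array is owned by the caller, and
+ * *num_input_fmts must be set to 0 when no input format can produce
+ * @output_fmt.
+ *
+ *	static u32 *
+ *	my_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
+ *				     struct drm_bridge_state *bridge_state,
+ *				     struct drm_crtc_state *crtc_state,
+ *				     struct drm_connector_state *conn_state,
+ *				     u32 output_fmt,
+ *				     unsigned int *num_input_fmts)
+ *	{
+ *		u32 *input_fmts;
+ *
+ *		input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ *		if (!input_fmts) {
+ *			*num_input_fmts = 0;
+ *			return NULL;
+ *		}
+ *
+ *		*num_input_fmts = 1;
+ *		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *		return input_fmts;
+ *	}
+ */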
+
+static void
+drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
+				      struct drm_connector *conn,
+				      struct drm_atomic_state *state)
+{
+	struct drm_bridge_state *bridge_state, *next_bridge_state;
+	struct drm_bridge *next_bridge;
+	u32 output_flags = 0;
+
+	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+
+	/* No bridge state attached to this bridge => nothing to propagate. */
+	if (!bridge_state)
+		return;
+
+	next_bridge = drm_bridge_get_next_bridge(bridge);
+
+	/*
+	 * Let's try to apply the most common case here, that is, propagate
+	 * display_info flags for the last bridge, and propagate the input
+	 * flags of the next bridge element to the output end of the current
+	 * bridge when the bridge is not the last one.
+	 * There are exceptions to this rule, like when signal inversion is
+	 * happening at the board level, but that's something drivers can deal
+	 * with from their &drm_bridge_funcs.atomic_check() implementation by
+	 * simply overriding the flags value we've set here.
+	 */
+	if (!next_bridge) {
+		output_flags = conn->display_info.bus_flags;
+	} else {
+		next_bridge_state = drm_atomic_get_new_bridge_state(state,
+								next_bridge);
+		/*
+		 * No bridge state attached to the next bridge, just leave the
+		 * flags at 0.
+		 */
+		if (next_bridge_state)
+			output_flags = next_bridge_state->input_bus_cfg.flags;
+	}
+
+	bridge_state->output_bus_cfg.flags = output_flags;
+
+	/*
+	 * Propagate the output flags to the input end of the bridge. Again, it's
+	 * not necessarily what all bridges want, but that's what most of them
+	 * do, and by doing that by default we avoid forcing drivers to
+	 * duplicate the "dummy propagation" logic.
+	 */
+	bridge_state->input_bus_cfg.flags = output_flags;
+}
+
+/**
+ * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
+ * @bridge: bridge control structure
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ *
+ * First trigger a bus format negotiation before calling
+ * &drm_bridge_funcs.atomic_check() (falls back on
+ * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
+ * starting from the last bridge to the first. These are called before calling
+ * &drm_encoder_helper_funcs.atomic_check().
+ *
+ * RETURNS:
+ * 0 on success, a negative error code on failure
+ */
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+				  struct drm_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state)
+{
+	struct drm_connector *conn = conn_state->connector;
+	struct drm_encoder *encoder;
+	struct drm_bridge *iter;
+	int ret;
+
+	if (!bridge)
+		return 0;
+
+	ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
+						      conn_state);
+	if (ret)
+		return ret;
+
+	encoder = bridge->encoder;
+	list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+		int ret;
+
+		/*
+		 * Bus flags are propagated by default. If a bridge needs to
+		 * tweak the input bus flags for any reason, it should happen
+		 * in its &drm_bridge_funcs.atomic_check() implementation such
+		 * that preceding bridges in the chain can propagate the new
+		 * bus flags.
+		 */
+		drm_atomic_bridge_propagate_bus_flags(iter, conn,
+						      crtc_state->state);
+
+		ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
+		if (ret)
+			return ret;
+
+		if (iter == bridge)
+			break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
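+
+/*
+ * Drivers that don't go through drm_atomic_helper_check_modeset() shall call
+ * this manually; a sketch from a hand-rolled encoder atomic_check (variable
+ * names are hypothetical):
+ *
+ *	bridge = drm_bridge_chain_get_first_bridge(encoder);
+ *	ret = drm_atomic_bridge_chain_check(bridge, crtc_state, conn_state);
+ *	if (ret)
+ *		return ret;
+ */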
+
+/**
+ * drm_bridge_detect - check if anything is attached to the bridge output
+ * @bridge: bridge control structure
+ *
+ * If the bridge supports output detection, as reported by the
+ * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+ * bridge and return the connection status. Otherwise return
+ * connector_status_unknown.
+ *
+ * RETURNS:
+ * The detection status on success, or connector_status_unknown if the bridge
+ * doesn't support output detection.
+ */
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
+		return connector_status_unknown;
+
+	return bridge->funcs->detect(bridge);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_detect);
+
+/**
+ * drm_bridge_get_modes - fill all modes currently valid for the sink into @connector
+ * @bridge: bridge control structure
+ * @connector: the connector to fill with modes
+ *
+ * If the bridge supports output modes retrieval, as reported by the
+ * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
+ * fill the connector with all valid modes and return the number of modes
+ * added. Otherwise return 0.
+ *
+ * RETURNS:
+ * The number of modes added to the connector.
+ */
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+			 struct drm_connector *connector)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
+		return 0;
+
+	return bridge->funcs->get_modes(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
+/**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return ERR_PTR(-ENOTSUPP).
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or an error pointer otherwise.
+ */
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+				 struct drm_connector *connector)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+		return ERR_PTR(-ENOTSUPP);
+
+	return bridge->funcs->get_edid(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+
+/**
+ * drm_bridge_hpd_enable - enable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ * @cb: hot-plug detection callback
+ * @data: data to be passed to the hot-plug detection callback
+ *
+ * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
+ * and @data as hot plug notification callback. From now on the @cb will be
+ * called with @data when an output status change is detected by the bridge,
+ * until hot plug notification gets disabled with drm_bridge_hpd_disable().
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ *
+ * Only one hot plug detection callback can be registered at a time, it is an
+ * error to call this function when hot plug detection is already enabled for
+ * the bridge.
+ */
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+			   void (*cb)(void *data,
+				      enum drm_connector_status status),
+			   void *data)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+		return;
+
+	mutex_lock(&bridge->hpd_mutex);
+
+	if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
+		goto unlock;
+
+	bridge->hpd_cb = cb;
+	bridge->hpd_data = data;
+
+	if (bridge->funcs->hpd_enable)
+		bridge->funcs->hpd_enable(bridge);
+
+unlock:
+	mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
+
+/**
+ * drm_bridge_hpd_disable - disable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ *
+ * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
+ * plug detection callback previously registered with drm_bridge_hpd_enable().
+ * Once this function returns the callback will not be called by the bridge
+ * when an output status change occurs.
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ */
+void drm_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+	if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+		return;
+
+	mutex_lock(&bridge->hpd_mutex);
+	if (bridge->funcs->hpd_disable)
+		bridge->funcs->hpd_disable(bridge);
+
+	bridge->hpd_cb = NULL;
+	bridge->hpd_data = NULL;
+	mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
+
+/**
+ * drm_bridge_hpd_notify - notify hot plug detection events
+ * @bridge: bridge control structure
+ * @status: output connection status
+ *
+ * Bridge drivers shall call this function to report hot plug events when they
+ * detect a change in the output status, when hot plug detection has been
+ * enabled by drm_bridge_hpd_enable().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+			   enum drm_connector_status status)
+{
+	mutex_lock(&bridge->hpd_mutex);
+	if (bridge->hpd_cb)
+		bridge->hpd_cb(bridge->hpd_data, status);
+	mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
+
 #ifdef CONFIG_OF
 /**
  * of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
new file mode 100644
index 0000000..c6994fe
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable, no
+ * further operation is needed.
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), as well as the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
+ * flag (none of the bridges shall create a DRM connector directly).
+ */
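+
+/*
+ * A minimal creation sketch (variable setup and error unwinding elided):
+ * once every bridge has been attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR,
+ * a single call builds the connector for the whole chain.
+ *
+ *	ret = drm_bridge_attach(encoder, bridge, NULL,
+ *				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	connector = drm_bridge_connector_init(drm, encoder);
+ *	if (IS_ERR(connector))
+ *		return PTR_ERR(connector);
+ *
+ *	drm_connector_attach_encoder(connector, encoder);
+ */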
+
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+	/**
+	 * @base: The base DRM connector
+	 */
+	struct drm_connector base;
+	/**
+	 * @encoder:
+	 *
+	 * The encoder at the start of the bridges chain.
+	 */
+	struct drm_encoder *encoder;
+	/**
+	 * @bridge_edid:
+	 *
+	 * The last bridge in the chain (closest to the connector) that provides
+	 * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+	 */
+	struct drm_bridge *bridge_edid;
+	/**
+	 * @bridge_hpd:
+	 *
+	 * The last bridge in the chain (closest to the connector) that provides
+	 * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+	 */
+	struct drm_bridge *bridge_hpd;
+	/**
+	 * @bridge_detect:
+	 *
+	 * The last bridge in the chain (closest to the connector) that provides
+	 * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+	 */
+	struct drm_bridge *bridge_detect;
+	/**
+	 * @bridge_modes:
+	 *
+	 * The last bridge in the chain (closest to the connector) that provides
+	 * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+	 */
+	struct drm_bridge *bridge_modes;
+};
+
+#define to_drm_bridge_connector(x) \
+	container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+					    enum drm_connector_status status)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_bridge *bridge;
+
+	/* Notify all bridges in the pipeline of hotplug events. */
+	drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+		if (bridge->funcs->hpd_notify)
+			bridge->funcs->hpd_notify(bridge, status);
+	}
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+					enum drm_connector_status status)
+{
+	struct drm_bridge_connector *drm_bridge_connector = cb_data;
+	struct drm_connector *connector = &drm_bridge_connector->base;
+	struct drm_device *dev = connector->dev;
+	enum drm_connector_status old_status;
+
+	mutex_lock(&dev->mode_config.mutex);
+	old_status = connector->status;
+	connector->status = status;
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (old_status == status)
+		return;
+
+	drm_bridge_connector_hpd_notify(connector, status);
+
+	drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
+ * @connector: The DRM bridge connector
+ *
+ * This function enables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their resume handler.
+ */
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+	if (hpd)
+		drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+				      bridge_connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
+
+/**
+ * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
+ * connector
+ * @connector: The DRM bridge connector
+ *
+ * This function disables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their suspend handler.
+ */
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+	if (hpd)
+		drm_bridge_hpd_disable(hpd);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_bridge *detect = bridge_connector->bridge_detect;
+	enum drm_connector_status status;
+
+	if (detect) {
+		status = detect->funcs->detect(detect);
+
+		drm_bridge_connector_hpd_notify(connector, status);
+	} else {
+		switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_DPI:
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_DSI:
+			status = connector_status_connected;
+			break;
+		default:
+			status = connector_status_unknown;
+			break;
+		}
+	}
+
+	return status;
+}
+
+static void drm_bridge_connector_destroy(struct drm_connector *connector)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+
+	if (bridge_connector->bridge_hpd) {
+		struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+		drm_bridge_hpd_disable(hpd);
+	}
+
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+
+	kfree(bridge_connector);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+	.reset = drm_atomic_helper_connector_reset,
+	.detect = drm_bridge_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = drm_bridge_connector_destroy,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+					       struct drm_bridge *bridge)
+{
+	enum drm_connector_status status;
+	struct edid *edid;
+	int n;
+
+	status = drm_bridge_connector_detect(connector, false);
+	if (status != connector_status_connected)
+		goto no_edid;
+
+	edid = bridge->funcs->get_edid(bridge, connector);
+	if (!edid || !drm_edid_is_valid(edid)) {
+		kfree(edid);
+		goto no_edid;
+	}
+
+	drm_connector_update_edid_property(connector, edid);
+	n = drm_add_edid_modes(connector, edid);
+
+	kfree(edid);
+	return n;
+
+no_edid:
+	drm_connector_update_edid_property(connector, NULL);
+	return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+	struct drm_bridge *bridge;
+
+	/*
+	 * If the display exposes EDID, we parse it in the normal way to
+	 * build the table of supported modes.
+	 */
+	bridge = bridge_connector->bridge_edid;
+	if (bridge)
+		return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+	/*
+	 * Otherwise if the display pipeline reports modes (e.g. with a fixed
+	 * resolution panel or an analog TV output), query it.
+	 */
+	bridge = bridge_connector->bridge_modes;
+	if (bridge)
+		return bridge->funcs->get_modes(bridge, connector);
+
+	/*
+	 * We can't retrieve modes, which can happen for instance for a DVI or
+	 * VGA output with the DDC bus unconnected. The KMS core will add the
+	 * default modes.
+	 */
+	return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+	.get_modes = drm_bridge_connector_get_modes,
+	/* No need for .mode_valid(), the bridges are checked by the core. */
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or an ERR_PTR()-encoded
+ * negative error code otherwise.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+						struct drm_encoder *encoder)
+{
+	struct drm_bridge_connector *bridge_connector;
+	struct drm_connector *connector;
+	struct i2c_adapter *ddc = NULL;
+	struct drm_bridge *bridge;
+	int connector_type;
+
+	bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
+	if (!bridge_connector)
+		return ERR_PTR(-ENOMEM);
+
+	bridge_connector->encoder = encoder;
+
+	/*
+	 * TODO: Handle doublescan_allowed, stereo_allowed and
+	 * ycbcr_420_allowed.
+	 */
+	connector = &bridge_connector->base;
+	connector->interlace_allowed = true;
+
+	/*
+	 * Initialise connector status handling. First locate the furthest
+	 * bridges in the pipeline that support HPD and output detection. Then
+	 * initialise the connector polling mode, using HPD if available and
+	 * falling back to polling if supported. If neither HPD nor output
+	 * detection are available, we don't support hotplug detection at all.
+	 */
+	connector_type = DRM_MODE_CONNECTOR_Unknown;
+	drm_for_each_bridge_in_chain(encoder, bridge) {
+		if (!bridge->interlace_allowed)
+			connector->interlace_allowed = false;
+
+		if (bridge->ops & DRM_BRIDGE_OP_EDID)
+			bridge_connector->bridge_edid = bridge;
+		if (bridge->ops & DRM_BRIDGE_OP_HPD)
+			bridge_connector->bridge_hpd = bridge;
+		if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+			bridge_connector->bridge_detect = bridge;
+		if (bridge->ops & DRM_BRIDGE_OP_MODES)
+			bridge_connector->bridge_modes = bridge;
+
+		if (!drm_bridge_get_next_bridge(bridge))
+			connector_type = bridge->type;
+
+		if (bridge->ddc)
+			ddc = bridge->ddc;
+	}
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
+		kfree(bridge_connector);
+		return ERR_PTR(-EINVAL);
+	}
+
+	drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
+				    connector_type, ddc);
+	drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+	if (bridge_connector->bridge_hpd)
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	else if (bridge_connector->bridge_detect)
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT
+				  | DRM_CONNECTOR_POLL_DISCONNECT;
+
+	return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8ce9d73..dcabf56 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -134,7 +134,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 					 shift, add);
 }
 
-/**
+/*
  * Core function to create a range of memory available for mapping by a
  * non-root process.
  *
@@ -149,7 +149,6 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
 {
 	struct drm_local_map *map;
 	struct drm_map_list *list;
-	drm_dma_handle_t *dmah;
 	unsigned long user_token;
 	int ret;
 
@@ -324,14 +323,14 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
 		 * As we're limiting the address to 2^32-1 (or less),
 		 * casting it down to 32 bits is no problem, but we
 		 * need to point to a 64bit variable first. */
-		dmah = drm_pci_alloc(dev, map->size, map->size);
-		if (!dmah) {
+		map->handle = dma_alloc_coherent(&dev->pdev->dev,
+						 map->size,
+						 &map->offset,
+						 GFP_KERNEL);
+		if (!map->handle) {
 			kfree(map);
 			return -ENOMEM;
 		}
-		map->handle = dmah->vaddr;
-		map->offset = (unsigned long)dmah->busaddr;
-		kfree(dmah);
 		break;
 	default:
 		kfree(map);
@@ -399,7 +398,7 @@ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_legacy_findmap);
 
-/**
+/*
  * Ioctl to specify a range of memory that is available for mapping by a
  * non-root process.
  *
@@ -500,7 +499,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Remove a map private from list and deallocate resources if the mapping
  * isn't in use.
  *
@@ -513,7 +512,6 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 {
 	struct drm_map_list *r_list = NULL, *list_t;
-	drm_dma_handle_t dmah;
 	int found = 0;
 	struct drm_master *master;
 
@@ -554,10 +552,10 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 	case _DRM_SCATTER_GATHER:
 		break;
 	case _DRM_CONSISTENT:
-		dmah.vaddr = map->handle;
-		dmah.busaddr = map->offset;
-		dmah.size = map->size;
-		__drm_legacy_pci_free(dev, &dmah);
+		dma_free_coherent(&dev->pdev->dev,
+				  map->size,
+				  map->handle,
+				  map->offset);
 		break;
 	}
 	kfree(map);
@@ -661,7 +659,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
-/**
+/*
  * Cleanup after an error on one of the addbufs() functions.
  *
  * \param dev DRM device.
@@ -696,7 +694,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
 }
 
 #if IS_ENABLED(CONFIG_AGP)
-/**
+/*
  * Add AGP buffers for DMA transfers.
  *
  * \param dev struct drm_device to which the buffers are to be added.
@@ -1232,7 +1230,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
 	return 0;
 }
 
-/**
+/*
  * Add buffers for DMA transfers (ioctl).
  *
  * \param inode device inode.
@@ -1273,7 +1271,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
 	return ret;
 }
 
-/**
+/*
  * Get information about the buffer mappings.
  *
  * This was originally meant for debugging purposes, or for use by a sophisticated
@@ -1364,7 +1362,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
 	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
 }
 
-/**
+/*
  * Specifies a low and high water mark for buffer allocation
  *
  * \param inode device inode.
@@ -1413,7 +1411,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Unreserve the buffers in list, previously reserved using drmDMA.
  *
  * \param inode device inode.
@@ -1465,7 +1463,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Maps all of the DMA buffers into client-virtual space (ioctl).
  *
  * \param inode device inode.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index b031b45..6b0c6ef 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 or MIT
 /*
  * Copyright 2018 Noralf Trønnes
  */
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 3035584..7443114 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1095,15 +1095,17 @@ static int drm_client_modeset_commit_legacy(struct drm_client_dev *client)
 }
 
 /**
- * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * drm_client_modeset_commit_locked() - Force commit CRTC configuration
  * @client: DRM client
  *
- * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ * Commit modeset configuration to crtcs without checking if there is a DRM
+ * master. The assumption is that the caller already holds an internal DRM
+ * master reference acquired with drm_master_internal_acquire().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_modeset_commit_force(struct drm_client_dev *client)
+int drm_client_modeset_commit_locked(struct drm_client_dev *client)
 {
 	struct drm_device *dev = client->dev;
 	int ret;
@@ -1117,7 +1119,7 @@ int drm_client_modeset_commit_force(struct drm_client_dev *client)
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_client_modeset_commit_force);
+EXPORT_SYMBOL(drm_client_modeset_commit_locked);
 
 /**
  * drm_client_modeset_commit() - Commit CRTC configuration
@@ -1136,7 +1138,7 @@ int drm_client_modeset_commit(struct drm_client_dev *client)
 	if (!drm_master_internal_acquire(dev))
 		return -EBUSY;
 
-	ret = drm_client_modeset_commit_force(client);
+	ret = drm_client_modeset_commit_locked(client);
 
 	drm_master_internal_release(dev);
 
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2166000..644f0ad 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -112,6 +112,21 @@ void drm_connector_ida_destroy(void)
 }
 
 /**
+ * drm_get_connector_type_name - return a string for connector type
+ * @type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
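+ * For example, passing DRM_MODE_CONNECTOR_HDMIA returns "HDMI-A".
+ *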
+ * Returns: the name of the connector type, or NULL if the type is not valid.
+ */
+const char *drm_get_connector_type_name(unsigned int type)
+{
+	if (type < ARRAY_SIZE(drm_connector_enum_list))
+		return drm_connector_enum_list[type].name;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_get_connector_type_name);
+
+/**
  * drm_connector_get_cmdline_mode - reads the user's cmdline mode
  * @connector: connector to query
  *
@@ -140,6 +155,13 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
 		connector->force = mode->force;
 	}
 
+	if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
+		DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
+			 connector->name, mode->panel_orientation);
+		drm_connector_set_panel_orientation(connector,
+						    mode->panel_orientation);
+	}
+
 	DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
 		      connector->name, mode->name,
 		      mode->xres, mode->yres,
@@ -1139,7 +1161,8 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
  *	coordinates, so if userspace rotates the picture to adjust for
  *	the orientation it must also apply the same transformation to the
  *	touchscreen input coordinates. This property is initialized by calling
- *	drm_connector_init_panel_orientation_property().
+ *	drm_connector_set_panel_orientation() or
+ *	drm_connector_set_panel_orientation_with_quirk()
  *
  * scaling mode:
  *	This property defines how a non-native mode is upscaled to the native
@@ -2046,38 +2069,41 @@ void drm_connector_set_vrr_capable_property(
 EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
 
 /**
- * drm_connector_init_panel_orientation_property -
- *	initialize the connecters panel_orientation property
- * @connector: connector for which to init the panel-orientation property.
- * @width: width in pixels of the panel, used for panel quirk detection
- * @height: height in pixels of the panel, used for panel quirk detection
+ * drm_connector_set_panel_orientation - sets the connector's panel_orientation
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
  *
- * This function should only be called for built-in panels, after setting
- * connector->display_info.panel_orientation first (if known).
+ * This function sets the connector's panel_orientation and attaches
+ * a "panel orientation" property to the connector.
  *
- * This function will check for platform specific (e.g. DMI based) quirks
- * overriding display_info.panel_orientation first, then if panel_orientation
- * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
- * "panel orientation" property to the connector.
+ * Calling this function on a connector where the panel_orientation has
+ * already been set is a no-op (e.g. the orientation has been overridden with
+ * a kernel commandline option).
+ *
+ * It is allowed to call this function with a panel_orientation of
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_connector_init_panel_orientation_property(
-	struct drm_connector *connector, int width, int height)
+int drm_connector_set_panel_orientation(
+	struct drm_connector *connector,
+	enum drm_panel_orientation panel_orientation)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_info *info = &connector->display_info;
 	struct drm_property *prop;
-	int orientation_quirk;
 
-	orientation_quirk = drm_get_panel_orientation_quirk(width, height);
-	if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
-		info->panel_orientation = orientation_quirk;
-
-	if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+	/* Already set? */
+	if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
 		return 0;
 
+	/* Don't attach the property if the orientation is unknown */
+	if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		return 0;
+
+	info->panel_orientation = panel_orientation;
+
 	prop = dev->mode_config.panel_orientation_property;
 	if (!prop) {
 		prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
@@ -2094,7 +2120,37 @@ int drm_connector_init_panel_orientation_property(
 				   info->panel_orientation);
 	return 0;
 }
-EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
+EXPORT_SYMBOL(drm_connector_set_panel_orientation);
+
+/**
+ * drm_connector_set_panel_orientation_with_quirk -
+ *	set the connector's panel_orientation after checking for quirks
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * Like drm_connector_set_panel_orientation(), but with a check for platform
+ * specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_panel_orientation_with_quirk(
+	struct drm_connector *connector,
+	enum drm_panel_orientation panel_orientation,
+	int width, int height)
+{
+	int orientation_quirk;
+
+	orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+	if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+		panel_orientation = orientation_quirk;
+
+	return drm_connector_set_panel_orientation(connector,
+						   panel_orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
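+
+/*
+ * Illustrative caller (a sketch, not from this patch): a driver for a
+ * built-in panel whose orientation is unknown can still pick up platform
+ * (e.g. DMI) quirks by passing the panel's native mode dimensions:
+ *
+ *	drm_connector_set_panel_orientation_with_quirk(connector,
+ *				DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ *				mode->hdisplay, mode->vdisplay);
+ */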
 
 int drm_connector_set_obj_prop(struct drm_mode_object *obj,
 				    struct drm_property *property,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 1f802d8..c99be95 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -47,7 +47,7 @@ struct drm_ctx_list {
 /** \name Context bitmap support */
 /*@{*/
 
-/**
+/*
  * Free a handle from the context bitmap.
  *
  * \param dev DRM device.
@@ -68,7 +68,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
+/*
  * Context bitmap allocation.
  *
  * \param dev DRM device.
@@ -88,7 +88,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
 	return ret;
 }
 
-/**
+/*
  * Context bitmap initialization.
  *
  * \param dev DRM device.
@@ -104,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 	idr_init(&dev->ctx_idr);
 }
 
-/**
+/*
  * Context bitmap cleanup.
  *
  * \param dev DRM device.
@@ -163,7 +163,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
 /** \name Per Context SAREA Support */
 /*@{*/
 
-/**
+/*
  * Get per-context SAREA.
  *
  * \param inode device inode.
@@ -211,7 +211,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Set per-context SAREA.
  *
  * \param inode device inode.
@@ -263,7 +263,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
 /** \name The actual DRM context handling routines */
 /*@{*/
 
-/**
+/*
  * Switch context.
  *
  * \param dev DRM device.
@@ -290,7 +290,7 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
 	return 0;
 }
 
-/**
+/*
  * Complete context switch.
  *
  * \param dev DRM device.
@@ -318,7 +318,7 @@ static int drm_context_switch_complete(struct drm_device *dev,
 	return 0;
 }
 
-/**
+/*
  * Reserve contexts.
  *
  * \param inode device inode.
@@ -351,7 +351,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Add context.
  *
  * \param inode device inode.
@@ -404,7 +404,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Get context.
  *
  * \param inode device inode.
@@ -428,7 +428,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Switch context.
  *
  * \param inode device inode.
@@ -452,7 +452,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
 
-/**
+/*
  * New context.
  *
  * \param inode device inode.
@@ -478,7 +478,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Remove context.
  *
  * \param inode device inode.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 93a4eec..a4d36ac 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -244,10 +244,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
 		/* Disable unused encoders */
 		if (encoder->crtc == NULL)
 			drm_encoder_disable(encoder);
-		/* Disable encoders whose CRTC is about to change */
-		if (encoder_funcs->get_crtc &&
-		    encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
-			drm_encoder_disable(encoder);
 	}
 }
 
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c7d5e4c..16f2413 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -216,6 +216,8 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file_priv);
 int drm_mode_getfb(struct drm_device *dev,
 		   void *data, struct drm_file *file_priv);
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv);
 int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 			   void *data, struct drm_file *file_priv);
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index eab0f26..4e673d3 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -182,8 +182,7 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
 	for (i = 0; i < count; i++) {
 		u32 features = files[i].driver_features;
 
-		if (features != 0 &&
-		    (dev->driver->driver_features & features) != features)
+		if (features && !drm_core_check_all_features(dev, features))
 			continue;
 
 		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index e22b812..5d67a41 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -372,7 +372,7 @@ void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
 
 	crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
 
-	debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+	debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
 			    &drm_crtc_crc_control_fops);
 	debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
 			    &drm_crtc_crc_data_fops);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index e45b078..a7add55a 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -42,10 +42,10 @@
 #include "drm_legacy.h"
 
 /**
- * Initialize the DMA data.
+ * drm_legacy_dma_setup() - Initialize the DMA data.
  *
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
+ * @dev: DRM device.
+ * Return: zero on success or a negative value on failure.
  *
  * Allocate and initialize a drm_device_dma structure.
  */
@@ -71,9 +71,9 @@ int drm_legacy_dma_setup(struct drm_device *dev)
 }
 
 /**
- * Cleanup the DMA resources.
+ * drm_legacy_dma_takedown() - Cleanup the DMA resources.
  *
- * \param dev DRM device.
+ * @dev: DRM device.
  *
  * Free all pages associated with DMA buffers, the buffers and pages lists, and
  * finally the drm_device::dma structure itself.
@@ -120,10 +120,10 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
 }
 
 /**
- * Free a buffer.
+ * drm_legacy_free_buffer() - Free a buffer.
  *
- * \param dev DRM device.
- * \param buf buffer to free.
+ * @dev: DRM device.
+ * @buf: buffer to free.
  *
  * Resets the fields of \p buf.
  */
@@ -139,9 +139,10 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
 }
 
 /**
- * Reclaim the buffers.
+ * drm_legacy_reclaim_buffers() - Reclaim the buffers.
  *
- * \param file_priv DRM file private.
+ * @dev: DRM device.
+ * @file_priv: DRM file private.
  *
  * Frees each buffer associated with @file_priv not already on the hardware.
  */
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a5364b5..c6fbe6e 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -362,6 +362,65 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
 
 /**
+ * drm_dp_send_real_edid_checksum() - send back real edid checksum value
+ * @aux: DisplayPort AUX channel
+ * @real_edid_checksum: real edid checksum for the last block
+ *
+ * Returns:
+ * True on success
+ */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+				    u8 real_edid_checksum)
+{
+	u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
+
+	if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+			     &auto_test_req, 1) < 1) {
+		DRM_ERROR("DPCD failed read at register 0x%x\n",
+			  DP_DEVICE_SERVICE_IRQ_VECTOR);
+		return false;
+	}
+	auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+	if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+		DRM_ERROR("DPCD failed read at register 0x%x\n",
+			  DP_TEST_REQUEST);
+		return false;
+	}
+	link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+	if (!auto_test_req || !link_edid_read) {
+		DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+		return false;
+	}
+
+	if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+			      &auto_test_req, 1) < 1) {
+		DRM_ERROR("DPCD failed write at register 0x%x\n",
+			  DP_DEVICE_SERVICE_IRQ_VECTOR);
+		return false;
+	}
+
+	/* send back checksum for the last edid extension block data */
+	if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
+			      &real_edid_checksum, 1) < 1) {
+		DRM_ERROR("DPCD failed write at register 0x%x\n",
+			  DP_TEST_EDID_CHECKSUM);
+		return false;
+	}
+
+	test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+	if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+		DRM_ERROR("DPCD failed write at register 0x%x\n",
+			  DP_TEST_RESPONSE);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
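+
+/*
+ * Sketch of the intended call site (illustrative only): once EDID parsing
+ * has flagged corruption, a driver can answer a DP1.4 TEST_EDID_READ
+ * automated test request with the checksum recorded for the last block:
+ *
+ *	if (connector->edid_corrupt)
+ *		drm_dp_send_real_edid_checksum(aux,
+ *					       connector->real_edid_checksum);
+ */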
+
+/**
  * drm_dp_downstream_max_clock() - extract branch device max
  *                                 pixel rate for legacy VGA
  *                                 converter or max TMDS clock
@@ -470,8 +529,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
 	int len;
 	uint8_t rev[2];
 	int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
-	bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
-			     DP_DWN_STRM_PORT_PRESENT;
+	bool branch_device = drm_dp_is_branch(dpcd);
 
 	seq_printf(m, "\tDP branch device present: %s\n",
 		   branch_device ? "yes" : "no");
@@ -1222,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
 #undef DEVICE_ID_ANY
 #undef DEVICE_ID
 
+struct edid_quirk {
+	u8 mfg_id[2];
+	u8 prod_id[2];
+	u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+	/*
+	 * Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+	 * only supports DPCD backlight controls
+	 */
+	{ MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+	/*
+	 * Some Dell CML 2020 systems have panels support both AUX and PWM
+	 * backlight control, and some only support AUX backlight control. All
+	 * said panels start up in AUX mode by default, and we don't have any
+	 * support for disabling HDR mode on these panels which would be
+	 * required to switch to PWM backlight control mode (plus, I'm not
+	 * even sure we want PWM backlight controls over DPCD backlight
+	 * controls anyway...). Until we have a better way of detecting these,
+	 * force DPCD backlight mode on all of them.
+	 */
+	{ MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+	{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+	{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+	{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
+ * of manufacturers don't seem to like following standards and neglect to fill
+ * the dev-ID in, making it impossible to only use OUIDs for determining
+ * quirks in some cases. This function can be used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+	const struct edid_quirk *quirk;
+	u32 quirks = 0;
+	int i;
+
+	if (!edid)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+		quirk = &edid_quirk_list[i];
+		if (memcmp(quirk->mfg_id, edid->mfg_id,
+			   sizeof(edid->mfg_id)) == 0 &&
+		    memcmp(quirk->prod_id, edid->prod_code,
+			   sizeof(edid->prod_code)) == 0)
+			quirks |= quirk->quirks;
+	}
+
+	DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+		      (int)sizeof(edid->mfg_id), edid->mfg_id,
+		      (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+	return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
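+
+/*
+ * For illustration, a driver would typically feed the result into
+ * drm_dp_has_quirk() together with the DPCD descriptor quirks (sketch):
+ *
+ *	u32 edid_quirks = drm_dp_get_edid_quirks(edid);
+ *
+ *	if (drm_dp_has_quirk(&desc, edid_quirks,
+ *			     DP_QUIRK_FORCE_DPCD_BACKLIGHT))
+ *		use_dpcd_backlight = true;
+ */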
+
 /**
  * drm_dp_read_desc - read sink/branch descriptor from DPCD
  * @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ed0fea2..70c4b7a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -736,6 +736,10 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 	if (msg->curchunk_idx >= msg->curchunk_len) {
 		/* do CRC */
 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+		if (crc4 != msg->chunk[msg->curchunk_len - 1])
+			print_hex_dump(KERN_DEBUG, "wrong crc",
+				       DUMP_PREFIX_NONE, 16, 1,
+				       msg->chunk, msg->curchunk_len, false);
 		/* copy chunk into bigger msg */
 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
 		msg->curlen += msg->curchunk_len - 1;
@@ -1035,7 +1039,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
 	}
 }
 
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
 {
 	struct drm_dp_sideband_msg_req_body req;
 
@@ -1045,17 +1050,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
 	req.u.dpcd_write.num_bytes = num_bytes;
 	req.u.dpcd_write.bytes = bytes;
 	drm_dp_encode_sideband_req(&req, msg);
-
-	return 0;
 }
 
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
 {
 	struct drm_dp_sideband_msg_req_body req;
 
 	req.req_type = DP_LINK_ADDRESS;
 	drm_dp_encode_sideband_req(&req, msg);
-	return 0;
 }
 
 static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
@@ -1067,7 +1069,8 @@ static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
 	return 0;
 }
 
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+				     int port_num)
 {
 	struct drm_dp_sideband_msg_req_body req;
 
@@ -1078,10 +1081,11 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
 	return 0;
 }
 
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
-				  u8 vcpi, uint16_t pbn,
-				  u8 number_sdp_streams,
-				  u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+				   int port_num,
+				   u8 vcpi, uint16_t pbn,
+				   u8 number_sdp_streams,
+				   u8 *sdp_stream_sink)
 {
 	struct drm_dp_sideband_msg_req_body req;
 	memset(&req, 0, sizeof(req));
@@ -1094,11 +1098,10 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
 		   number_sdp_streams);
 	drm_dp_encode_sideband_req(&req, msg);
 	msg->path_msg = true;
-	return 0;
 }
 
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
-				  int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+				   int port_num, bool power_up)
 {
 	struct drm_dp_sideband_msg_req_body req;
 
@@ -1110,7 +1113,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
 	req.u.port_num.port_number = port_num;
 	drm_dp_encode_sideband_req(&req, msg);
 	msg->path_msg = true;
-	return 0;
 }
 
 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
@@ -2061,7 +2063,7 @@ ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
  * sideband messaging as drm_dp_dpcd_write() does for local
  * devices via actual AUX CH.
  *
- * Return: 0 on success, negative error code on failure.
+ * Return: number of bytes written on success, negative error code on failure.
  */
 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
 			      unsigned int offset, void *buffer, size_t size)
@@ -2073,29 +2075,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
 				      offset, size, buffer);
 }
 
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
-	int ret;
+	int ret = 0;
 
 	memcpy(mstb->guid, guid, 16);
 
 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
 		if (mstb->port_parent) {
-			ret = drm_dp_send_dpcd_write(
-					mstb->mgr,
-					mstb->port_parent,
-					DP_GUID,
-					16,
-					mstb->guid);
+			ret = drm_dp_send_dpcd_write(mstb->mgr,
+						     mstb->port_parent,
+						     DP_GUID, 16, mstb->guid);
 		} else {
-
-			ret = drm_dp_dpcd_write(
-					mstb->mgr->aux,
-					DP_GUID,
-					mstb->guid,
-					16);
+			ret = drm_dp_dpcd_write(mstb->mgr->aux,
+						DP_GUID, mstb->guid, 16);
 		}
 	}
+
+	if (ret < 16 && ret > 0)
+		return -EPROTO;
+
+	return ret == 16 ? 0 : ret;
 }
 
 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2178,7 +2178,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
 		drm_connector_set_tile_property(port->connector);
 	}
 
-	mgr->cbs->register_connector(port->connector);
+	drm_connector_register(port->connector);
 	return;
 
 error:
@@ -2641,7 +2641,8 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
 	return false;
 }
 
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
+			    u8 port_num, u32 offset, u8 num_bytes)
 {
 	struct drm_dp_sideband_msg_req_body req;
 
@@ -2650,8 +2651,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
 	req.u.dpcd_read.dpcd_address = offset;
 	req.u.dpcd_read.num_bytes = num_bytes;
 	drm_dp_encode_sideband_req(&req, msg);
-
-	return 0;
 }
 
 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
@@ -2877,7 +2876,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_link_address_ack_reply *reply;
 	struct drm_dp_mst_port *port, *tmp;
-	int i, len, ret, port_mask = 0;
+	int i, ret, port_mask = 0;
 	bool changed = false;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2885,7 +2884,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 		return -ENOMEM;
 
 	txmsg->dst = mstb;
-	len = build_link_address(txmsg);
+	build_link_address(txmsg);
 
 	mstb->link_address_sent = true;
 	drm_dp_queue_down_tx(mgr, txmsg);
@@ -2906,7 +2905,15 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 	DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
 	drm_dp_dump_link_address(reply);
 
-	drm_dp_check_mstb_guid(mstb, reply->guid);
+	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+	if (ret) {
+		char buf[64];
+
+		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
+		DRM_ERROR("GUID check on %s failed: %d\n", buf, ret);
+		goto out;
+	}
 
 	for (i = 0; i < reply->nports; i++) {
 		port_mask |= BIT(reply->ports[i].port_number);
@@ -2947,14 +2954,14 @@ void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_mst_branch *mstb)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
-	int len, ret;
+	int ret;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 	if (!txmsg)
 		return;
 
 	txmsg->dst = mstb;
-	len = build_clear_payload_id_table(txmsg);
+	build_clear_payload_id_table(txmsg);
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
@@ -2972,7 +2979,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 {
 	struct drm_dp_enum_path_resources_ack_reply *path_res;
 	struct drm_dp_sideband_msg_tx *txmsg;
-	int len;
 	int ret;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2980,7 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 		return -ENOMEM;
 
 	txmsg->dst = mstb;
-	len = build_enum_path_resources(txmsg, port->port_num);
+	build_enum_path_resources(txmsg, port->port_num);
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
@@ -3073,7 +3079,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
-	int len, ret, port_num;
+	int ret, port_num;
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
@@ -3098,9 +3104,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 		sinks[i] = i;
 
 	txmsg->dst = mstb;
-	len = build_allocate_payload(txmsg, port_num,
-				     id,
-				     pbn, port->num_sdp_streams, sinks);
+	build_allocate_payload(txmsg, port_num, id, pbn,
+			       port->num_sdp_streams, sinks);
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
@@ -3129,7 +3135,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port, bool power_up)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
-	int len, ret;
+	int ret;
 
 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
@@ -3142,7 +3148,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 	}
 
 	txmsg->dst = port->parent;
-	len = build_power_updown_phy(txmsg, port->port_num, power_up);
+	build_power_updown_phy(txmsg, port->port_num, power_up);
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
@@ -3364,7 +3370,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 				 struct drm_dp_mst_port *port,
 				 int offset, int size, u8 *bytes)
 {
-	int len;
 	int ret = 0;
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
@@ -3379,7 +3384,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
 		goto fail_put;
 	}
 
-	len = build_dpcd_read(txmsg, port->port_num, offset, size);
+	build_dpcd_read(txmsg, port->port_num, offset, size);
 	txmsg->dst = port->parent;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
@@ -3417,7 +3422,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 				  struct drm_dp_mst_port *port,
 				  int offset, int size, u8 *bytes)
 {
-	int len;
 	int ret;
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
@@ -3432,18 +3436,15 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 		goto fail_put;
 	}
 
-	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
 	txmsg->dst = mstb;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-	if (ret > 0) {
-		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-			ret = -EIO;
-		else
-			ret = 0;
-	}
+	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+		ret = -EIO;
+
 	kfree(txmsg);
 fail_put:
 	drm_dp_mst_topology_put_mstb(mstb);
@@ -3504,9 +3505,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
 {
 	int ret = 0;
-	int i = 0;
 	struct drm_dp_mst_branch *mstb = NULL;
 
+	mutex_lock(&mgr->payload_lock);
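+	/*
+	 * payload_lock is held across the whole enable/disable transition so
+	 * the payload table and proposed VCPI reset below cannot race with a
+	 * concurrent payload update.
+	 */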
 	mutex_lock(&mgr->lock);
 	if (mst_state == mgr->mst_state)
 		goto out_unlock;
@@ -3514,6 +3515,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 	mgr->mst_state = mst_state;
 	/* set the device into MST mode */
 	if (mst_state) {
+		struct drm_dp_payload reset_pay;
+
 		WARN_ON(mgr->mst_primary);
 
 		/* get dpcd info */
@@ -3543,17 +3546,15 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
 
 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
+					 DP_MST_EN |
+					 DP_UP_REQ_EN |
+					 DP_UPSTREAM_IS_SRC);
+		if (ret < 0)
 			goto out_unlock;
-		}
 
-		{
-			struct drm_dp_payload reset_pay;
-			reset_pay.start_slot = 0;
-			reset_pay.num_slots = 0x3f;
-			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-		}
+		reset_pay.start_slot = 0;
+		reset_pay.num_slots = 0x3f;
+		drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
 
 		queue_work(system_long_wq, &mgr->work);
 
@@ -3565,27 +3566,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 		/* this can fail if the device is gone */
 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
 		ret = 0;
-		mutex_lock(&mgr->payload_lock);
-		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+		memset(mgr->payloads, 0,
+		       mgr->max_payloads * sizeof(mgr->payloads[0]));
+		memset(mgr->proposed_vcpis, 0,
+		       mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
 		mgr->payload_mask = 0;
 		set_bit(0, &mgr->payload_mask);
-		for (i = 0; i < mgr->max_payloads; i++) {
-			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
-			if (vcpi) {
-				vcpi->vcpi = 0;
-				vcpi->num_slots = 0;
-			}
-			mgr->proposed_vcpis[i] = NULL;
-		}
 		mgr->vcpi_mask = 0;
-		mutex_unlock(&mgr->payload_lock);
-
 		mgr->payload_id_table_cleared = false;
 	}
 
 out_unlock:
 	mutex_unlock(&mgr->lock);
+	mutex_unlock(&mgr->payload_lock);
 	if (mstb)
 		drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
@@ -3682,7 +3675,12 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
 		goto out_fail;
 	}
-	drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+	if (ret) {
+		DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+		goto out_fail;
+	}
 
 	/*
 	 * For the final step of resuming the topology, we need to bring the
@@ -3709,7 +3707,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
-	int replylen, origlen, curreply;
+	int replylen, curreply;
 	int ret;
 	struct drm_dp_sideband_msg_rx *msg;
 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
@@ -3729,7 +3727,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
-	origlen = replylen;
 	replylen -= len;
 	curreply = len;
 	while (replylen > 0) {
@@ -4625,15 +4622,34 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 		int ret;
 
 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+		if (ret != DP_RECEIVER_CAP_SIZE) {
+			seq_printf(m, "dpcd read failed\n");
+			goto out;
+		}
 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
+
 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+		if (ret != 2) {
+			seq_printf(m, "faux/mst read failed\n");
+			goto out;
+		}
 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
+
 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+		if (ret != 1) {
+			seq_printf(m, "mst ctrl read failed\n");
+			goto out;
+		}
 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
 
 		/* dump the standard OUI branch header */
 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+			seq_printf(m, "branch oui read failed\n");
+			goto out;
+		}
 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
+
 		for (i = 0x3; i < 0x8 && buf[i]; i++)
 			seq_printf(m, "%c", buf[i]);
 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
@@ -4642,6 +4658,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
 	}
 
+out:
 	mutex_unlock(&mgr->lock);
 
 }
@@ -4657,11 +4674,23 @@ static void drm_dp_tx_work(struct work_struct *work)
 	mutex_unlock(&mgr->qlock);
 }
 
+static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+{
+	if (!port->connector)
+		return;
+
+	if (port->mgr->cbs->destroy_connector) {
+		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+	} else {
+		drm_connector_unregister(port->connector);
+		drm_connector_put(port->connector);
+	}
+}
+
 static inline void
 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
 {
-	if (port->connector)
-		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+	drm_dp_destroy_connector(port);
 
 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
 	drm_dp_mst_put_port_malloc(port);
@@ -5529,7 +5558,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
 		return NULL;
 
-	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+	if (drm_dp_has_quirk(&desc, 0,
+			     DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
 	    port->parent == port->mgr->mst_primary) {
 		u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7c18a98..7b1a628 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	struct drm_driver *driver = dev->driver;
 	int ret;
 
-	mutex_lock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
 
 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
 	if (ret)
@@ -986,7 +987,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
 out_unlock:
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(drm_dev_register);
@@ -1079,17 +1081,14 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
 
 	DRM_DEBUG("\n");
 
-	mutex_lock(&drm_global_mutex);
 	minor = drm_minor_acquire(iminor(inode));
-	if (IS_ERR(minor)) {
-		err = PTR_ERR(minor);
-		goto out_unlock;
-	}
+	if (IS_ERR(minor))
+		return PTR_ERR(minor);
 
 	new_fops = fops_get(minor->dev->driver->fops);
 	if (!new_fops) {
 		err = -ENODEV;
-		goto out_release;
+		goto out;
 	}
 
 	replace_fops(filp, new_fops);
@@ -1098,10 +1097,9 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
 	else
 		err = 0;
 
-out_release:
+out:
 	drm_minor_release(minor);
-out_unlock:
-	mutex_unlock(&drm_global_mutex);
+
 	return err;
 }
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 805fb00..1164511 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1590,11 +1590,22 @@ static int validate_displayid(u8 *displayid, int length, int idx);
 static int drm_edid_block_checksum(const u8 *raw_edid)
 {
 	int i;
-	u8 csum = 0;
-	for (i = 0; i < EDID_LENGTH; i++)
+	u8 csum = 0, crc = 0;
+
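+	/*
+	 * All 128 bytes of a valid block sum to 0 (mod 256); return the
+	 * checksum byte the block is therefore expected to carry.
+	 */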
+	for (i = 0; i < EDID_LENGTH - 1; i++)
 		csum += raw_edid[i];
 
-	return csum;
+	crc = 0x100 - csum;
+
+	return crc;
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+	return raw_edid[EDID_LENGTH - 1] != real_checksum;
 }
 
 static bool drm_edid_is_zero(const u8 *in_edid, int length)
@@ -1652,7 +1663,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
 	}
 
 	csum = drm_edid_block_checksum(raw_edid);
-	if (csum) {
+	if (drm_edid_block_checksum_diff(raw_edid, csum)) {
 		if (edid_corrupt)
 			*edid_corrupt = true;
 
@@ -1793,6 +1804,11 @@ static void connector_bad_edid(struct drm_connector *connector,
 			       u8 *edid, int num_blocks)
 {
 	int i;
+	u8 num_of_ext = edid[0x7e];
+
+	/* Calculate real checksum for the last edid extension block data */
+	connector->real_edid_checksum =
+		drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
 
 	if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
 		return;
@@ -2196,15 +2212,29 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_mode_find_dmt);
 
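+/*
+ * Per the EDID spec, an 18-byte data block is a display descriptor (as
+ * opposed to a detailed timing descriptor) when the would-be pixel clock
+ * field is zero: bytes 0-2 are 0x00 and byte 3 carries the descriptor tag.
+ */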
+static bool is_display_descriptor(const u8 d[18], u8 tag)
+{
+	return d[0] == 0x00 && d[1] == 0x00 &&
+		d[2] == 0x00 && d[3] == tag;
+}
+
+static bool is_detailed_timing_descriptor(const u8 d[18])
+{
+	return d[0] != 0x00 || d[1] != 0x00;
+}
+
 typedef void detailed_cb(struct detailed_timing *timing, void *closure);
 
 static void
 cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
 {
-	int i, n = 0;
+	int i, n;
 	u8 d = ext[0x02];
 	u8 *det_base = ext + d;
 
+	if (d < 4 || d > 127)
+		return;
+
 	n = (127 - d) / 18;
 	for (i = 0; i < n; i++)
 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
@@ -2254,9 +2284,12 @@ static void
 is_rb(struct detailed_timing *t, void *data)
 {
 	u8 *r = (u8 *)t;
-	if (r[3] == EDID_DETAIL_MONITOR_RANGE)
-		if (r[15] & 0x10)
-			*(bool *)data = true;
+
+	if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+		return;
+
+	if (r[15] & 0x10)
+		*(bool *)data = true;
 }
 
 /* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
@@ -2276,7 +2309,11 @@ static void
 find_gtf2(struct detailed_timing *t, void *data)
 {
 	u8 *r = (u8 *)t;
-	if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+
+	if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+		return;
+
+	if (r[10] == 0x02)
 		*(u8 **)data = r;
 }
 
@@ -2815,13 +2852,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c)
 	struct detailed_non_pixel *data = &timing->data.other_data;
 	struct detailed_data_monitor_range *range = &data->data.range;
 
-	if (data->type != EDID_DETAIL_MONITOR_RANGE)
+	if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
 		return;
 
 	closure->modes += drm_dmt_modes_for_range(closure->connector,
 						  closure->edid,
 						  timing);
-	
+
 	if (!version_greater(closure->edid, 1, 1))
 		return; /* GTF not defined yet */
 
@@ -2894,10 +2931,11 @@ static void
 do_established_modes(struct detailed_timing *timing, void *c)
 {
 	struct detailed_mode_closure *closure = c;
-	struct detailed_non_pixel *data = &timing->data.other_data;
 
-	if (data->type == EDID_DETAIL_EST_TIMINGS)
-		closure->modes += drm_est3_modes(closure->connector, timing);
+	if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_EST_TIMINGS))
+		return;
+
+	closure->modes += drm_est3_modes(closure->connector, timing);
 }
 
 /**
@@ -2946,19 +2984,19 @@ do_standard_modes(struct detailed_timing *timing, void *c)
 	struct detailed_non_pixel *data = &timing->data.other_data;
 	struct drm_connector *connector = closure->connector;
 	struct edid *edid = closure->edid;
+	int i;
 
-	if (data->type == EDID_DETAIL_STD_MODES) {
-		int i;
-		for (i = 0; i < 6; i++) {
-			struct std_timing *std;
-			struct drm_display_mode *newmode;
+	if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_STD_MODES))
+		return;
 
-			std = &data->data.timings[i];
-			newmode = drm_mode_std(connector, edid, std);
-			if (newmode) {
-				drm_mode_probed_add(connector, newmode);
-				closure->modes++;
-			}
+	for (i = 0; i < 6; i++) {
+		struct std_timing *std = &data->data.timings[i];
+		struct drm_display_mode *newmode;
+
+		newmode = drm_mode_std(connector, edid, std);
+		if (newmode) {
+			drm_mode_probed_add(connector, newmode);
+			closure->modes++;
 		}
 	}
 }
@@ -3053,15 +3091,16 @@ static void
 do_cvt_mode(struct detailed_timing *timing, void *c)
 {
 	struct detailed_mode_closure *closure = c;
-	struct detailed_non_pixel *data = &timing->data.other_data;
 
-	if (data->type == EDID_DETAIL_CVT_3BYTE)
-		closure->modes += drm_cvt_modes(closure->connector, timing);
+	if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_CVT_3BYTE))
+		return;
+
+	closure->modes += drm_cvt_modes(closure->connector, timing);
 }
 
 static int
 add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{	
+{
 	struct detailed_mode_closure closure = {
 		.connector = connector,
 		.edid = edid,
@@ -3083,27 +3122,28 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
 	struct detailed_mode_closure *closure = c;
 	struct drm_display_mode *newmode;
 
-	if (timing->pixel_clock) {
-		newmode = drm_mode_detailed(closure->connector->dev,
-					    closure->edid, timing,
-					    closure->quirks);
-		if (!newmode)
-			return;
+	if (!is_detailed_timing_descriptor((const u8 *)timing))
+		return;
 
-		if (closure->preferred)
-			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+	newmode = drm_mode_detailed(closure->connector->dev,
+				    closure->edid, timing,
+				    closure->quirks);
+	if (!newmode)
+		return;
 
-		/*
-		 * Detailed modes are limited to 10kHz pixel clock resolution,
-		 * so fix up anything that looks like CEA/HDMI mode, but the clock
-		 * is just slightly off.
-		 */
-		fixup_detailed_cea_mode_clock(newmode);
+	if (closure->preferred)
+		newmode->type |= DRM_MODE_TYPE_PREFERRED;
 
-		drm_mode_probed_add(closure->connector, newmode);
-		closure->modes++;
-		closure->preferred = false;
-	}
+	/*
+	 * Detailed modes are limited to 10kHz pixel clock resolution,
+	 * so fix up anything that looks like CEA/HDMI mode, but the clock
+	 * is just slightly off.
+	 */
+	fixup_detailed_cea_mode_clock(newmode);
+
+	drm_mode_probed_add(closure->connector, newmode);
+	closure->modes++;
+	closure->preferred = false;
 }
 
 /*
@@ -3953,6 +3993,13 @@ cea_db_tag(const u8 *db)
 static int
 cea_revision(const u8 *cea)
 {
+	/*
+	 * FIXME is this correct for the DispID variant?
+	 * The DispID spec doesn't really specify whether
+	 * this is the revision of the CEA extension or
+	 * the DispID CEA data block. And the only value
+	 * given as an example is 0.
+	 */
 	return cea[1];
 }
 
@@ -3977,6 +4024,10 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
 	 *   no non-DTD data.
 	 */
 	if (cea[0] == DATA_BLOCK_CTA) {
+		/*
+		 * for_each_displayid_db() has already verified
+		 * that these stay within expected bounds.
+		 */
 		*start = 3;
 		*end = *start + cea[2];
 	} else if (cea[0] == CEA_EXT) {
@@ -4282,8 +4333,10 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
 static void
 monitor_name(struct detailed_timing *t, void *data)
 {
-	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
-		*(u8 **)data = t->data.other_data.data.str.str;
+	if (!is_display_descriptor((const u8 *)t, EDID_DETAIL_MONITOR_NAME))
+		return;
+
+	*(u8 **)data = t->data.other_data.data.str.str;
 }
 
 static int get_monitor_name(struct edid *edid, char name[13])
@@ -4316,7 +4369,7 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
 {
 	int name_length;
 	char buf[13];
-	
+
 	if (bufsize <= 0)
 		return;
 
@@ -4381,6 +4434,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 
 	if (cea_revision(cea) >= 3) {
 		int i, start, end;
+		int sad_count;
 
 		if (cea_db_offsets(cea, &start, &end)) {
 			start = 0;
@@ -4392,8 +4446,6 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 			dbl = cea_db_payload_len(db);
 
 			switch (cea_db_tag(db)) {
-				int sad_count;
-
 			case AUDIO_BLOCK:
 				/* Audio Data Block, contains SADs */
 				sad_count = min(dbl / 3, 15 - total_sad_count);
@@ -4594,6 +4646,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
  *
  * Parse the CEA extension according to CEA-861-B.
  *
+ * Drivers that have added the modes parsed from EDID to drm_display_info
+ * should use &drm_display_info.is_hdmi instead of calling this function.
+ *
  * Return: True if the monitor is HDMI, false if not or unknown.
  */
 bool drm_detect_hdmi_monitor(struct edid *edid)
@@ -4828,6 +4883,8 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
 	struct drm_display_info *info = &connector->display_info;
 	u8 len = cea_db_payload_len(db);
 
+	info->is_hdmi = true;
+
 	if (len >= 6)
 		info->dvi_dual = db[6] & 1;
 	if (len >= 7)
@@ -4880,6 +4937,47 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
 	}
 }
 
+static
+void get_monitor_range(struct detailed_timing *timing,
+		       void *info_monitor_range)
+{
+	struct drm_monitor_range_info *monitor_range = info_monitor_range;
+	const struct detailed_non_pixel *data = &timing->data.other_data;
+	const struct detailed_data_monitor_range *range = &data->data.range;
+
+	if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
+		return;
+
+	/*
+	 * Check for flag range limits only. If flag == 1 then
+	 * no additional timing information provided.
+	 * Default GTF, GTF Secondary curve and CVT are not
+	 * supported
+	 */
+	if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
+		return;
+
+	monitor_range->min_vfreq = range->min_vfreq;
+	monitor_range->max_vfreq = range->max_vfreq;
+}
+
+static
+void drm_get_monitor_range(struct drm_connector *connector,
+			   const struct edid *edid)
+{
+	struct drm_display_info *info = &connector->display_info;
+
+	if (!version_greater(edid, 1, 1))
+		return;
+
+	drm_for_each_detailed_block((u8 *)edid, get_monitor_range,
+				    &info->monitor_range);
+
+	DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+		      info->monitor_range.min_vfreq,
+		      info->monitor_range.max_vfreq);
+}
+
 /* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
  * all of the values which would have been set from EDID
  */
@@ -4896,11 +4994,13 @@ drm_reset_display_info(struct drm_connector *connector)
 	info->cea_rev = 0;
 	info->max_tmds_clock = 0;
 	info->dvi_dual = false;
+	info->is_hdmi = false;
 	info->has_hdmi_infoframe = false;
 	info->rgb_quant_range_selectable = false;
 	memset(&info->hdmi, 0, sizeof(info->hdmi));
 
 	info->non_desktop = 0;
+	memset(&info->monitor_range, 0, sizeof(info->monitor_range));
 }
 
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -4916,6 +5016,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
 
 	info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
+	drm_get_monitor_range(connector, edid);
+
 	DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
 
 	if (edid->revision < 3)
@@ -5396,14 +5498,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 {
 	enum hdmi_picture_aspect picture_aspect;
 	u8 vic, hdmi_vic;
-	int err;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
-	err = hdmi_avi_infoframe_init(frame);
-	if (err < 0)
-		return err;
+	hdmi_avi_infoframe_init(frame);
 
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		frame->pixel_repeat = 1;
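
The descriptor helpers added above encode a single EDID rule: an 18-byte data block whose pixel-clock field (bytes 0-1) is zero is a display descriptor, with its tag in byte 3; any other block is a detailed timing. A minimal userspace-style sketch of the same classification (the tag value 0xfd is the EDID 1.4 range-limits descriptor; the buffer is assumed to come from an already-validated EDID):

    #include <stdint.h>
    #include <stdio.h>

    #define DETAIL_MONITOR_RANGE 0xfd /* range-limits display descriptor */

    /* Classify one raw 18-byte EDID block the way the helpers above do. */
    static void classify_block(const uint8_t d[18])
    {
            if (d[0] || d[1]) {
                    /* Detailed timing: pixel clock, little endian, 10 kHz units. */
                    printf("detailed timing, %u kHz\n", (d[0] | (d[1] << 8)) * 10);
            } else if (d[2] == 0x00 && d[3] == DETAIL_MONITOR_RANGE) {
                    /* Bytes 5/6 carry min/max vertical refresh in Hz. */
                    printf("vrefresh %u-%u Hz\n", d[5], d[6]);
            }
    }
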
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4c7cbce..a9771de 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -250,17 +250,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 		return 0;
 
 	mutex_lock(&fb_helper->lock);
-	/*
-	 * TODO:
-	 * We should bail out here if there is a master by dropping _force.
-	 * Currently these igt tests fail if we do that:
-	 * - kms_fbcon_fbt@psr
-	 * - kms_fbcon_fbt@psr-suspend
-	 *
-	 * So first these tests need to be fixed so they drop master or don't
-	 * have an fd open.
-	 */
-	ret = drm_client_modeset_commit_force(&fb_helper->client);
+	ret = drm_client_modeset_commit(&fb_helper->client);
 
 	do_delayed = fb_helper->delayed_hotplug;
 	if (do_delayed)
@@ -294,7 +284,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
 			continue;
 
 		mutex_lock(&helper->lock);
-		ret = drm_client_modeset_commit_force(&helper->client);
+		ret = drm_client_modeset_commit_locked(&helper->client);
 		if (ret)
 			error = true;
 		mutex_unlock(&helper->lock);
@@ -460,7 +450,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  * drm_fb_helper_init - initialize a &struct drm_fb_helper
  * @dev: drm device
  * @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count (not used)
  *
  * This allocates the structures for the fbdev helper with the given limits.
  * Note that this won't yet touch the hardware (through the driver interfaces)
@@ -473,8 +462,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
  * Zero if everything went ok, nonzero otherwise.
  */
 int drm_fb_helper_init(struct drm_device *dev,
-		       struct drm_fb_helper *fb_helper,
-		       int max_conn_count)
+		       struct drm_fb_helper *fb_helper)
 {
 	int ret;
 
@@ -1357,7 +1345,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 
 	pan_set(fb_helper, var->xoffset, var->yoffset);
 
-	ret = drm_client_modeset_commit_force(&fb_helper->client);
+	ret = drm_client_modeset_commit_locked(&fb_helper->client);
 	if (!ret) {
 		info->var.xoffset = var->xoffset;
 		info->var.yoffset = var->yoffset;
@@ -2135,7 +2123,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
 
 	drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
 
-	ret = drm_fb_helper_init(dev, fb_helper, 0);
+	ret = drm_fb_helper_init(dev, fb_helper);
 	if (ret)
 		goto err;
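
Since @max_conn_count was never used, callers simply drop the argument. A hedged sketch of the updated fbdev setup sequence in a driver (the error labels and the preferred_bpp variable are illustrative):

    ret = drm_fb_helper_init(dev, fb_helper);
    if (ret)
            goto err_free;

    /* Connector handling now lives in the drm_client infrastructure;
     * no drm_fb_helper_single_add_all_connectors() step remains. */
    ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
    if (ret)
            goto err_fini;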
 
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724..c4c704e 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -51,6 +51,37 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+	/*
+	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
+	 * bother. They also still need BKL locking for their ioctls, so better
+	 * safe than sorry.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_LEGACY))
+		return true;
+
+	/*
+	 * The deprecated ->load callback must be called after the driver is
+	 * already registered. This means such drivers rely on the BKL to make
+	 * sure an open can't proceed until the driver is actually fully set up.
+	 * Similar hilarity holds for the unload callback.
+	 */
+	if (dev->driver->load || dev->driver->unload)
+		return true;
+
+	/*
+	 * Drivers with the lastclose callback assume that it's synchronized
+	 * against concurrent opens, which again needs the BKL. The proper fix
+	 * is to use the drm_client infrastructure with proper locking for each
+	 * client.
+	 */
+	if (dev->driver->lastclose)
+		return true;
+
+	return false;
+}
+
 /**
  * DOC: file operations
  *
@@ -220,7 +251,7 @@ void drm_file_free(struct drm_file *file)
 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
 		  task_pid_nr(current),
 		  (long)old_encode_dev(file->minor->kdev->devt),
-		  dev->open_count);
+		  atomic_read(&dev->open_count));
 
 	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
 	    dev->driver->preclose)
@@ -379,7 +410,10 @@ int drm_open(struct inode *inode, struct file *filp)
 		return PTR_ERR(minor);
 
 	dev = minor->dev;
-	if (!dev->open_count++)
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
+
+	if (!atomic_fetch_inc(&dev->open_count))
 		need_setup = 1;
 
 	/* share address_space across all char-devs of a single device */
@@ -395,10 +429,16 @@ int drm_open(struct inode *inode, struct file *filp)
 			goto err_undo;
 		}
 	}
+
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
+
 	return 0;
 
 err_undo:
-	dev->open_count--;
+	atomic_dec(&dev->open_count);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 	drm_minor_release(minor);
 	return retcode;
 }
@@ -438,16 +478,18 @@ int drm_release(struct inode *inode, struct file *filp)
 	struct drm_minor *minor = file_priv->minor;
 	struct drm_device *dev = minor->dev;
 
-	mutex_lock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_lock(&drm_global_mutex);
 
-	DRM_DEBUG("open_count = %d\n", dev->open_count);
+	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
 
 	drm_close_helper(filp);
 
-	if (!--dev->open_count)
+	if (atomic_dec_and_test(&dev->open_count))
 		drm_lastclose(dev);
 
-	mutex_unlock(&drm_global_mutex);
+	if (drm_dev_needs_global_mutex(dev))
+		mutex_unlock(&drm_global_mutex);
 
 	drm_minor_release(minor);
 
@@ -456,6 +498,40 @@ int drm_release(struct inode *inode, struct file *filp)
 EXPORT_SYMBOL(drm_release);
 
 /**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function may be used by drivers as their &file_operations.release
+ * method. It frees any resources associated with the open file prior to taking
+ * the drm_global_mutex; this includes calling the &drm_driver.postclose driver
+ * callback. If this is the last open file for the DRM device, it then takes
+ * the drm_global_mutex and calls the &drm_driver.lastclose driver callback.
+ *
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
+ */
+int drm_release_noglobal(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_minor *minor = file_priv->minor;
+	struct drm_device *dev = minor->dev;
+
+	drm_close_helper(filp);
+
+	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
+		drm_lastclose(dev);
+		mutex_unlock(&drm_global_mutex);
+	}
+
+	drm_minor_release(minor);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_release_noglobal);
+
+/**
  * drm_read - read method for DRM file
  * @filp: file pointer
  * @buffer: userspace destination pointer for the read
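
A sketch of how a non-legacy GEM driver might opt out of the global mutex on release; the my_driver_fops name is illustrative, and drm_open() still decides per device, via drm_dev_needs_global_mutex(), whether the mutex is taken on open:

    static const struct file_operations my_driver_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release_noglobal, /* skips drm_global_mutex */
            .unlocked_ioctl = drm_ioctl,
            .compat_ioctl   = drm_compat_ioctl,
            .poll           = drm_poll,
            .read           = drm_read,
            .llseek         = noop_llseek,
            .mmap           = drm_gem_mmap,
    };
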
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0897cb9..3b818f2 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0 or MIT
 /*
  * Copyright (C) 2016 Noralf Trønnes
  *
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 5756431..57ac94c 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -31,6 +31,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
 #include <drm/drm_print.h>
 #include <drm/drm_util.h>
 
@@ -548,7 +549,128 @@ int drm_mode_getfb(struct drm_device *dev,
 
 out:
 	drm_framebuffer_put(fb);
+	return ret;
+}
 
+/**
+ * drm_mode_getfb2 - get extended FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+			  void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd2 *r = data;
+	struct drm_framebuffer *fb;
+	unsigned int i;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+	if (!fb)
+		return -ENOENT;
+
+	/* For multi-plane framebuffers, we require the driver to place the
+	 * GEM objects directly in the drm_framebuffer. For single-plane
+	 * framebuffers, we can fall back to create_handle.
+	 */
+	if (!fb->obj[0] &&
+	    (fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	r->height = fb->height;
+	r->width = fb->width;
+	r->pixel_format = fb->format->format;
+
+	r->flags = 0;
+	if (dev->mode_config.allow_fb_modifiers)
+		r->flags |= DRM_MODE_FB_MODIFIERS;
+
+	for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+		r->handles[i] = 0;
+		r->pitches[i] = 0;
+		r->offsets[i] = 0;
+		r->modifier[i] = 0;
+	}
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		r->pitches[i] = fb->pitches[i];
+		r->offsets[i] = fb->offsets[i];
+		if (dev->mode_config.allow_fb_modifiers)
+			r->modifier[i] = fb->modifier;
+	}
+
+	/* GET_FB2() is an unprivileged ioctl so we must not return a
+	 * buffer-handle to non master/root processes! To match GET_FB()
+	 * just return invalid handles (0) for non masters/root
+	 * rather than making GET_FB2() privileged.
+	 */
+	if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+		ret = 0;
+		goto out;
+	}
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		int j;
+
+		/* If we reuse the same object for multiple planes, also
+		 * return the same handle.
+		 */
+		for (j = 0; j < i; j++) {
+			if (fb->obj[i] == fb->obj[j]) {
+				r->handles[i] = r->handles[j];
+				break;
+			}
+		}
+
+		if (r->handles[i])
+			continue;
+
+		if (fb->obj[i]) {
+			ret = drm_gem_handle_create(file_priv, fb->obj[i],
+						    &r->handles[i]);
+		} else {
+			WARN_ON(i > 0);
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handles[i]);
+		}
+
+		if (ret != 0)
+			goto out;
+	}
+
+out:
+	if (ret != 0) {
+		/* Delete any previously-created handles on failure. */
+		for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+			int j;
+
+			if (r->handles[i])
+				drm_gem_handle_delete(file_priv, r->handles[i]);
+
+			/* Zero out any handles identical to the one we just
+			 * deleted.
+			 */
+			for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
+				if (r->handles[j] == r->handles[i])
+					r->handles[j] = 0;
+			}
+		}
+	}
+
+	drm_framebuffer_put(fb);
 	return ret;
 }
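
From userspace the new ioctl goes through the usual libdrm wrapper. A sketch, assuming a uapi header recent enough to define DRM_IOCTL_MODE_GETFB2 (handles come back as 0 unless the caller is master or has CAP_SYS_ADMIN):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/drm.h>

    /* Query extended info for one framebuffer; fd is an open DRM fd. */
    static int dump_fb2(int fd, uint32_t fb_id)
    {
            struct drm_mode_fb_cmd2 cmd;

            memset(&cmd, 0, sizeof(cmd));
            cmd.fb_id = fb_id;
            if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &cmd))
                    return -1;
            printf("%ux%u fourcc 0x%08x modifier 0x%llx\n",
                   cmd.width, cmd.height, cmd.pixel_format,
                   (unsigned long long)cmd.modifier[0]);
            return 0;
    }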
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a9e4a61..37627d0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	bool final = false;
 
-	if (WARN_ON(obj->handle_count == 0))
+	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
 		return;
 
 	/*
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index a486332..92a11bb 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1141,3 +1141,64 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
 	dev->vram_mm = NULL;
 }
 EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Mode-config helpers
+ */
+
+static enum drm_mode_status
+drm_vram_helper_mode_valid_internal(struct drm_device *dev,
+				    const struct drm_display_mode *mode,
+				    unsigned long max_bpp)
+{
+	struct drm_vram_mm *vmm = dev->vram_mm;
+	unsigned long fbsize, fbpages, max_fbpages;
+
+	if (WARN_ON(!dev->vram_mm))
+		return MODE_BAD;
+
+	max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
+
+	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+	if (fbpages > max_fbpages)
+		return MODE_MEM;
+
+	return MODE_OK;
+}
+
+/**
+ * drm_vram_helper_mode_valid - Tests if a display mode's
+ *	framebuffer fits into the available video memory.
+ * @dev:	the DRM device
+ * @mode:	the mode to test
+ *
+ * This function tests if enough video memory is available for using the
+ * specified display mode. Atomic modesetting requires importing the
+ * designated framebuffer into video memory before evicting the active
+ * one. Hence, any framebuffer may consume at most half of the available
+ * VRAM. Display modes that require a larger framebuffer cannot be used,
+ * even if the CRTC does support them. Each framebuffer is assumed to
+ * have 32-bit color depth.
+ *
+ * Note:
+ * The function can only test if the display mode is supported in
+ * general. If there are too many framebuffers pinned to video memory,
+ * a display mode may still not be usable in practice. A color depth of
+ * 32 bit fits all current use cases. A more flexible test can be added
+ * when necessary.
+ *
+ * Returns:
+ * MODE_OK if the display mode is supported, or an error code of type
+ * enum drm_mode_status otherwise.
+ */
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+			   const struct drm_display_mode *mode)
+{
+	static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+	return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
+}
+EXPORT_SYMBOL(drm_vram_helper_mode_valid);
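
The arithmetic behind the test: at the assumed 4 bytes per pixel, a 1024x768 mode needs exactly 3 MiB, so it is accepted only on devices with at least 6 MiB of VRAM, since a framebuffer may occupy at most half. A sketch of how a VRAM-helper-based driver might wire the test up (the struct name is illustrative; the helpers come from drm_gem_framebuffer_helper.h and drm_atomic_helper.h):

    static const struct drm_mode_config_funcs my_mode_config_funcs = {
            .fb_create     = drm_gem_fb_create,
            .mode_valid    = drm_vram_helper_mode_valid,
            .atomic_check  = drm_atomic_helper_check,
            .atomic_commit = drm_atomic_helper_commit,
    };
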
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633..7f386ad 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
 
 #include "drm_internal.h"
 
-static struct hdcp_srm {
-	u32 revoked_ksv_cnt;
-	u8 *revoked_ksv_list;
-
-	/* Mutex to protect above struct member */
-	struct mutex mutex;
-} *srm_data;
-
 static inline void drm_hdcp_print_ksv(const u8 *ksv)
 {
 	DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
 	return ksv_count;
 }
 
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
 				     u32 vrls_length)
 {
-	u32 parsed_bytes = 0, ksv_count = 0;
 	u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+	u32 parsed_bytes = 0, ksv_count = 0;
 
 	do {
 		vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
 
 		DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
 			  vrl_ksv_cnt);
-		memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+		memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+		       buf, vrl_ksv_sz);
 
 		ksv_count += vrl_ksv_cnt;
-		revoked_ksv_list += vrl_ksv_sz;
 		buf += vrl_ksv_sz;
 
 		parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
 	return drm_hdcp_be24_to_cpu(buf);
 }
 
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+				    u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
 	struct hdcp_srm_header *header;
 	u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
 	ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
 	if (!ksv_count) {
 		DRM_DEBUG("Revoked KSV count is 0\n");
-		return count;
+		return 0;
 	}
 
-	kfree(srm_data->revoked_ksv_list);
-	srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
-					     GFP_KERNEL);
-	if (!srm_data->revoked_ksv_list) {
+	*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+	if (!*revoked_ksv_list) {
 		DRM_ERROR("Out of Memory\n");
 		return -ENOMEM;
 	}
 
-	if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+	if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
 				      vrl_length) != ksv_count) {
-		srm_data->revoked_ksv_cnt = 0;
-		kfree(srm_data->revoked_ksv_list);
+		*revoked_ksv_cnt = 0;
+		kfree(*revoked_ksv_list);
 		return -EINVAL;
 	}
 
-	srm_data->revoked_ksv_cnt = ksv_count;
-	return count;
+	*revoked_ksv_cnt = ksv_count;
+	return 0;
 }
 
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+				    u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
 	struct hdcp_srm_header *header;
 	u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
 	ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
 	if (!ksv_count) {
 		DRM_DEBUG("Revoked KSV count is 0\n");
-		return count;
+		return 0;
 	}
 
-	kfree(srm_data->revoked_ksv_list);
-	srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
-					     GFP_KERNEL);
-	if (!srm_data->revoked_ksv_list) {
+	*revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+	if (!*revoked_ksv_list) {
 		DRM_ERROR("Out of Memory\n");
 		return -ENOMEM;
 	}
@@ -210,10 +200,10 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
 	buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
 
 	DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
-	memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+	memcpy(*revoked_ksv_list, buf, ksv_sz);
 
-	srm_data->revoked_ksv_cnt = ksv_count;
-	return count;
+	*revoked_ksv_cnt = ksv_count;
+	return 0;
 }
 
 static inline bool is_srm_version_hdcp1(const u8 *buf)
@@ -226,22 +216,27 @@ static inline bool is_srm_version_hdcp2(const u8 *buf)
 	return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
 }
 
-static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+static int drm_hdcp_srm_update(const u8 *buf, size_t count,
+			       u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
 	if (count < sizeof(struct hdcp_srm_header))
-		return;
+		return -EINVAL;
 
 	if (is_srm_version_hdcp1(buf))
-		drm_hdcp_parse_hdcp1_srm(buf, count);
+		return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
+						revoked_ksv_cnt);
 	else if (is_srm_version_hdcp2(buf))
-		drm_hdcp_parse_hdcp2_srm(buf, count);
+		return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
+						revoked_ksv_cnt);
+	else
+		return -EINVAL;
 }
 
-static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+static int drm_hdcp_request_srm(struct drm_device *drm_dev,
+				u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
 {
 	char fw_name[36] = "display_hdcp_srm.bin";
 	const struct firmware *fw;
-
 	int ret;
 
 	ret = request_firmware_direct(&fw, (const char *)fw_name,
@@ -250,10 +245,12 @@ static void drm_hdcp_request_srm(struct drm_device *drm_dev)
 		goto exit;
 
 	if (fw->size && fw->data)
-		drm_hdcp_srm_update(fw->data, fw->size);
+		ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
+					  revoked_ksv_cnt);
 
 exit:
 	release_firmware(fw);
+	return ret;
 }
 
 /**
@@ -279,71 +276,34 @@ static void drm_hdcp_request_srm(struct drm_device *drm_dev)
  * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
  *
  * Returns:
- * TRUE on any of the KSV is revoked, else FALSE.
+ * Count of the revoked KSVs, or a negative error code on failure.
  */
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
-				 u32 ksv_count)
+int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+				u32 ksv_count)
 {
-	u32 rev_ksv_cnt, cnt, i, j;
-	u8 *rev_ksv_list;
+	u32 revoked_ksv_cnt = 0, i, j;
+	u8 *revoked_ksv_list = NULL;
+	int ret = 0;
 
-	if (!srm_data)
-		return false;
+	ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
+				   &revoked_ksv_cnt);
 
-	mutex_lock(&srm_data->mutex);
-	drm_hdcp_request_srm(drm_dev);
+	/* revoked_ksv_cnt will be zero when the above function fails */
+	for (i = 0; i < revoked_ksv_cnt; i++)
+		for  (j = 0; j < ksv_count; j++)
+			if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
+				    &revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
+				    DRM_HDCP_KSV_LEN)) {
+				DRM_DEBUG("Revoked KSV is ");
+				drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
+				ret++;
+			}
 
-	rev_ksv_cnt = srm_data->revoked_ksv_cnt;
-	rev_ksv_list = srm_data->revoked_ksv_list;
-
-	/* If the Revoked ksv list is empty */
-	if (!rev_ksv_cnt || !rev_ksv_list) {
-		mutex_unlock(&srm_data->mutex);
-		return false;
-	}
-
-	for  (cnt = 0; cnt < ksv_count; cnt++) {
-		rev_ksv_list = srm_data->revoked_ksv_list;
-		for (i = 0; i < rev_ksv_cnt; i++) {
-			for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
-				if (ksvs[j] != rev_ksv_list[j]) {
-					break;
-				} else if (j == (DRM_HDCP_KSV_LEN - 1)) {
-					DRM_DEBUG("Revoked KSV is ");
-					drm_hdcp_print_ksv(ksvs);
-					mutex_unlock(&srm_data->mutex);
-					return true;
-				}
-			/* Move the offset to next KSV in the revoked list */
-			rev_ksv_list += DRM_HDCP_KSV_LEN;
-		}
-
-		/* Iterate to next ksv_offset */
-		ksvs += DRM_HDCP_KSV_LEN;
-	}
-	mutex_unlock(&srm_data->mutex);
-	return false;
+	kfree(revoked_ksv_list);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
 
-int drm_setup_hdcp_srm(struct class *drm_class)
-{
-	srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
-	if (!srm_data)
-		return -ENOMEM;
-	mutex_init(&srm_data->mutex);
-
-	return 0;
-}
-
-void drm_teardown_hdcp_srm(struct class *drm_class)
-{
-	if (srm_data) {
-		kfree(srm_data->revoked_ksv_list);
-		kfree(srm_data);
-	}
-}
-
 static struct drm_prop_enum_list drm_cp_enum_list[] = {
 	{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
 	{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
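
With the global SRM cache gone, each query parses the SRM firmware blob afresh, and the return value now carries information: negative if the SRM could not be requested or parsed, otherwise the number of the passed-in KSVs that appear on the revocation list. A hedged caller sketch (the error policy is the driver's choice):

    /* ksv_fifo holds num_downstream 5-byte KSVs read from the repeater. */
    ret = drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream);
    if (ret < 0)
            DRM_DEBUG_KMS("SRM not available or unparseable (%d)\n", ret);
    else if (ret > 0)
            return -EPERM; /* at least one downstream KSV is revoked */
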
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 6937bf9..5714a78 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -41,6 +41,7 @@ struct drm_printer;
 
 /* drm_file.c */
 extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
 struct drm_file *drm_file_alloc(struct drm_minor *minor);
 void drm_file_free(struct drm_file *file);
 void drm_lastclose(struct drm_device *dev);
@@ -235,7 +236,3 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
 void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
 				const struct drm_framebuffer *fb);
 int drm_framebuffer_debugfs_init(struct drm_minor *minor);
-
-/* drm_hdcp.c */
-int drm_setup_hdcp_srm(struct class *drm_class);
-void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 5afb3968..9e41972 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -671,6 +671,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03bce56..588be45 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -111,10 +111,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
 	if (irq == 0)
 		return -EINVAL;
 
-	/* Driver must have been initialized */
-	if (!dev->dev_private)
-		return -EINVAL;
-
 	if (dev->irq_enabled)
 		return -EBUSY;
 	dev->irq_enabled = true;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2c79e81..f16eefb 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -46,7 +46,7 @@
 
 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 
-/**
+/*
  * Take the heavyweight lock.
  *
  * \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	return 0;
 }
 
-/**
+/*
  * This takes a lock forcibly and hands it to context.	Should ONLY be used
  * inside *_unlock to give lock to kernel before calling *_dma_schedule.
  *
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
 	return 0;
 }
 
-/**
+/*
  * Lock ioctl.
  *
  * \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Unlock ioctl.
  *
  * \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
 	return 0;
 }
 
-/**
+/*
  * This function returns immediately and takes the hw lock
  * with the kernel context if it is free, otherwise it gets the highest priority when and if
  * it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
  * This should be sufficient to wait for GPU idle without
  * having to worry about starvation.
  */
-
 void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 16bff1b..558baf9 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -24,7 +24,6 @@
 #include <drm/drm_modes.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
 #include <video/mipi_display.h>
 
 #define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -238,6 +237,23 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(mipi_dbi_buf_copy);
 
+static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
+					unsigned int xs, unsigned int xe,
+					unsigned int ys, unsigned int ye)
+{
+	struct mipi_dbi *dbi = &dbidev->dbi;
+
+	xs += dbidev->left_offset;
+	xe += dbidev->left_offset;
+	ys += dbidev->top_offset;
+	ye += dbidev->top_offset;
+
+	mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
+			 xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
+	mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
+			 ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
+}
+
 static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 {
 	struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
@@ -271,12 +287,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
 		tr = cma_obj->vaddr;
 	}
 
-	mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
-			 (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
-			 ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
-	mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
-			 (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
-			 ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
+	mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
+				    rect->y2 - 1);
 
 	ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
 				   width * height * 2);
@@ -299,18 +311,10 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
 			  struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = pipe->plane.state;
-	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect rect;
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		mipi_dbi_fb_dirty(state->fb, &rect);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		spin_unlock_irq(&crtc->dev->event_lock);
-		crtc->state->event = NULL;
-	}
 }
 EXPORT_SYMBOL(mipi_dbi_pipe_update);
 
@@ -366,10 +370,7 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
 
 	memset(dbidev->tx_buf, 0, len);
 
-	mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
-			 ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
-	mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
-			 ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+	mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
 	mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
 			     (u8 *)dbidev->tx_buf, len);
 
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2a6e346..bc6e208 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,6 +45,7 @@
 #include <linux/export.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/seq_file.h>
+#include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 
@@ -366,6 +367,11 @@ next_hole(struct drm_mm *mm,
 	  struct drm_mm_node *node,
 	  enum drm_mm_insert_mode mode)
 {
+	/* Searching is slow; check if we ran out of time/patience */
+	cond_resched();
+	if (fatal_signal_pending(current))
+		return NULL;
+
 	switch (mode) {
 	default:
 	case DRM_MM_INSERT_BEST:
@@ -399,10 +405,10 @@ next_hole(struct drm_mm *mm,
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	u64 end = node->start + node->size;
 	struct drm_mm_node *hole;
 	u64 hole_start, hole_end;
 	u64 adj_start, adj_end;
+	u64 end;
 
 	end = node->start + node->size;
 	if (unlikely(end <= node->start))
@@ -557,7 +563,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 		return 0;
 	}
 
-	return -ENOSPC;
+	return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
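
Because next_hole() now bails out when a fatal signal is pending, a long search can end early, and drm_mm_insert_node_in_range() distinguishes the two outcomes: -ERESTARTSYS when a signal cut the scan short, -ENOSPC when the range is genuinely full. A caller sketch (the eviction helper is hypothetical):

    ret = drm_mm_insert_node_in_range(mm, node, size, align, 0,
                                      0, U64_MAX, DRM_MM_INSERT_BEST);
    if (ret == -ERESTARTSYS)
            return ret; /* unwind and let the syscall restart */
    if (ret == -ENOSPC)
            ret = my_evict_and_retry(mm, node); /* hypothetical */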
 
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f2e43d3..81aa215 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -51,8 +51,6 @@
 drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
 {
 	drm_dma_handle_t *dmah;
-	unsigned long addr;
-	size_t sz;
 
 	/* pci_alloc_consistent only guarantees alignment to the smallest
 	 * PAGE_SIZE order which is greater than or equal to the requested size.
@@ -68,47 +66,17 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 	dmah->size = size;
 	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
 					 &dmah->busaddr,
-					 GFP_KERNEL | __GFP_COMP);
+					 GFP_KERNEL);
 
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
 		return NULL;
 	}
 
-	/* XXX - Is virt_to_page() legal for consistent mem? */
-	/* Reserve */
-	for (addr = (unsigned long)dmah->vaddr, sz = size;
-	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-		SetPageReserved(virt_to_page((void *)addr));
-	}
-
 	return dmah;
 }
-
 EXPORT_SYMBOL(drm_pci_alloc);
 
-/*
- * Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
-	unsigned long addr;
-	size_t sz;
-
-	if (dmah->vaddr) {
-		/* XXX - Is virt_to_page() legal for consistent mem? */
-		/* Unreserve */
-		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
-		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-			ClearPageReserved(virt_to_page((void *)addr));
-		}
-		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
-				  dmah->busaddr);
-	}
-}
-
 /**
  * drm_pci_free - Free a PCI consistent memory block
  * @dev: DRM device
@@ -119,7 +87,8 @@ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
  */
 void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
-	__drm_legacy_pci_free(dev, dmah);
+	dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+			  dmah->busaddr);
 	kfree(dmah);
 }
 
@@ -197,6 +166,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return drm_pci_irq_by_busid(dev, p);
 }
 
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+	if (dev->agp) {
+		arch_phys_wc_del(dev->agp->agp_mtrr);
+		drm_legacy_agp_clear(dev);
+		kfree(dev->agp);
+		dev->agp = NULL;
+	}
+}
+
+#ifdef CONFIG_DRM_LEGACY
+
 static void drm_pci_agp_init(struct drm_device *dev)
 {
 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -211,33 +192,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
 	}
 }
 
-void drm_pci_agp_destroy(struct drm_device *dev)
-{
-	if (dev->agp) {
-		arch_phys_wc_del(dev->agp->agp_mtrr);
-		drm_legacy_agp_clear(dev);
-		kfree(dev->agp);
-		dev->agp = NULL;
-	}
-}
-
-/**
- * drm_get_pci_dev - Register a PCI device with the DRM subsystem
- * @pdev: PCI device
- * @ent: entry from the PCI ID table that matches @pdev
- * @driver: DRM device driver
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- *
- * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your &drm_driver.load callback.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
-		    struct drm_driver *driver)
+static int drm_get_pci_dev(struct pci_dev *pdev,
+			   const struct pci_device_id *ent,
+			   struct drm_driver *driver)
 {
 	struct drm_device *dev;
 	int ret;
@@ -280,9 +237,6 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 	drm_dev_put(dev);
 	return ret;
 }
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-#ifdef CONFIG_DRM_LEGACY
 
 /**
  * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d5c3861..ca52002 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -99,6 +99,9 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
 	if (!drm_core_check_feature(dev, DRIVER_SG))
 		return -EOPNOTSUPP;
 
+	if (request->size > SIZE_MAX - PAGE_SIZE)
+		return -EINVAL;
+
 	if (dev->sg)
 		return -EINVAL;
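
The new bound guards the page-count computation that follows: for a request within PAGE_SIZE of SIZE_MAX, rounding up to a page boundary wraps past zero, yielding a tiny page count for a huge nominal size. A sketch of the failure mode the check closes off:

    size_t size  = SIZE_MAX;                              /* user-controlled */
    size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* wraps to 0 */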
 
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 15fb516..7494669 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -26,12 +26,51 @@
  * entity. Some flexibility for code reuse is provided through a separately
  * allocated &drm_connector object and supporting optional &drm_bridge
  * encoder drivers.
+ *
+ * Many drivers require only a very simple encoder that fulfills the minimum
+ * requirements of the display pipeline and does not add additional
+ * functionality. The function drm_simple_encoder_init() provides an
+ * implementation of such an encoder.
  */
 
-static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
 	.destroy = drm_encoder_cleanup,
 };
 
+/**
+ * drm_simple_encoder_init - Initialize a preallocated encoder with
+ *                           basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * The encoder will be cleaned up automatically as part of the mode-setting
+ * cleanup.
+ *
+ * The caller of drm_simple_encoder_init() is responsible for freeing
+ * the encoder's memory after the encoder has been cleaned up. At the
+ * moment this only works reliably if the encoder data structure is
+ * stored in the device structure. Free the encoder's memory as part of
+ * the device release function.
+ *
+ * FIXME: Later improvements to DRM's resource management may allow for
+ *        an automated kfree() of the encoder's memory.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_simple_encoder_init(struct drm_device *dev,
+			    struct drm_encoder *encoder,
+			    int encoder_type)
+{
+	return drm_encoder_init(dev, encoder,
+				&drm_simple_encoder_funcs_cleanup,
+				encoder_type, NULL);
+}
+EXPORT_SYMBOL(drm_simple_encoder_init);
+
 static enum drm_mode_status
 drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
 			       const struct drm_display_mode *mode)
@@ -229,7 +268,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
 int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
 					  struct drm_bridge *bridge)
 {
-	return drm_bridge_attach(&pipe->encoder, bridge, NULL);
+	return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
 }
 EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
 
@@ -288,8 +327,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
 		return ret;
 
 	encoder->possible_crtcs = drm_crtc_mask(crtc);
-	ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
-			       DRM_MODE_ENCODER_NONE, NULL);
+	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
 	if (ret || !connector)
 		return ret;
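
A sketch of the intended usage, with the encoder embedded in a driver-private structure so its memory stays valid until the device is released (all my_* names are illustrative):

    struct my_device {
            struct drm_device drm;
            struct drm_crtc crtc;
            struct drm_encoder encoder;
    };

    static int my_init_encoder(struct my_device *mdev)
    {
            int ret;

            ret = drm_simple_encoder_init(&mdev->drm, &mdev->encoder,
                                          DRM_MODE_ENCODER_DPI);
            if (ret)
                    return ret;

            mdev->encoder.possible_crtcs = drm_crtc_mask(&mdev->crtc);
            return 0;
    }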
 
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 669c93f..42d4641 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -43,27 +43,66 @@
  *  - Signal a syncobj (set a trivially signaled fence)
  *  - Wait for a syncobj's fence to appear and be signaled
  *
+ * The syncobj userspace API also provides operations to manipulate a syncobj
+ * in terms of a timeline of struct &dma_fence_chain rather than a single
+ * struct &dma_fence, through the following operations:
+ *
+ *   - Signal a given point on the timeline
+ *   - Wait for a given point to appear and/or be signaled
+ *   - Import and export from/to a given point of a timeline
+ *
  * At it's core, a syncobj is simply a wrapper around a pointer to a struct
  * &dma_fence which may be NULL.
  * When a syncobj is first created, its pointer is either NULL or a pointer
  * to an already signaled fence depending on whether the
  * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
  * &DRM_IOCTL_SYNCOBJ_CREATE.
- * When GPU work which signals a syncobj is enqueued in a DRM driver,
- * the syncobj fence is replaced with a fence which will be signaled by the
- * completion of that work.
- * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
- * driver retrieves syncobj's current fence at the time the work is enqueued
- * waits on that fence before submitting the work to hardware.
- * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
- * All manipulation of the syncobjs's fence happens in terms of the current
- * fence at the time the ioctl is called by userspace regardless of whether
- * that operation is an immediate host-side operation (signal or reset) or
- * or an operation which is enqueued in some driver queue.
- * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
- * manipulate a syncobj from the host by resetting its pointer to NULL or
+ *
+ * If the syncobj is considered as a binary primitive (its state is either
+ * signaled or unsignaled), when GPU work is enqueued in a DRM driver to
+ * signal the syncobj, the syncobj's fence is replaced with a fence which
+ * will be signaled by the completion of that work.
+ * If the syncobj is considered as a timeline primitive, when GPU work is
+ * enqueued in a DRM driver to signal a given point of the syncobj, a new
+ * struct &dma_fence_chain is created, pointing to the DRM driver's fence
+ * and also to the previous fence that was in the syncobj. The new struct
+ * &dma_fence_chain fence replaces the syncobj's fence and will be signaled
+ * by completion of the DRM driver's work and also of any work associated
+ * with the fence previously in the syncobj.
+ *
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
+ * time the work is enqueued, it waits on the syncobj's fence before
+ * submitting the work to hardware. That fence is either:
+ *
+ *    - The syncobj's current fence if the syncobj is considered as a binary
+ *      primitive.
+ *    - The struct &dma_fence associated with a given point if the syncobj is
+ *      considered as a timeline primitive.
+ *
+ * If the syncobj's fence is NULL or not present in the syncobj's timeline,
+ * the enqueue operation is expected to fail.
+ *
+ * With a binary syncobj, all manipulation of the syncobj's fence happens in
+ * terms of the current fence at the time the ioctl is called by userspace,
+ * regardless of whether that operation is an immediate host-side operation
+ * (signal or reset) or an operation which is enqueued in some driver
+ * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
+ * to manipulate a syncobj from the host by resetting its pointer to NULL or
  * setting its pointer to a fence which is already signaled.
  *
+ * With a timeline syncobj, all manipulation of the syncobj's fence happens in
+ * terms of a u64 value referring to a point in the timeline. See
+ * dma_fence_chain_find_seqno() to see how a given point is found in the
+ * timeline.
+ *
+ * Note that applications should be careful to always use the timeline set of
+ * ioctls when dealing with a syncobj considered as a timeline. Using the
+ * binary set of ioctls with a syncobj considered as a timeline could result
+ * in incorrect synchronization. The use of a binary syncobj is supported
+ * through the timeline set of ioctls by using a point value of 0; this
+ * reproduces the behavior of the binary set of ioctls (for example, replacing
+ * the syncobj's fence when signaling).
+ *
  *
  * Host-side wait on syncobjs
  * --------------------------
@@ -87,6 +126,16 @@
  * synchronize between the two.
  * This requirement is inherited from the Vulkan fence API.
  *
+ * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
+ * handles as well as an array of u64 points and does a host-side wait on all
+ * of the syncobj fences at the given points simultaneously.
+ *
+ * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
+ * fence to materialize on the timeline without waiting for the fence to be
+ * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
+ * requirement is inherited from the wait-before-signal behavior required by
+ * the Vulkan timeline semaphore API.
+ *
  *
  * Import/export of syncobjs
  * -------------------------
@@ -120,6 +169,18 @@
  * Because sync files are immutable, resetting or signaling the syncobj
  * will not affect any sync files whose fences have been imported into the
  * syncobj.
+ *
+ *
+ * Import/export of timeline points in timeline syncobjs
+ * -----------------------------------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
+ * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
+ * into another syncobj.
+ *
+ * Note that if you want to transfer a struct &dma_fence_chain at a given
+ * point on a timeline syncobj from/into a binary syncobj, you can use
+ * point 0 to mean take/replace the fence in the binary syncobj.
  */
 
 #include <linux/anon_inodes.h>
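
From userspace the timeline wait described above maps onto the libdrm wrapper. A sketch that waits for a point to both materialize and signal; passing DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE instead would return as soon as the fence merely materializes:

    #include <stdint.h>
    #include <xf86drm.h>

    /* fd is a DRM fd, handle a timeline syncobj, pt the point to wait on. */
    static int wait_timeline_point(int fd, uint32_t handle, uint64_t pt)
    {
            return drmSyncobjTimelineWait(fd, &handle, &pt, 1,
                                          INT64_MAX, /* absolute timeout */
                                          DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
                                          NULL);
    }
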
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index dd2bc85..939f003 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -85,7 +85,6 @@ int drm_sysfs_init(void)
 	}
 
 	drm_class->devnode = drm_devnode;
-	drm_setup_hdcp_srm(drm_class);
 	return 0;
 }
 
@@ -98,7 +97,6 @@ void drm_sysfs_destroy(void)
 {
 	if (IS_ERR_OR_NULL(drm_class))
 		return;
-	drm_teardown_hdcp_srm(drm_class);
 	class_remove_file(drm_class, &class_attr_version.attr);
 	class_destroy(drm_class);
 	drm_class = NULL;
@@ -230,7 +228,7 @@ static ssize_t modes_show(struct device *device,
 
 	mutex_lock(&connector->dev->mode_config.mutex);
 	list_for_each_entry(mode, &connector->modes, head) {
-		written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+		written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
 				    mode->name);
 	}
 	mutex_unlock(&connector->dev->mode_config.mutex);
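
The switch matters because snprintf() returns the length the string would have had, not what was stored; a loop that accumulates that value can push written past PAGE_SIZE and turn the remaining-space argument into a huge size_t. scnprintf() returns the bytes actually written. A sketch of the difference:

    char buf[8];
    int n;

    n = snprintf(buf, sizeof(buf), "0123456789");  /* n == 10, buf truncated */
    n = scnprintf(buf, sizeof(buf), "0123456789"); /* n == 7, what fit */
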
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 1659b13..da7b0b0 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_print.h>
 #include <drm/drm_vblank.h>
 
@@ -69,6 +70,12 @@
  * &drm_driver.max_vblank_count. In that case the vblank core only disables the
  * vblanks after a timer has expired, which can be configured through the
  * ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by enabling and
+ * disabling struct drm_crtc_state.no_vblank.
  */
 
 /* Retry timestamp calculation up to 3 times to satisfy
@@ -137,10 +144,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 
 		if (crtc->funcs->get_vblank_counter)
 			return crtc->funcs->get_vblank_counter(crtc);
-	}
-
-	if (dev->driver->get_vblank_counter)
+	} else if (dev->driver->get_vblank_counter) {
 		return dev->driver->get_vblank_counter(dev, pipe);
+	}
 
 	return drm_vblank_no_hw_counter(dev, pipe);
 }
@@ -332,7 +338,8 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
 	u64 vblank;
 	unsigned long flags;
 
-	WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
+	WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
+		  !crtc->funcs->get_vblank_timestamp,
 		  "This function requires support for accurate vblank timestamps.");
 
 	spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -354,13 +361,11 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
 		if (WARN_ON(!crtc))
 			return;
 
-		if (crtc->funcs->disable_vblank) {
+		if (crtc->funcs->disable_vblank)
 			crtc->funcs->disable_vblank(crtc);
-			return;
-		}
+	} else {
+		dev->driver->disable_vblank(dev, pipe);
 	}
-
-	dev->driver->disable_vblank(dev, pipe);
 }
 
 /*
@@ -480,19 +485,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 
 	DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
-	/* Driver specific high-precision vblank timestamping supported? */
-	if (dev->driver->get_vblank_timestamp)
-		DRM_INFO("Driver supports precise vblank timestamp query.\n");
-	else
-		DRM_INFO("No driver support for vblank timestamp query.\n");
-
-	/* Must have precise timestamping for reliable vblank instant disable */
-	if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
-		dev->vblank_disable_immediate = false;
-		DRM_INFO("Setting vblank_disable_immediate to false because "
-			 "get_vblank_timestamp == NULL\n");
-	}
-
 	return 0;
 
 err:
@@ -502,6 +494,28 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 EXPORT_SYMBOL(drm_vblank_init);
 
 /**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ *                      a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+	return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
+
+/**
  * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
  * @crtc: which CRTC's vblank waitqueue to retrieve
  *
@@ -523,9 +537,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
  *
  * Calculate and store various constants which are later needed by vblank and
  * swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
- * scanout timing, so they take things like panel scaling or other adjustments
- * into account.
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
+ * CRTC's true scanout timing, so they take things like panel scaling or
+ * other adjustments into account.
  */
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode)
@@ -576,9 +590,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_calc_timestamping_constants);
 
 /**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
+ *                                                        timestamp helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
  * @max_error: Desired maximum allowable error in timestamps (nanosecs)
  *             On return contains true maximum error of timestamp
  * @vblank_time: Pointer to time which should receive the timestamp
@@ -586,11 +600,12 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  *     True when called from drm_crtc_handle_vblank().  Some drivers
  *     need to apply some workarounds for gpu-specific vblank irq quirks
  *     if flag is set.
+ * @get_scanout_position:
+ *     Callback function to retrieve the scanout position. See
+ *     &struct drm_crtc_helper_funcs.get_scanout_position.
  *
  * Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be directly
- * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
- * if &drm_driver.get_scanout_position is implemented.
+ * timings and current video scanout position of a CRTC.
  *
  * The current implementation only handles standard video modes. For double scan
  * and interlaced modes the driver is supposed to adjust the hardware mode
@@ -606,34 +621,30 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  * Returns true on success, and false on failure, i.e. when no accurate
  * timestamp could be acquired.
  */
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-					   unsigned int pipe,
-					   int *max_error,
-					   ktime_t *vblank_time,
-					   bool in_vblank_irq)
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+	struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
+	bool in_vblank_irq,
+	drm_vblank_get_scanout_position_func get_scanout_position)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	struct timespec64 ts_etime, ts_vblank_time;
 	ktime_t stime, etime;
 	bool vbl_status;
-	struct drm_crtc *crtc;
 	const struct drm_display_mode *mode;
-	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int vpos, hpos, i;
 	int delta_ns, duration_ns;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return false;
-
-	crtc = drm_crtc_from_index(dev, pipe);
-
-	if (pipe >= dev->num_crtcs || !crtc) {
+	if (pipe >= dev->num_crtcs) {
 		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return false;
 	}
 
 	/* Scanout position query not supported? Should not happen. */
-	if (!dev->driver->get_scanout_position) {
-		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+	if (!get_scanout_position) {
+		DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
 		return false;
 	}
 
@@ -648,7 +659,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 	if (mode->crtc_clock == 0) {
 		DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
 		WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
 		return false;
 	}
 
@@ -664,11 +674,10 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 		 * Get vertical and horizontal scanout position vpos, hpos,
 		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		vbl_status = dev->driver->get_scanout_position(dev, pipe,
-							       in_vblank_irq,
-							       &vpos, &hpos,
-							       &stime, &etime,
-							       mode);
+		vbl_status = get_scanout_position(crtc, in_vblank_irq,
+						  &vpos, &hpos,
+						  &stime, &etime,
+						  mode);
 
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!vbl_status) {
@@ -720,7 +729,49 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
 
 	return true;
 }
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
+ *                                               helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ *             On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
+ * @in_vblank_irq:
+ *     True when called from drm_crtc_handle_vblank().  Some drivers
+ *     need to apply some workarounds for gpu-specific vblank irq quirks
+ *     if flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
+ * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
+ *
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ *
+ * True on success, and false on failure, i.e. when no accurate
+ * timestamp could be acquired.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+						 int *max_error,
+						 ktime_t *vblank_time,
+						 bool in_vblank_irq)
+{
+	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+		crtc, max_error, vblank_time, in_vblank_irq,
+		crtc->helper_private->get_scanout_position);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
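Usage sketch, with hypothetical foo_* names: a KMS driver that implements the
get_scanout_position helper hook can plug this function straight into its
CRTC funcs, exactly as the kernel-doc above describes:

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		/* ... */
		.get_scanout_position = foo_crtc_get_scanout_position,
	};

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* ... */
		.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	};
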
 
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
@@ -747,15 +798,16 @@ static bool
 drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  ktime_t *tvblank, bool in_vblank_irq)
 {
+	struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
 	bool ret = false;
 
 	/* Define requested maximum error on timestamps (nanoseconds). */
 	int max_error = (int) drm_timestamp_precision * 1000;
 
 	/* Query driver if possible and precision timestamping enabled. */
-	if (dev->driver->get_vblank_timestamp && (max_error > 0))
-		ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
+	if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0)
+		ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
 							tvblank, in_vblank_irq);
 
 	/* GPU high precision timestamp query unsupported or failed.
 	 * Return current monotonic/gettimeofday timestamp as best estimate.
@@ -977,9 +1032,11 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 		if (crtc->funcs->enable_vblank)
 			return crtc->funcs->enable_vblank(crtc);
+	} else if (dev->driver->enable_vblank) {
+		return dev->driver->enable_vblank(dev, pipe);
 	}
 
-	return dev->driver->enable_vblank(dev, pipe);
+	return -EINVAL;
 }
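For illustration (foo_* names and the register layout are made up), a driver
migrating off the legacy device-wide callback provides the per-CRTC hook that
__enable_vblank() now checks first; the gma500 hunks further down perform
exactly this conversion for psb_enable_vblank() and friends:

	static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
	{
		struct foo_device *foo = to_foo_device(crtc->dev);

		/* Unmask the vblank interrupt for this pipe. */
		writel(FOO_IRQ_VBLANK(crtc->index),
		       foo->mmio + FOO_IRQ_ENABLE);

		return 0;
	}
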
 
 static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
@@ -1738,6 +1795,8 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
 
 static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 {
+	struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+	bool high_prec = false;
 	struct drm_pending_vblank_event *e, *t;
 	ktime_t now;
 	u64 seq;
@@ -1760,8 +1819,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 		send_vblank_event(dev, e, seq, now);
 	}
 
-	trace_drm_vblank_event(pipe, seq, now,
-			dev->driver->get_vblank_timestamp != NULL);
+	if (crtc && crtc->funcs->get_vblank_timestamp)
+		high_prec = true;
+
+	trace_drm_vblank_event(pipe, seq, now, high_prec);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 52e87e48..aa88911b 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -102,7 +102,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 	return tmp;
 }
 
-/**
+/*
  * \c fault method for AGP virtual memory.
  *
  * \param vma virtual memory area.
@@ -192,7 +192,7 @@ static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
 }
 #endif
 
-/**
+/*
  * \c nopage method for shared virtual memory.
  *
  * \param vma virtual memory area.
@@ -225,7 +225,7 @@ static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
 	return 0;
 }
 
-/**
+/*
  * \c close method for shared virtual memory.
  *
  * \param vma virtual memory area.
@@ -269,8 +269,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		}
 
 		if (!found_maps) {
-			drm_dma_handle_t dmah;
-
 			switch (map->type) {
 			case _DRM_REGISTERS:
 			case _DRM_FRAME_BUFFER:
@@ -284,10 +282,10 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			case _DRM_SCATTER_GATHER:
 				break;
 			case _DRM_CONSISTENT:
-				dmah.vaddr = map->handle;
-				dmah.busaddr = map->offset;
-				dmah.size = map->size;
-				__drm_legacy_pci_free(dev, &dmah);
+				dma_free_coherent(&dev->pdev->dev,
+						  map->size,
+						  map->handle,
+						  map->offset);
 				break;
 			}
 			kfree(map);
@@ -296,7 +294,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
+/*
  * \c fault method for DMA virtual memory.
  *
  * \param address access address.
@@ -331,7 +329,7 @@ static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
 	return 0;
 }
 
-/**
+/*
  * \c fault method for scatter-gather virtual memory.
  *
  * \param address access address.
@@ -437,7 +435,7 @@ static void drm_vm_close_locked(struct drm_device *dev,
 	}
 }
 
-/**
+/*
  * \c close method for all virtual memory types.
  *
  * \param vma virtual memory area.
@@ -455,7 +453,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
+/*
  * mmap DMA memory.
  *
  * \param file_priv DRM file private.
@@ -515,7 +513,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
 #endif
 }
 
-/**
+/*
  * mmap DMA memory.
  *
  * \param file_priv DRM file private.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 32d9fac..76d3856 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -12,6 +12,7 @@
 
 #include "common.xml.h"
 #include "state.xml.h"
+#include "state_blt.xml.h"
 #include "state_hi.xml.h"
 #include "state_3d.xml.h"
 #include "cmdstream.xml.h"
@@ -233,6 +234,8 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
 	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
 	unsigned int waitlink_offset = buffer->user_size - 16;
 	u32 link_target, flush = 0;
+	bool has_blt = !!(gpu->identity.minor_features5 &
+			  chipMinorFeatures5_BLT_ENGINE);
 
 	lockdep_assert_held(&gpu->lock);
 
@@ -248,16 +251,38 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
 	if (flush) {
 		unsigned int dwords = 7;
 
+		if (has_blt)
+			dwords += 10;
+
 		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
 
 		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		if (has_blt) {
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+		}
 		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
-		if (gpu->exec_state == ETNA_PIPE_3D)
-			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
-				       VIVS_TS_FLUSH_CACHE_FLUSH);
+		if (gpu->exec_state == ETNA_PIPE_3D) {
+			if (has_blt) {
+				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+				CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+				CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+			} else {
+				CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+					       VIVS_TS_FLUSH_CACHE_FLUSH);
+			}
+		}
 		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		if (has_blt) {
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+			CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+			CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+		}
 		CMD_END(buffer);
 
 		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
@@ -323,6 +348,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	bool switch_mmu_context = gpu->mmu_context != mmu_context;
 	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
 	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
+	bool has_blt = !!(gpu->identity.minor_features5 &
+			  chipMinorFeatures5_BLT_ENGINE);
 
 	lockdep_assert_held(&gpu->lock);
 
@@ -433,6 +460,15 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
 	 */
 	return_dwords = 7;
+
+	/*
+	 * When the BLT engine is present we need 6 more dwords in the return
+	 * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
+	 * but we don't need the normal TS flush state.
+	 */
+	if (has_blt)
+		return_dwords += 6;
+
 	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
 	CMD_LINK(cmdbuf, return_dwords, return_target);
 
@@ -447,11 +483,25 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
 				       VIVS_GL_FLUSH_CACHE_DEPTH |
 				       VIVS_GL_FLUSH_CACHE_COLOR);
-		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
-				       VIVS_TS_FLUSH_CACHE_FLUSH);
+		if (has_blt) {
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+			CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
+			CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+		} else {
+			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+					       VIVS_TS_FLUSH_CACHE_FLUSH);
+		}
 	}
 	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+
+	if (has_blt) {
+		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
+		CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
+	}
+
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6b43c1c..a8685b2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -551,6 +551,7 @@ static int etnaviv_bind(struct device *dev)
 	mutex_init(&priv->gem_lock);
 	INIT_LIST_HEAD(&priv->gem_list);
 	priv->num_gpus = 0;
+	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
 
 	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
 	if (IS_ERR(priv->cmdbuf_suballoc)) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index efc656e..4d8dc92 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -35,6 +35,7 @@ struct etnaviv_drm_private {
 	int num_gpus;
 	struct device_dma_parameters dma_parms;
 	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+	gfp_t shm_gfp_mask;
 
 	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
 	struct etnaviv_iommu_global *mmu_global;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 6adea18..dc9ef30 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -602,6 +602,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
 int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	u32 size, u32 flags, u32 *handle)
 {
+	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct drm_gem_object *obj = NULL;
 	int ret;
 
@@ -624,8 +625,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	 * above new_inode() why this is required _and_ expected if you're
 	 * going to pin these pages.
 	 */
-	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
-			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);
 
 	etnaviv_gem_obj_add(dev, obj);
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 6b68fe1..98e60df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -105,7 +105,7 @@ struct etnaviv_gem_submit {
 	unsigned int nr_pmrs;
 	struct etnaviv_perfmon_request *pmrs;
 	unsigned int nr_bos;
-	struct etnaviv_gem_submit_bo bos[0];
+	struct etnaviv_gem_submit_bo bos[];
 	/* No new members here, the previous one is variable-length! */
 };
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 799ec20..a31eeff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -333,9 +333,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 		gpu->identity.revision = etnaviv_field(chipIdentity,
 					 VIVS_HI_CHIP_IDENTITY_REVISION);
 	} else {
+		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 
 		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+		gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
+		gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
 
 		/*
 		 * !!!! HACK ALERT !!!!
@@ -350,7 +354,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 
 		/* Another special case */
 		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
-			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 
 			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
@@ -373,6 +376,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
 			gpu->identity.model = chipModel_GC3000;
 			gpu->identity.revision &= 0xffff;
 		}
+
+		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+			gpu->identity.eco_id = 1;
+
+		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+			gpu->identity.eco_id = 1;
 	}
 
 	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
@@ -506,7 +515,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 		/* read idle register. */
 		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 
-		/* try reseting again if FE it not idle */
+		/* try resetting again if FE is not idle */
 		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 			dev_dbg(gpu->dev, "FE is not idle\n");
 			continue;
@@ -772,6 +781,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 	}
 
+	/*
+	 * If the GPU is part of a system with DMA addressing limitations,
+	 * request pages for our SHM backend buffers from the DMA32 zone to
+	 * hopefully avoid performance killing SWIOTLB bounce buffering.
+	 */
+	if (dma_addressing_limited(gpu->dev))
+		priv->shm_gfp_mask |= GFP_DMA32;
+
 	/* Create buffer: */
 	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
 				  PAGE_SIZE);
@@ -851,6 +868,13 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 
 	verify_dma(gpu, &debug);
 
+	seq_puts(m, "\tidentity\n");
+	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
+	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
+	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
+	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
+	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
+
 	seq_puts(m, "\tfeatures\n");
 	seq_printf(m, "\t major_features: 0x%08x\n",
 		   gpu->identity.features);
@@ -930,6 +954,20 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 		seq_puts(m, "\t FP is not idle\n");
 	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 		seq_puts(m, "\t TS is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
+		seq_puts(m, "\t BL is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
+		seq_puts(m, "\t ASYNCFE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
+		seq_puts(m, "\t MC is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
+		seq_puts(m, "\t PPA is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
+		seq_puts(m, "\t WD is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
+		seq_puts(m, "\t NN is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
+		seq_puts(m, "\t TP is not idle\n");
 	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 		seq_puts(m, "\t AXI low power mode\n");
 
@@ -1805,11 +1843,15 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
 	if (atomic_read(&gpu->sched.hw_rq_count))
 		return -EBUSY;
 
-	/* Check whether the hardware (except FE) is idle */
-	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+	/* Check whether the hardware (except FE and MC) is idle */
+	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
+				  VIVS_HI_IDLE_STATE_MC);
 	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
-	if (idle != mask)
+	if (idle != mask) {
+		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
+				     idle);
 		return -EBUSY;
+	}
 
 	return etnaviv_gpu_hw_suspend(gpu);
 }
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 97bb480..8ea4869 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -15,11 +15,11 @@ struct etnaviv_gem_submit;
 struct etnaviv_vram_mapping;
 
 struct etnaviv_chip_identity {
-	/* Chip model. */
 	u32 model;
-
-	/* Revision value.*/
 	u32 revision;
+	u32 product_id;
+	u32 customer_id;
+	u32 eco_id;
 
 	/* Supported feature fields. */
 	u32 features;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 39b463d..167971a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -7,8 +7,42 @@
 
 static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
 	{
+		.model = 0x400,
+		.revision = 0x4652,
+		.product_id = 0x70001,
+		.customer_id = 0x100,
+		.eco_id = 0,
+		.stream_count = 4,
+		.register_max = 64,
+		.thread_count = 128,
+		.shader_core_count = 1,
+		.vertex_cache_size = 8,
+		.vertex_output_buffer_size = 1024,
+		.pixel_pipes = 1,
+		.instruction_count = 256,
+		.num_constants = 320,
+		.buffer_size = 0,
+		.varyings_count = 8,
+		.features = 0xa0e9e004,
+		.minor_features0 = 0xe1299fff,
+		.minor_features1 = 0xbe13b219,
+		.minor_features2 = 0xce110010,
+		.minor_features3 = 0x8000001,
+		.minor_features4 = 0x20102,
+		.minor_features5 = 0x120000,
+		.minor_features6 = 0x0,
+		.minor_features7 = 0x0,
+		.minor_features8 = 0x0,
+		.minor_features9 = 0x0,
+		.minor_features10 = 0x0,
+		.minor_features11 = 0x0,
+	},
+	{
 		.model = 0x7000,
 		.revision = 0x6214,
+		.product_id = ~0U,
+		.customer_id = ~0U,
+		.eco_id = ~0U,
 		.stream_count = 16,
 		.register_max = 64,
 		.thread_count = 1024,
@@ -43,7 +77,13 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
 
 	for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
 		if (etnaviv_chip_identities[i].model == ident->model &&
-		    etnaviv_chip_identities[i].revision == ident->revision) {
+		    etnaviv_chip_identities[i].revision == ident->revision &&
+		    (etnaviv_chip_identities[i].product_id == ident->product_id ||
+			 etnaviv_chip_identities[i].product_id == ~0U) &&
+		    (etnaviv_chip_identities[i].customer_id == ident->customer_id ||
+			 etnaviv_chip_identities[i].customer_id == ~0U) &&
+		    (etnaviv_chip_identities[i].eco_id == ident->eco_id ||
+			 etnaviv_chip_identities[i].eco_id == ~0U)) {
 			memcpy(ident, &etnaviv_chip_identities[i],
 			       sizeof(*ident));
 			return true;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 8adbf28..e6795ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -32,6 +32,7 @@ struct etnaviv_pm_domain {
 };
 
 struct etnaviv_pm_domain_meta {
+	unsigned int feature;
 	const struct etnaviv_pm_domain *domains;
 	u32 nr_domains;
 };
@@ -410,36 +411,78 @@ static const struct etnaviv_pm_domain doms_vg[] = {
 
 static const struct etnaviv_pm_domain_meta doms_meta[] = {
 	{
+		.feature = chipFeatures_PIPE_3D,
 		.nr_domains = ARRAY_SIZE(doms_3d),
 		.domains = &doms_3d[0]
 	},
 	{
+		.feature = chipFeatures_PIPE_2D,
 		.nr_domains = ARRAY_SIZE(doms_2d),
 		.domains = &doms_2d[0]
 	},
 	{
+		.feature = chipFeatures_PIPE_VG,
 		.nr_domains = ARRAY_SIZE(doms_vg),
 		.domains = &doms_vg[0]
 	}
 };
 
+static unsigned int num_pm_domains(const struct etnaviv_gpu *gpu)
+{
+	unsigned int num = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+		const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+		if (gpu->identity.features & meta->feature)
+			num += meta->nr_domains;
+	}
+
+	return num;
+}
+
+static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
+	unsigned int index)
+{
+	const struct etnaviv_pm_domain *domain = NULL;
+	unsigned int offset = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(doms_meta); i++) {
+		const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];
+
+		if (!(gpu->identity.features & meta->feature))
+			continue;
+
+		if (meta->nr_domains < (index - offset)) {
+			offset += meta->nr_domains;
+			continue;
+		}
+
+		domain = meta->domains + (index - offset);
+	}
+
+	return domain;
+}
+
 int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
 	struct drm_etnaviv_pm_domain *domain)
 {
-	const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+	const unsigned int nr_domains = num_pm_domains(gpu);
 	const struct etnaviv_pm_domain *dom;
 
-	if (domain->iter >= meta->nr_domains)
+	if (domain->iter >= nr_domains)
 		return -EINVAL;
 
-	dom = meta->domains + domain->iter;
+	dom = pm_domain(gpu, domain->iter);
+	if (!dom)
+		return -EINVAL;
 
 	domain->id = domain->iter;
 	domain->nr_signals = dom->nr_signals;
 	strncpy(domain->name, dom->name, sizeof(domain->name));
 
 	domain->iter++;
-	if (domain->iter == meta->nr_domains)
+	if (domain->iter == nr_domains)
 		domain->iter = 0xff;
 
 	return 0;
@@ -448,14 +491,16 @@ int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
 int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
 	struct drm_etnaviv_pm_signal *signal)
 {
-	const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+	const unsigned int nr_domains = num_pm_domains(gpu);
 	const struct etnaviv_pm_domain *dom;
 	const struct etnaviv_pm_signal *sig;
 
-	if (signal->domain >= meta->nr_domains)
+	if (signal->domain >= nr_domains)
 		return -EINVAL;
 
-	dom = meta->domains + signal->domain;
+	dom = pm_domain(gpu, signal->domain);
+	if (!dom)
+		return -EINVAL;
 
 	if (signal->iter >= dom->nr_signals)
 		return -EINVAL;
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
index daae559..0e8bcf9 100644
--- a/drivers/gpu/drm/etnaviv/state_blt.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -46,6 +46,8 @@ DEALINGS IN THE SOFTWARE.
 
 /* This is a cut-down version of the state_blt.xml.h file */
 
+#define VIVS_BLT_SET_COMMAND					0x000140ac
+
 #define VIVS_BLT_ENABLE						0x000140b8
 #define VIVS_BLT_ENABLE_ENABLE					0x00000001
 
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 41d8da2..deaaa99 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ This file was generated by the rules-ng-ng headergen tool in this git repository
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state.xml     (  26087 bytes, from 2017-12-18 16:51:59)
-- common.xml    (  35468 bytes, from 2018-01-22 13:48:54)
-- common_3d.xml (  14615 bytes, from 2017-12-18 16:51:59)
-- state_hi.xml  (  30232 bytes, from 2018-02-15 15:48:01)
-- copyright.xml (   1597 bytes, from 2016-12-08 16:37:56)
-- state_2d.xml  (  51552 bytes, from 2016-12-08 16:37:56)
-- state_3d.xml  (  79992 bytes, from 2017-12-18 16:51:59)
-- state_blt.xml (  13405 bytes, from 2017-12-18 16:51:59)
-- state_vg.xml  (   5975 bytes, from 2016-12-08 16:37:56)
+- state.xml     (  26666 bytes, from 2019-12-20 21:20:35)
+- common.xml    (  35468 bytes, from 2018-02-10 13:09:26)
+- common_3d.xml (  15058 bytes, from 2019-12-28 20:02:03)
+- state_hi.xml  (  30552 bytes, from 2019-12-28 20:02:48)
+- copyright.xml (   1597 bytes, from 2018-02-10 13:09:26)
+- state_2d.xml  (  51552 bytes, from 2018-02-10 13:09:26)
+- state_3d.xml  (  83098 bytes, from 2019-12-28 20:02:03)
+- state_blt.xml (  14252 bytes, from 2019-10-20 19:59:15)
+- state_vg.xml  (   5975 bytes, from 2018-02-10 13:09:26)
 
-Copyright (C) 2012-2018 by the following authors:
+Copyright (C) 2012-2019 by the following authors:
 - Wladimir J. van der Laan <laanwj@gmail.com>
 - Christian Gmeiner <christian.gmeiner@gmail.com>
 - Lucas Stach <l.stach@pengutronix.de>
@@ -48,6 +48,9 @@ DEALINGS IN THE SOFTWARE.
 #define MMU_EXCEPTION_SLAVE_NOT_PRESENT				0x00000001
 #define MMU_EXCEPTION_PAGE_NOT_PRESENT				0x00000002
 #define MMU_EXCEPTION_WRITE_VIOLATION				0x00000003
+#define MMU_EXCEPTION_OUT_OF_BOUND				0x00000004
+#define MMU_EXCEPTION_READ_SECURITY_VIOLATION			0x00000005
+#define MMU_EXCEPTION_WRITE_SECURITY_VIOLATION			0x00000006
 #define VIVS_HI							0x00000000
 
 #define VIVS_HI_CLOCK_CONTROL					0x00000000
@@ -81,6 +84,13 @@ DEALINGS IN THE SOFTWARE.
 #define VIVS_HI_IDLE_STATE_IM					0x00000200
 #define VIVS_HI_IDLE_STATE_FP					0x00000400
 #define VIVS_HI_IDLE_STATE_TS					0x00000800
+#define VIVS_HI_IDLE_STATE_BL					0x00001000
+#define VIVS_HI_IDLE_STATE_ASYNCFE				0x00002000
+#define VIVS_HI_IDLE_STATE_MC					0x00004000
+#define VIVS_HI_IDLE_STATE_PPA					0x00008000
+#define VIVS_HI_IDLE_STATE_WD					0x00010000
+#define VIVS_HI_IDLE_STATE_NN					0x00020000
+#define VIVS_HI_IDLE_STATE_TP					0x00040000
 #define VIVS_HI_IDLE_STATE_AXI_LP				0x80000000
 
 #define VIVS_HI_AXI_CONFIG					0x00000008
@@ -140,6 +150,8 @@ DEALINGS IN THE SOFTWARE.
 
 #define VIVS_HI_CHIP_TIME					0x0000002c
 
+#define VIVS_HI_CHIP_CUSTOMER_ID				0x00000030
+
 #define VIVS_HI_CHIP_MINOR_FEATURE_0				0x00000034
 
 #define VIVS_HI_CACHE_CONTROL					0x00000038
@@ -237,6 +249,8 @@ DEALINGS IN THE SOFTWARE.
 
 #define VIVS_HI_BLT_INTR					0x000000d4
 
+#define VIVS_HI_CHIP_ECO_ID					0x000000e8
+
 #define VIVS_HI_AUXBIT						0x000000ec
 
 #define VIVS_PM							0x00000000
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 1eed332..f2d87a7 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -140,7 +140,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
 		const struct drm_display_mode *mode)
 {
-	unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+	unsigned long ideal_clk = mode->clock;
 	u32 clkdiv;
 
 	/* Find the clock divider value that gets us closest to ideal_clk */
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4785885..d23d350 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -106,7 +106,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
 
 	/* Pre-empt DP connector creation if there's a bridge */
 	if (dp->ptn_bridge) {
-		ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
+		ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
+					0);
 		if (ret) {
 			DRM_DEV_ERROR(dp->dev,
 				      "Failed to attach bridge to drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b..57defeb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev)
 	struct drm_encoder *encoder;
 	struct drm_device *drm;
 	unsigned int clone_mask;
-	int cnt, ret;
+	int ret;
 
 	drm = drm_dev_alloc(&exynos_drm_driver, dev);
 	if (IS_ERR(drm))
@@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev)
 	exynos_drm_mode_config_init(drm);
 
 	/* setup possible_clones. */
-	cnt = 0;
 	clone_mask = 0;
 	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
-		clone_mask |= (1 << (cnt++));
+		clone_mask |= drm_encoder_mask(encoder);
 
 	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
 		encoder->possible_clones = clone_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a85365c..e080aa9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1514,7 +1514,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 		return 0;
 
 	connector->funcs->reset(connector);
-	drm_fb_helper_add_one_connector(drm->fb_helper, connector);
 	drm_connector_register(connector);
 	return 0;
 }
@@ -1540,7 +1539,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
 
 	out_bridge  = of_drm_find_bridge(device->dev.of_node);
 	if (out_bridge) {
-		drm_bridge_attach(encoder, out_bridge, NULL);
+		drm_bridge_attach(encoder, out_bridge, NULL, 0);
 		dsi->out_bridge = out_bridge;
 		list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
 	} else {
@@ -1717,7 +1716,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
 	if (dsi->in_bridge_node) {
 		in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
 		if (in_bridge)
-			drm_bridge_attach(encoder, in_bridge, NULL);
+			drm_bridge_attach(encoder, in_bridge, NULL, 0);
 	}
 
 	return mipi_dsi_host_register(&dsi->dsi_host);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 647a1fd..e6ceaf3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -200,21 +200,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
+	ret = drm_fb_helper_init(dev, helper);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev->dev,
 			      "failed to initialize drm fb helper.\n");
 		goto err_init;
 	}
 
-	ret = drm_fb_helper_single_add_all_connectors(helper);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev->dev,
-			      "failed to register drm_fb_helper_connector.\n");
-		goto err_setup;
-
-	}
-
 	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index f141916..1a7c828 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -960,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
 	drm_connector_attach_encoder(connector, encoder);
 
 	if (hdata->bridge) {
-		ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+		ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
 		if (ret)
 			DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
 	}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9598ee3..cff3443 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -151,5 +151,5 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
 		return fsl_dcu_attach_panel(fsl_dev, panel);
 	}
 
-	return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
+	return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
 }
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 1ed854f..686385a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -977,6 +977,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
 	.set_config = gma_crtc_set_config,
 	.destroy = gma_crtc_destroy,
 	.page_flip = gma_crtc_page_flip,
+	.enable_vblank = psb_enable_vblank,
+	.disable_vblank = psb_disable_vblank,
+	.get_vblank_counter = psb_get_vblank_counter,
 };
 
 const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1459076..1d8f67e 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -513,14 +513,10 @@ int psb_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
+	ret = drm_fb_helper_init(dev, fb_helper);
 	if (ret)
 		goto free;
 
-	ret = drm_fb_helper_single_add_all_connectors(fb_helper);
-	if (ret)
-		goto fini;
-
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(dev);
 
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index a1f9ce9..0e6facf 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -227,7 +227,7 @@ struct bdb_general_definitions {
 	 * number = (block_size - sizeof(bdb_general_definitions))/
 	 *	     sizeof(child_device_config);
 	 */
-	struct child_device_config devices[0];
+	struct child_device_config devices[];
 };
 
 struct bdb_lvds_options {
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6956c8e..2411eb9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -363,7 +363,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 	drm_irq_install(dev, dev->pdev->irq);
 
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	dev->driver->get_vblank_counter = psb_get_vblank_counter;
 
 	psb_modeset_init(dev);
 	psb_fbdev_init(dev);
@@ -507,9 +506,6 @@ static struct drm_driver driver = {
 	.irq_postinstall = psb_irq_postinstall,
 	.irq_uninstall = psb_irq_uninstall,
 	.irq_handler = psb_irq_handler,
-	.enable_vblank = psb_enable_vblank,
-	.disable_vblank = psb_disable_vblank,
-	.get_vblank_counter = psb_get_vblank_counter,
 
 	.gem_free_object = psb_gem_free_object,
 	.gem_vm_ops = &psb_gem_vm_ops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3d4ef30..9569263 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -681,15 +681,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
 extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
 extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
 extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern int psb_enable_vblank(struct drm_crtc *crtc);
+extern void psb_disable_vblank(struct drm_crtc *crtc);
 void
 psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
 
 void
 psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
 
-extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
 
 /* framebuffer.c */
 extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index fed3b56..531c548 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -433,6 +433,9 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
 	.set_config = gma_crtc_set_config,
 	.destroy = gma_crtc_destroy,
 	.page_flip = gma_crtc_page_flip,
+	.enable_vblank = psb_enable_vblank,
+	.disable_vblank = psb_disable_vblank,
+	.get_vblank_counter = psb_get_vblank_counter,
 };
 
 const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 91f9001..15eb377 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -506,8 +506,10 @@ int psb_irq_disable_dpst(struct drm_device *dev)
 /*
  * It is used to enable VBLANK interrupt
  */
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int psb_enable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 	uint32_t reg_val = 0;
@@ -545,8 +547,10 @@ int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
 /*
  * It is used to disable VBLANK interrupt
  */
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void psb_disable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
@@ -618,8 +622,10 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 psb_get_vblank_counter(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	uint32_t high_frame = PIPEAFRAMEHIGH;
 	uint32_t low_frame = PIPEAFRAMEPIXEL;
 	uint32_t pipeconf_reg = PIPEACONF;
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 58fd502..4f73998 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,6 +12,7 @@
 #ifndef _PSB_IRQ_H_
 #define _PSB_IRQ_H_
 
+struct drm_crtc;
 struct drm_device;
 
 bool sysirq_init(struct drm_device *dev);
@@ -26,9 +27,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
 int psb_irq_disable_dpst(struct drm_device *dev);
 void psb_irq_turn_on_dpst(struct drm_device *dev);
 void psb_irq_turn_off_dpst(struct drm_device *dev);
-int  psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
-u32  psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int  psb_enable_vblank(struct drm_crtc *crtc);
+void psb_disable_vblank(struct drm_crtc *crtc);
+u32  psb_get_vblank_counter(struct drm_crtc *crtc);
 
 int mdfld_enable_te(struct drm_device *dev, int pipe);
 void mdfld_disable_te(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 7fa7d49..55b46a7 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config {
 };
 
 static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+	{640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
 	{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
 	{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
 	{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
@@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
 	{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
 	{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
 	{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+	{1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
+	{1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
 	{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
 	{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
 	{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
@@ -80,6 +83,9 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
 		return -EINVAL;
 	}
 
+	if (!crtc_state->enable)
+		return 0;
+
 	if (state->crtc_x + state->crtc_w >
 	    crtc_state->adjusted_mode.hdisplay ||
 	    state->crtc_y + state->crtc_h >
@@ -184,6 +190,20 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
 	return plane;
 }
 
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+	struct hibmc_drm_private *priv = crtc->dev->dev_private;
+	unsigned int reg;
+
+	reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+	reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
+	reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
+	reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
+	if (dpms == HIBMC_CRT_DPMS_ON)
+		reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
+	writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
 static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
 				     struct drm_crtc_state *old_state)
 {
@@ -200,6 +220,7 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
 	reg |= HIBMC_CURR_GATE_DISPLAY(1);
 	hibmc_set_current_gate(priv, reg);
 	drm_crtc_vblank_on(crtc);
+	hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
 }
 
 static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -208,6 +229,7 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
 	unsigned int reg;
 	struct hibmc_drm_private *priv = crtc->dev->dev_private;
 
+	hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
 	drm_crtc_vblank_off(crtc);
 
 	hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
@@ -221,6 +243,25 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
 	hibmc_set_current_gate(priv, reg);
 }
 
+static enum drm_mode_status
+hibmc_crtc_mode_valid(struct drm_crtc *crtc,
+		      const struct drm_display_mode *mode)
+{
+	int i = 0;
+	int vrefresh = drm_mode_vrefresh(mode);
+
+	if (vrefresh < 59 || vrefresh > 61)
+		return MODE_NOCLOCK;
+
+	for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
+		if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
+		    hibmc_pll_table[i].vdisplay == mode->vdisplay)
+			return MODE_OK;
+	}
+
+	return MODE_BAD;
+}
+
 static unsigned int format_pll_reg(void)
 {
 	unsigned int pllreg = 0;
@@ -435,6 +476,42 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
 	       priv->mmio + HIBMC_RAW_INTERRUPT_EN);
 }
 
+static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct hibmc_drm_private *priv = crtc->dev->dev_private;
+	void __iomem   *mmio = priv->mmio;
+	u16 *r, *g, *b;
+	unsigned int reg;
+	int i;
+
+	r = crtc->gamma_store;
+	g = r + crtc->gamma_size;
+	b = g + crtc->gamma_size;
+
+	for (i = 0; i < crtc->gamma_size; i++) {
+		unsigned int offset = i << 2;
+		u8 red = *r++ >> 8;
+		u8 green = *g++ >> 8;
+		u8 blue = *b++ >> 8;
+		u32 rgb = (red << 16) | (green << 8) | blue;
+
+		writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
+	}
+
+	reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+	reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
+	writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				u16 *blue, uint32_t size,
+				struct drm_modeset_acquire_ctx *ctx)
+{
+	hibmc_crtc_load_lut(crtc);
+
+	return 0;
+}
+
 static const struct drm_crtc_funcs hibmc_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.set_config = drm_atomic_helper_set_config,
@@ -444,6 +521,7 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 	.enable_vblank = hibmc_crtc_enable_vblank,
 	.disable_vblank = hibmc_crtc_disable_vblank,
+	.gamma_set = hibmc_crtc_gamma_set,
 };
 
 static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
@@ -452,6 +530,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
 	.atomic_flush	= hibmc_crtc_atomic_flush,
 	.atomic_enable	= hibmc_crtc_atomic_enable,
 	.atomic_disable	= hibmc_crtc_atomic_disable,
+	.mode_valid = hibmc_crtc_mode_valid,
 };
 
 int hibmc_de_init(struct hibmc_drm_private *priv)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 4a8a4cf..222356a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -91,11 +91,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
 	priv->dev->mode_config.min_width = 0;
 	priv->dev->mode_config.min_height = 0;
 	priv->dev->mode_config.max_width = 1920;
-	priv->dev->mode_config.max_height = 1440;
+	priv->dev->mode_config.max_height = 1200;
 
 	priv->dev->mode_config.fb_base = priv->fb_base;
 	priv->dev->mode_config.preferred_depth = 24;
-	priv->dev->mode_config.prefer_shadow = 0;
+	priv->dev->mode_config.prefer_shadow = 1;
 
 	priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
 
@@ -327,6 +327,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
 	struct drm_device *dev;
 	int ret;
 
+	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+								"hibmcdrmfb");
+	if (ret)
+		return ret;
+
 	dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
 	if (IS_ERR(dev)) {
 		DRM_ERROR("failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
index b63a1ee..17b30c3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -68,6 +68,12 @@
 
 #define HIBMC_CRT_DISP_CTL			0x80200
 
+#define HIBMC_CRT_DISP_CTL_DPMS(x)		((x) << 30)
+#define HIBMC_CRT_DISP_CTL_DPMS_MASK		0xc0000000
+
+#define HIBMC_CRT_DPMS_ON			0
+#define HIBMC_CRT_DPMS_OFF			3
+
 #define HIBMC_CRT_DISP_CTL_CRTSELECT(x)		((x) << 25)
 #define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK	0x2000000
 
@@ -85,6 +91,9 @@
 #define HIBMC_CRT_DISP_CTL_TIMING(x)		((x) << 8)
 #define HIBMC_CRT_DISP_CTL_TIMING_MASK		0x100
 
+#define HIBMC_CTL_DISP_CTL_GAMMA(x)		((x) << 3)
+#define HIBMC_CTL_DISP_CTL_GAMMA_MASK		0x08
+
 #define HIBMC_CRT_DISP_CTL_PLANE(x)		((x) << 2)
 #define HIBMC_CRT_DISP_CTL_PLANE_MASK		4
 
@@ -170,6 +179,7 @@
 #define CRT_PLL1_HS_74MHZ			0x23941dc2
 #define CRT_PLL1_HS_80MHZ			0x23941001
 #define CRT_PLL1_HS_80MHZ_1152			0x23540fc2
+#define CRT_PLL1_HS_106MHZ			0x237C1641
 #define CRT_PLL1_HS_108MHZ			0x23b41b01
 #define CRT_PLL1_HS_162MHZ			0x23480681
 #define CRT_PLL1_HS_148MHZ			0x23541dc2
@@ -182,10 +192,13 @@
 #define CRT_PLL2_HS_78MHZ			0x50E147AE
 #define CRT_PLL2_HS_74MHZ			0x602B6AE7
 #define CRT_PLL2_HS_80MHZ			0x70000000
+#define CRT_PLL2_HS_106MHZ			0x0075c28f
 #define CRT_PLL2_HS_108MHZ			0x80000000
 #define CRT_PLL2_HS_162MHZ			0xA0000000
 #define CRT_PLL2_HS_148MHZ			0xB0CCCCCD
 #define CRT_PLL2_HS_193MHZ			0xC0872B02
 
+#define HIBMC_CRT_PALETTE                       0x80C00
+
 #define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
 #endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 6d98fdc..678ac2e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,8 +11,10 @@
  *	Jianhua Li <lijianhua@huawei.com>
  */
 
+#include <drm/drm_gem_vram_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
 #include <drm/drm_print.h>
 
 #include "hibmc_drm_drv.h"
@@ -20,7 +22,14 @@
 
 static int hibmc_connector_get_modes(struct drm_connector *connector)
 {
-	return drm_add_modes_noedid(connector, 800, 600);
+	int count;
+
+	count = drm_add_modes_noedid(connector,
+				     connector->dev->mode_config.max_width,
+				     connector->dev->mode_config.max_height);
+	drm_set_preferred_mode(connector, 1024, 768);
+
+	return count;
 }
 
 static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 50b988f..99397ac 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -54,6 +54,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 
 const struct drm_mode_config_funcs hibmc_mode_funcs = {
+	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 	.fb_create = drm_gem_fb_create,
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index bdcf9c6..f31068d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -777,7 +777,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
 	int ret;
 
 	/* associate the bridge to dsi encoder */
-	ret = drm_bridge_attach(encoder, bridge, NULL);
+	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (ret) {
 		DRM_ERROR("failed to attach external bridge\n");
 		return ret;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a63790d..c333220 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1356,10 +1356,16 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
 
 /* DRM bridge functions */
 
-static int tda998x_bridge_attach(struct drm_bridge *bridge)
+static int tda998x_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
 {
 	struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
+
 	return tda998x_connector_init(priv, bridge->dev);
 }
 
@@ -2022,7 +2028,7 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 	if (ret)
 		goto err_encoder;
 
-	ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
+	ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
 	if (ret)
 		goto err_bridge;
 
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 907c447..9afa5c4 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -42,16 +42,9 @@
 
 	  If "M" is selected, the module will be called i915.
 
-config DRM_I915_ALPHA_SUPPORT
-	bool "Enable alpha quality support for new Intel hardware by default"
-	depends on DRM_I915
-	help
-	  This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
-
 config DRM_I915_FORCE_PROBE
 	string "Force probe driver for selected new Intel hardware"
 	depends on DRM_I915
-	default "*" if DRM_I915_ALPHA_SUPPORT
 	help
 	  This is the default value for the i915.force_probe module
 	  parameter. Using the module parameter overrides this option.
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6a..0bfd276 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@
 	  check the health of the GPU and undertake regular house-keeping of
 	  internal driver state.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
 	  May be 0 to disable heartbeats and therefore disable automatic GPU
 	  hang detection.
 
@@ -33,11 +36,18 @@
 	  expires, the HW will be reset to allow the more important context
 	  to execute.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
 	  May be 0 to disable the timeout.
 
-config DRM_I915_SPIN_REQUEST
-	int "Busywait for request completion (us)"
-	default 5 # microseconds
+	  The compiled in default may get overridden at driver probe time on
+	  certain platforms and certain engines which will be reflected in the
+	  sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+	int "Busywait for request completion limit (ns)"
+	default 8000 # nanoseconds
 	help
 	  Before sleeping waiting for a request (GPU operation) to complete,
 	  we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@
 	  check if the request will complete in the time it would have taken
 	  us to enable the interrupt.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
 	  May be 0 to disable the initial spin. In practice, we estimate
 	  the cost of enabling the interrupt (if currently disabled) to be
 	  a few microseconds.
@@ -60,6 +73,9 @@
 	  that the reset itself may take longer and so be more disruptive to
 	  interactive or low latency workloads.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/stop_timeout_ms
+
 config DRM_I915_TIMESLICE_DURATION
 	int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
 	default 1 # milliseconds
@@ -73,4 +89,7 @@
 	  is scheduled for execution for the timeslice duration, before
 	  switching to the next context.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
 	  May be 0 to disable timeslicing.
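
As a userspace-side sketch of the sysfs controls named in the help texts
above (the card0/rcs0 path is an assumed example layout, not something the
Kconfig guarantees):

	#include <fcntl.h>
	#include <unistd.h>

	/* Set a 2 ms timeslice on the (assumed) render engine rcs0. */
	static int set_rcs0_timeslice_to_2ms(void)
	{
		int fd = open("/sys/class/drm/card0/engine/rcs0/timeslice_duration_ms",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "2", 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}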
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a1f2411..9f887a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,15 +46,16 @@
 	  i915_switcheroo.o \
 	  i915_sysfs.o \
 	  i915_utils.o \
-	  intel_csr.o \
 	  intel_device_info.o \
+	  intel_dram.o \
 	  intel_memory_region.o \
 	  intel_pch.o \
 	  intel_pm.o \
 	  intel_runtime_pm.o \
 	  intel_sideband.o \
 	  intel_uncore.o \
-	  intel_wakeref.o
+	  intel_wakeref.o \
+	  vlv_suspend.o
 
 # core library code
 i915-y += \
@@ -66,7 +67,11 @@
 	i915_user_extensions.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += \
+	i915_debugfs.o \
+	i915_debugfs_params.o \
+	display/intel_display_debugfs.o \
+	display/intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # "Graphics Technology" (aka we talk to the gpu)
@@ -75,9 +80,12 @@
 	gt/debugfs_gt.o \
 	gt/debugfs_gt_pm.o \
 	gt/gen6_ppgtt.o \
+	gt/gen7_renderclear.o \
 	gt/gen8_ppgtt.o \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
+	gt/intel_context_param.o \
+	gt/intel_context_sseu.o \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_heartbeat.o \
 	gt/intel_engine_pm.o \
@@ -102,7 +110,8 @@
 	gt/intel_rps.o \
 	gt/intel_sseu.o \
 	gt/intel_timeline.o \
-	gt/intel_workarounds.o
+	gt/intel_workarounds.o \
+	gt/sysfs_engines.o
 # autogenerated null render state
 gt-y += \
 	gt/gen6_renderstate.o \
@@ -179,6 +188,7 @@
 	display/intel_color.o \
 	display/intel_combo_phy.o \
 	display/intel_connector.o \
+	display/intel_csr.o \
 	display/intel_display.o \
 	display/intel_display_power.o \
 	display/intel_dpio_phy.o \
@@ -187,6 +197,7 @@
 	display/intel_fbc.o \
 	display/intel_fifo_underrun.o \
 	display/intel_frontbuffer.o \
+	display/intel_global_state.o \
 	display/intel_hdcp.o \
 	display/intel_hotplug.o \
 	display/intel_lpe_audio.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f8e8821..17cee6f 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -39,14 +39,14 @@
 static inline int header_credits_available(struct drm_i915_private *dev_priv,
 					   enum transcoder dsi_trans)
 {
-	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
 		>> FREE_HEADER_CREDIT_SHIFT;
 }
 
 static inline int payload_credits_available(struct drm_i915_private *dev_priv,
 					    enum transcoder dsi_trans)
 {
-	return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
 		>> FREE_PLOAD_CREDIT_SHIFT;
 }
 
@@ -55,7 +55,7 @@ static void wait_for_header_credits(struct drm_i915_private *dev_priv,
 {
 	if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
 			MAX_HEADER_CREDIT, 100))
-		DRM_ERROR("DSI header credits not released\n");
+		drm_err(&dev_priv->drm, "DSI header credits not released\n");
 }
 
 static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
@@ -63,7 +63,7 @@ static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
 {
 	if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
 			MAX_PLOAD_CREDIT, 100))
-		DRM_ERROR("DSI payload credits not released\n");
+		drm_err(&dev_priv->drm, "DSI payload credits not released\n");
 }
 
 static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -97,7 +97,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
 		dsi->channel = 0;
 		ret = mipi_dsi_dcs_nop(dsi);
 		if (ret < 0)
-			DRM_ERROR("error sending DCS NOP command\n");
+			drm_err(&dev_priv->drm,
+				"error sending DCS NOP command\n");
 	}
 
 	/* wait for header credits to be released */
@@ -109,9 +110,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
 	/* wait for LP TX in progress bit to be cleared */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+		if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
 				  LPTX_IN_PROGRESS), 20))
-			DRM_ERROR("LPTX bit not cleared\n");
+			drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
 	}
 }
 
@@ -129,14 +130,15 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
 
 		free_credits = payload_credits_available(dev_priv, dsi_trans);
 		if (free_credits < 1) {
-			DRM_ERROR("Payload credit not available\n");
+			drm_err(&dev_priv->drm,
+				"Payload credit not available\n");
 			return false;
 		}
 
 		for (j = 0; j < min_t(u32, len - i, 4); j++)
 			tmp |= *data++ << 8 * j;
 
-		I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
 	}
 
 	return true;
@@ -154,11 +156,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
 	/* check if header credit available */
 	free_credits = header_credits_available(dev_priv, dsi_trans);
 	if (free_credits < 1) {
-		DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+		drm_err(&dev_priv->drm,
+			"send pkt header failed, not enough hdr credits\n");
 		return -1;
 	}
 
-	tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+	tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
 
 	if (pkt.payload)
 		tmp |= PAYLOAD_PRESENT;
@@ -175,7 +178,7 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
 	tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
 	tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
 	tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
-	I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+	intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
 
 	return 0;
 }
@@ -212,53 +215,55 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
 		 * Program voltage swing and pre-emphasis level values as per
 		 * table in BSPEC under DDI buffer programing
 		 */
-		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 		tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
 		tmp |= SCALING_MODE_SEL(0x2);
 		tmp |= TAP2_DISABLE | TAP3_DISABLE;
 		tmp |= RTERM_SELECT(0x6);
-		I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
 
-		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
 		tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
 		tmp |= SCALING_MODE_SEL(0x2);
 		tmp |= TAP2_DISABLE | TAP3_DISABLE;
 		tmp |= RTERM_SELECT(0x6);
-		I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
 
-		tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
 		tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
 			 RCOMP_SCALAR_MASK);
 		tmp |= SWING_SEL_UPPER(0x2);
 		tmp |= SWING_SEL_LOWER(0x2);
 		tmp |= RCOMP_SCALAR(0x98);
-		I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
 
-		tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
 		tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
 			 RCOMP_SCALAR_MASK);
 		tmp |= SWING_SEL_UPPER(0x2);
 		tmp |= SWING_SEL_LOWER(0x2);
 		tmp |= RCOMP_SCALAR(0x98);
-		I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
 
-		tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
 		tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 			 CURSOR_COEFF_MASK);
 		tmp |= POST_CURSOR_1(0x0);
 		tmp |= POST_CURSOR_2(0x0);
 		tmp |= CURSOR_COEFF(0x3f);
-		I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
 
 		for (lane = 0; lane <= 3; lane++) {
 			/* Bspec: must not use GRP register for write */
-			tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+			tmp = intel_de_read(dev_priv,
+					    ICL_PORT_TX_DW4_LN(lane, phy));
 			tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 				 CURSOR_COEFF_MASK);
 			tmp |= POST_CURSOR_1(0x0);
 			tmp |= POST_CURSOR_2(0x0);
 			tmp |= CURSOR_COEFF(0x3f);
-			I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+			intel_de_write(dev_priv,
+				       ICL_PORT_TX_DW4_LN(lane, phy), tmp);
 		}
 	}
 }
@@ -270,7 +275,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	u32 dss_ctl1;
 
-	dss_ctl1 = I915_READ(DSS_CTL1);
+	dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
 	dss_ctl1 |= SPLITTER_ENABLE;
 	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
 	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -286,20 +291,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
 		dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
 
 		if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
-			DRM_ERROR("DL buffer depth exceed max value\n");
+			drm_err(&dev_priv->drm,
+				"DL buffer depth exceeds max value\n");
 
 		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
 		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-		dss_ctl2 = I915_READ(DSS_CTL2);
+		dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
 		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
 		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
-		I915_WRITE(DSS_CTL2, dss_ctl2);
+		intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
 	} else {
 		/* Interleave */
 		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
 	}
 
-	I915_WRITE(DSS_CTL1, dss_ctl1);
+	intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
 }
 
 /* aka DSI 8X clock */
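
configure_dual_link_mode() above sizes each link's buffer for front/back mode as half the active width plus the configured pixel overlap. A hedged arithmetic sketch with illustrative numbers:

    /* Illustrative: per-link buffer depth for front/back dual-link DSI. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int hactive = 1920;      /* illustrative mode width */
            unsigned int pixel_overlap = 4;   /* illustrative VBT overlap */
            unsigned int dl_buffer_depth = hactive / 2 + pixel_overlap;

            /* 964 pixels per link here; the driver logs an error if this
             * exceeds MAX_DL_BUFFER_TARGET_DEPTH.
             */
            printf("dl_buffer_depth = %u\n", dl_buffer_depth);
            return 0;
    }
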
@@ -330,15 +336,15 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
 	esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
-			   esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
-		POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+		intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+			       esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+		intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
-			   esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
-		POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+		intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+			       esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+		intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
 	}
 }
 
@@ -348,7 +354,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
 	enum port port;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		WARN_ON(intel_dsi->io_wakeref[port]);
+		drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
 		intel_dsi->io_wakeref[port] =
 			intel_display_power_get(dev_priv,
 						port == PORT_A ?
@@ -365,9 +371,9 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
 	u32 tmp;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+		tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
 		tmp |= COMBO_PHY_MODE_DSI;
-		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+		intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
 	}
 
 	get_dsi_io_power_domains(dev_priv, intel_dsi);
@@ -394,40 +400,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
 
 	/* Step 4b(i) set loadgen select for transmit and aux lanes */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
 		tmp &= ~LOADGEN_SELECT;
-		I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
 		for (lane = 0; lane <= 3; lane++) {
-			tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+			tmp = intel_de_read(dev_priv,
+					    ICL_PORT_TX_DW4_LN(lane, phy));
 			tmp &= ~LOADGEN_SELECT;
 			if (lane != 2)
 				tmp |= LOADGEN_SELECT;
-			I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+			intel_de_write(dev_priv,
+				       ICL_PORT_TX_DW4_LN(lane, phy), tmp);
 		}
 	}
 
 	/* Step 4b(ii) set latency optimization for transmit and aux lanes */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
 		tmp &= ~FRC_LATENCY_OPTIM_MASK;
 		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
-		I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
-		tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+		intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
 		tmp &= ~FRC_LATENCY_OPTIM_MASK;
 		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
-		I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
 
 		/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
 		if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
-			tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+			tmp = intel_de_read(dev_priv,
+					    ICL_PORT_PCS_DW1_AUX(phy));
 			tmp &= ~LATENCY_OPTIM_MASK;
 			tmp |= LATENCY_OPTIM_VAL(0);
-			I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+			intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+				       tmp);
 
-			tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+			tmp = intel_de_read(dev_priv,
+					    ICL_PORT_PCS_DW1_LN0(phy));
 			tmp &= ~LATENCY_OPTIM_MASK;
 			tmp |= LATENCY_OPTIM_VAL(0x1);
-			I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+			intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+				       tmp);
 		}
 	}
 
@@ -442,12 +454,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 
 	/* clear common keeper enable bit */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
 		tmp &= ~COMMON_KEEPER_EN;
-		I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
-		tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+		intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+		tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
 		tmp &= ~COMMON_KEEPER_EN;
-		I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
 	}
 
 	/*
@@ -456,19 +468,19 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 	 * as part of lane phy sequence configuration
 	 */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_CL_DW5(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
 		tmp |= SUS_CLOCK_CONFIG;
-		I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
 	}
 
 	/* Clear training enable to change swing values */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 		tmp &= ~TX_TRAINING_EN;
-		I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
-		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
 		tmp &= ~TX_TRAINING_EN;
-		I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
 	}
 
 	/* Program swing and de-emphasis */
@@ -476,12 +488,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
 
 	/* Set training enable to trigger update */
 	for_each_dsi_phy(phy, intel_dsi->phys) {
-		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 		tmp |= TX_TRAINING_EN;
-		I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
-		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+		tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
 		tmp |= TX_TRAINING_EN;
-		I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
 	}
 }
 
@@ -493,14 +505,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
 	enum port port;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
 		tmp |= DDI_BUF_CTL_ENABLE;
-		I915_WRITE(DDI_BUF_CTL(port), tmp);
+		intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
 
-		if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+		if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
 				  DDI_BUF_IS_IDLE),
 				  500))
-			DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+			drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+				port_name(port));
 	}
 }
 
@@ -516,28 +529,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
 
 	/* Program T-INIT master registers */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+		tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
 		tmp &= ~MASTER_INIT_TIMER_MASK;
 		tmp |= intel_dsi->init_count;
-		I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+		intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
 	}
 
 	/* Program DPHY clock lanes timings */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+		intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+			       intel_dsi->dphy_reg);
 
 		/* shadow register inside display core */
-		I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+		intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+			       intel_dsi->dphy_reg);
 	}
 
 	/* Program DPHY data lanes timings */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
-			   intel_dsi->dphy_data_lane_reg);
+		intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+			       intel_dsi->dphy_data_lane_reg);
 
 		/* shadow register inside display core */
-		I915_WRITE(DSI_DATA_TIMING_PARAM(port),
-			   intel_dsi->dphy_data_lane_reg);
+		intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+			       intel_dsi->dphy_data_lane_reg);
 	}
 
 	/*
@@ -549,25 +564,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
 	if (IS_GEN(dev_priv, 11)) {
 		if (afe_clk(encoder, crtc_state) <= 800000) {
 			for_each_dsi_port(port, intel_dsi->ports) {
-				tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+				tmp = intel_de_read(dev_priv,
+						    DPHY_TA_TIMING_PARAM(port));
 				tmp &= ~TA_SURE_MASK;
 				tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
-				I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+				intel_de_write(dev_priv,
+					       DPHY_TA_TIMING_PARAM(port),
+					       tmp);
 
 				/* shadow register inside display core */
-				tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+				tmp = intel_de_read(dev_priv,
+						    DSI_TA_TIMING_PARAM(port));
 				tmp &= ~TA_SURE_MASK;
 				tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
-				I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+				intel_de_write(dev_priv,
+					       DSI_TA_TIMING_PARAM(port), tmp);
 			}
 		}
 	}
 
 	if (IS_ELKHARTLAKE(dev_priv)) {
 		for_each_dsi_phy(phy, intel_dsi->phys) {
-			tmp = I915_READ(ICL_DPHY_CHKN(phy));
+			tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
 			tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
-			I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
+			intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
 		}
 	}
 }
@@ -579,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
 	u32 tmp;
 	enum phy phy;
 
-	mutex_lock(&dev_priv->dpll_lock);
-	tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+	mutex_lock(&dev_priv->dpll.lock);
+	tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	for_each_dsi_phy(phy, intel_dsi->phys)
 		tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
 
-	I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
-	mutex_unlock(&dev_priv->dpll_lock);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -595,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
 	u32 tmp;
 	enum phy phy;
 
-	mutex_lock(&dev_priv->dpll_lock);
-	tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+	mutex_lock(&dev_priv->dpll.lock);
+	tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	for_each_dsi_phy(phy, intel_dsi->phys)
 		tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
 
-	I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
-	mutex_unlock(&dev_priv->dpll_lock);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -613,14 +633,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 	enum phy phy;
 	u32 val;
 
-	mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&dev_priv->dpll.lock);
 
-	val = I915_READ(ICL_DPCLKA_CFGCR0);
+	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	for_each_dsi_phy(phy, intel_dsi->phys) {
 		val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 		val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
 	}
-	I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
 	for_each_dsi_phy(phy, intel_dsi->phys) {
 		if (INTEL_GEN(dev_priv) >= 12)
@@ -628,11 +648,11 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
 		else
 			val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
 	}
-	I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
-	POSTING_READ(ICL_DPCLKA_CFGCR0);
+	intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
 
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void
@@ -649,7 +669,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
 
 		if (intel_dsi->eotp_pkt)
 			tmp &= ~EOTP_DISABLED;
@@ -726,16 +746,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 			}
 		}
 
-		I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
 	}
 
 	/* enable port sync mode if dual link */
 	if (intel_dsi->dual_link) {
 		for_each_dsi_port(port, intel_dsi->ports) {
 			dsi_trans = dsi_port_to_transcoder(port);
-			tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+			tmp = intel_de_read(dev_priv,
+					    TRANS_DDI_FUNC_CTL2(dsi_trans));
 			tmp |= PORT_SYNC_MODE_ENABLE;
-			I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+			intel_de_write(dev_priv,
+				       TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
 		}
 
 		/* configure stream splitting */
@@ -746,7 +768,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 		dsi_trans = dsi_port_to_transcoder(port);
 
 		/* select data lane width */
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
 		tmp &= ~DDI_PORT_WIDTH_MASK;
 		tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
 
@@ -772,15 +794,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
 
 		/* enable DDI buffer */
 		tmp |= TRANS_DDI_FUNC_ENABLE;
-		I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+		intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
 	}
 
 	/* wait for link ready */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
-				LINK_READY), 2500))
-			DRM_ERROR("DSI link not ready\n");
+		if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+				 LINK_READY), 2500))
+			drm_err(&dev_priv->drm, "DSI link not ready\n");
 	}
 }
 
@@ -836,17 +858,18 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 
 	/* minimum hactive as per bspec: 256 pixels */
 	if (adjusted_mode->crtc_hdisplay < 256)
-		DRM_ERROR("hactive is less then 256 pixels\n");
+		drm_err(&dev_priv->drm, "hactive is less than 256 pixels\n");
 
 	/* if RGB666 format, then hactive must be multiple of 4 pixels */
 	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
-		DRM_ERROR("hactive pixels are not multiple of 4\n");
+		drm_err(&dev_priv->drm,
+			"hactive pixels are not a multiple of 4\n");
 
 	/* program TRANS_HTOTAL register */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		I915_WRITE(HTOTAL(dsi_trans),
-			   (hactive - 1) | ((htotal - 1) << 16));
+		intel_de_write(dev_priv, HTOTAL(dsi_trans),
+			       (hactive - 1) | ((htotal - 1) << 16));
 	}
 
 	/* TRANS_HSYNC register to be programmed only for video mode */
@@ -855,11 +878,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 		    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
 			/* BSPEC: hsync size should be atleast 16 pixels */
 			if (hsync_size < 16)
-				DRM_ERROR("hsync size < 16 pixels\n");
+				drm_err(&dev_priv->drm,
+					"hsync size < 16 pixels\n");
 		}
 
 		if (hback_porch < 16)
-			DRM_ERROR("hback porch < 16 pixels\n");
+			drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
 
 		if (intel_dsi->dual_link) {
 			hsync_start /= 2;
@@ -868,8 +892,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 
 		for_each_dsi_port(port, intel_dsi->ports) {
 			dsi_trans = dsi_port_to_transcoder(port);
-			I915_WRITE(HSYNC(dsi_trans),
-				   (hsync_start - 1) | ((hsync_end - 1) << 16));
+			intel_de_write(dev_priv, HSYNC(dsi_trans),
+				       (hsync_start - 1) | ((hsync_end - 1) << 16));
 		}
 	}
 
@@ -882,21 +906,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 		 * struct drm_display_mode.
 		 * For interlace mode: program required pixel minus 2
 		 */
-		I915_WRITE(VTOTAL(dsi_trans),
-			   (vactive - 1) | ((vtotal - 1) << 16));
+		intel_de_write(dev_priv, VTOTAL(dsi_trans),
+			       (vactive - 1) | ((vtotal - 1) << 16));
 	}
 
 	if (vsync_end < vsync_start || vsync_end > vtotal)
-		DRM_ERROR("Invalid vsync_end value\n");
+		drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
 
 	if (vsync_start < vactive)
-		DRM_ERROR("vsync_start less than vactive\n");
+		drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
 
 	/* program TRANS_VSYNC register */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		I915_WRITE(VSYNC(dsi_trans),
-			   (vsync_start - 1) | ((vsync_end - 1) << 16));
+		intel_de_write(dev_priv, VSYNC(dsi_trans),
+			       (vsync_start - 1) | ((vsync_end - 1) << 16));
 	}
 
 	/*
@@ -907,15 +931,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
 	 */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+		intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
 	}
 
 	/* program TRANS_VBLANK register, should be same as vtotal programmed */
 	if (INTEL_GEN(dev_priv) >= 12) {
 		for_each_dsi_port(port, intel_dsi->ports) {
 			dsi_trans = dsi_port_to_transcoder(port);
-			I915_WRITE(VBLANK(dsi_trans),
-				   (vactive - 1) | ((vtotal - 1) << 16));
+			intel_de_write(dev_priv, VBLANK(dsi_trans),
+				       (vactive - 1) | ((vtotal - 1) << 16));
 		}
 	}
 }
@@ -930,14 +954,15 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
 
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		tmp = I915_READ(PIPECONF(dsi_trans));
+		tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
 		tmp |= PIPECONF_ENABLE;
-		I915_WRITE(PIPECONF(dsi_trans), tmp);
+		intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
 
 		/* wait for transcoder to be enabled */
 		if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
 					  I965_PIPECONF_ACTIVE, 10))
-			DRM_ERROR("DSI transcoder not enabled\n");
+			drm_err(&dev_priv->drm,
+				"DSI transcoder not enabled\n");
 	}
 }
 
@@ -968,26 +993,26 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
 		dsi_trans = dsi_port_to_transcoder(port);
 
 		/* program hst_tx_timeout */
-		tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
 		tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
 		tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
-		I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
 
 		/* FIXME: DSI_CALIB_TO */
 
 		/* program lp_rx_host timeout */
-		tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
 		tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
 		tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
-		I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
 
 		/* FIXME: DSI_PWAIT_TO */
 
 		/* program turn around timeout */
-		tmp = I915_READ(DSI_TA_TO(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
 		tmp &= ~TA_TIMEOUT_VALUE_MASK;
 		tmp |= TA_TIMEOUT_VALUE(ta_timeout);
-		I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
 	}
 }
 
@@ -1041,14 +1066,15 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 		 * FIXME: This uses the number of DW's currently in the payload
 		 * receive queue. This is probably not what we want here.
 		 */
-		tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
 		tmp &= NUMBER_RX_PLOAD_DW_MASK;
 		/* multiply "Number Rx Payload DW" by 4 to get max value */
 		tmp = tmp * 4;
 		dsi = intel_dsi->dsi_hosts[port]->device;
 		ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
 		if (ret < 0)
-			DRM_ERROR("error setting max return pkt size%d\n", tmp);
+			drm_err(&dev_priv->drm,
+				"error setting max return pkt size %d\n", tmp);
 	}
 
 	/* panel power on related mipi dsi vbt sequences */
@@ -1077,8 +1103,6 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
 {
-	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
 	/* step3b */
 	gen11_dsi_map_pll(encoder, pipe_config);
 
@@ -1092,13 +1116,24 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
 
 	/* step6c: configure transcoder timings */
 	gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+}
+
+static void gen11_dsi_enable(struct intel_encoder *encoder,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct drm_connector_state *conn_state)
+{
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+	WARN_ON(crtc_state->has_pch_encoder);
 
 	/* step6d: enable dsi transcoder */
 	gen11_dsi_enable_transcoder(encoder);
 
 	/* step7: enable backlight */
-	intel_panel_enable_backlight(pipe_config, conn_state);
+	intel_panel_enable_backlight(crtc_state, conn_state);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+
+	intel_crtc_vblank_on(crtc_state);
 }
 
 static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
@@ -1113,14 +1148,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
 		dsi_trans = dsi_port_to_transcoder(port);
 
 		/* disable transcoder */
-		tmp = I915_READ(PIPECONF(dsi_trans));
+		tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
 		tmp &= ~PIPECONF_ENABLE;
-		I915_WRITE(PIPECONF(dsi_trans), tmp);
+		intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
 
 		/* wait for transcoder to be disabled */
 		if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
 					    I965_PIPECONF_ACTIVE, 50))
-			DRM_ERROR("DSI trancoder not disabled\n");
+			drm_err(&dev_priv->drm,
+				"DSI transcoder not disabled\n");
 	}
 }
 
@@ -1147,32 +1183,34 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
 	/* put dsi link in ULPS */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+		tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
 		tmp |= LINK_ENTER_ULPS;
 		tmp &= ~LINK_ULPS_TYPE_LP11;
-		I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+		intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
 
-		if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
-				LINK_IN_ULPS),
+		if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+				 LINK_IN_ULPS),
 				10))
-			DRM_ERROR("DSI link not in ULPS\n");
+			drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
 	}
 
 	/* disable ddi function */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
 		tmp &= ~TRANS_DDI_FUNC_ENABLE;
-		I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+		intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
 	}
 
 	/* disable port sync mode if dual link */
 	if (intel_dsi->dual_link) {
 		for_each_dsi_port(port, intel_dsi->ports) {
 			dsi_trans = dsi_port_to_transcoder(port);
-			tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+			tmp = intel_de_read(dev_priv,
+					    TRANS_DDI_FUNC_CTL2(dsi_trans));
 			tmp &= ~PORT_SYNC_MODE_ENABLE;
-			I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+			intel_de_write(dev_priv,
+				       TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
 		}
 	}
 }
@@ -1186,15 +1224,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
 
 	gen11_dsi_ungate_clocks(encoder);
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
 		tmp &= ~DDI_BUF_CTL_ENABLE;
-		I915_WRITE(DDI_BUF_CTL(port), tmp);
+		intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
 
-		if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+		if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
 				 DDI_BUF_IS_IDLE),
 				 8))
-			DRM_ERROR("DDI port:%c buffer not idle\n",
-				  port_name(port));
+			drm_err(&dev_priv->drm,
+				"DDI port:%c buffer not idle\n",
+				port_name(port));
 	}
 	gen11_dsi_gate_clocks(encoder);
 }
@@ -1219,9 +1258,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
 
 	/* set mode to DDI */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+		tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
 		tmp &= ~COMBO_PHY_MODE_DSI;
-		I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+		intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
 	}
 }
 
@@ -1311,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
 static void gen11_dsi_get_config(struct intel_encoder *encoder,
 				 struct intel_crtc_state *pipe_config)
 {
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
 	intel_dsc_get_config(encoder, pipe_config);
 
 	/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
-	pipe_config->port_clock =
-		cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+	pipe_config->port_clock = intel_dpll_get_freq(i915,
+						      pipe_config->shared_dpll);
 
 	pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
 	if (intel_dsi->dual_link)
@@ -1357,11 +1396,13 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
 		return ret;
 
 	/* DSI specific sanity checks on the common code */
-	WARN_ON(vdsc_cfg->vbr_enable);
-	WARN_ON(vdsc_cfg->simple_422);
-	WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
-	WARN_ON(vdsc_cfg->slice_height < 8);
-	WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+	drm_WARN_ON(&dev_priv->drm,
+		    vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+	drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+	drm_WARN_ON(&dev_priv->drm,
+		    vdsc_cfg->pic_height % vdsc_cfg->slice_height);
 
 	ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
 	if (ret)
@@ -1443,7 +1484,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 
 	for_each_dsi_port(port, intel_dsi->ports) {
 		dsi_trans = dsi_port_to_transcoder(port);
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+		tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 		case TRANS_DDI_EDP_INPUT_A_ON:
 			*pipe = PIPE_A;
@@ -1458,11 +1499,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
 			*pipe = PIPE_D;
 			break;
 		default:
-			DRM_ERROR("Invalid PIPE input\n");
+			drm_err(&dev_priv->drm, "Invalid PIPE input\n");
 			goto out;
 		}
 
-		tmp = I915_READ(PIPECONF(dsi_trans));
+		tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
 		ret = tmp & PIPECONF_ENABLE;
 	}
 out:
@@ -1582,7 +1623,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 	 */
 	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
 	if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
-		DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+		drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+			    prepare_cnt);
 		prepare_cnt = ICL_PREPARE_CNT_MAX;
 	}
 
@@ -1590,28 +1632,33 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 	clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
 				    ths_prepare_ns, tlpx_ns);
 	if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
-		DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+		drm_dbg_kms(&dev_priv->drm,
+			    "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
 		clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
 	}
 
 	/* trail cnt in escape clocks*/
 	trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
 	if (trail_cnt > ICL_TRAIL_CNT_MAX) {
-		DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+		drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+			    trail_cnt);
 		trail_cnt = ICL_TRAIL_CNT_MAX;
 	}
 
 	/* tclk pre count in escape clocks */
 	tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
 	if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
-		DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+		drm_dbg_kms(&dev_priv->drm,
+			    "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
 		tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
 	}
 
 	/* tclk post count in escape clocks */
 	tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
 	if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
-		DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+		drm_dbg_kms(&dev_priv->drm,
+			    "tclk_post_cnt out of range (%d)\n",
+			    tclk_post_cnt);
 		tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
 	}
 
@@ -1619,14 +1666,17 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
 	hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
 				   ths_prepare_ns, tlpx_ns);
 	if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
-		DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+		drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+			    hs_zero_cnt);
 		hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
 	}
 
 	/* hs exit zero cnt in escape clocks */
 	exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
 	if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
-		DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+		drm_dbg_kms(&dev_priv->drm,
+			    "exit_zero_cnt out of range (%d)\n",
+			    exit_zero_cnt);
 		exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
 	}
 
@@ -1668,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
 
 	connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
-	connector->base.display_info.panel_orientation =
-			intel_dsi_get_panel_orientation(connector);
-	drm_connector_init_panel_orientation_property(&connector->base,
+	drm_connector_set_panel_orientation_with_quirk(&connector->base,
+				intel_dsi_get_panel_orientation(connector),
 				connector->panel.fixed_mode->hdisplay,
 				connector->panel.fixed_mode->vdisplay);
 }
@@ -1708,6 +1757,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 
 	encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
 	encoder->pre_enable = gen11_dsi_pre_enable;
+	encoder->enable = gen11_dsi_enable;
 	encoder->disable = gen11_dsi_disable;
 	encoder->post_disable = gen11_dsi_post_disable;
 	encoder->port = port;
@@ -1738,7 +1788,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev->mode_config.mutex);
 
 	if (!fixed_mode) {
-		DRM_ERROR("DSI fixed mode info missing\n");
+		drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
 		goto err;
 	}
 
@@ -1764,7 +1814,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
 	}
 
 	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
-		DRM_DEBUG_KMS("no device found\n");
+		drm_dbg_kms(&dev_priv->drm, "no device found\n");
 		goto err;
 	}
 
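
Nearly all of the icl_dsi.c churn above is one mechanical conversion: the implicit I915_READ()/I915_WRITE()/POSTING_READ() macros, which relied on a dev_priv variable being in scope, become the explicit intel_de_read()/intel_de_write()/intel_de_posting_read() display-engine helpers, and DRM_ERROR()/DRM_DEBUG_KMS() become the device-aware drm_err()/drm_dbg_kms(). The recurring read-modify-write shape, as a hedged sketch in which reg, clear and set stand in for the real registers and fields:

    /* Illustrative only: the RMW idiom used throughout this file. */
    static void example_rmw(struct drm_i915_private *dev_priv,
                            i915_reg_t reg, u32 clear, u32 set)
    {
            u32 tmp;

            tmp = intel_de_read(dev_priv, reg);    /* was: I915_READ(reg) */
            tmp &= ~clear;
            tmp |= set;
            intel_de_write(dev_priv, reg, tmp);    /* was: I915_WRITE(reg, tmp) */
            intel_de_posting_read(dev_priv, reg);  /* was: POSTING_READ(reg) */
    }
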
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33..e21fb14 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -10,6 +10,7 @@
 
 #include "i915_drv.h"
 #include "intel_acpi.h"
+#include "intel_display_types.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
 #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
@@ -156,3 +157,91 @@ void intel_register_dsm_handler(void)
 void intel_unregister_dsm_handler(void)
 {
 }
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT		0
+#define ACPI_DISPLAY_INDEX_MASK			(0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT	4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK	(0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT			8
+#define ACPI_DISPLAY_TYPE_MASK			(0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER			(0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA			(1 << 8)
+#define ACPI_DISPLAY_TYPE_TV			(2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL	(3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT		12
+#define ACPI_VENDOR_SPECIFIC_MASK		(0xf << 12)
+#define ACPI_BIOS_CAN_DETECT			(1 << 16)
+#define ACPI_DEPENDS_ON_VGA			(1 << 17)
+#define ACPI_PIPE_ID_SHIFT			18
+#define ACPI_PIPE_ID_MASK			(7 << 18)
+#define ACPI_DEVICE_ID_SCHEME			(1ULL << 31)
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+	u32 display_type;
+
+	switch (connector->base.connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+	case DRM_MODE_CONNECTOR_DVIA:
+		display_type = ACPI_DISPLAY_TYPE_VGA;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+	case DRM_MODE_CONNECTOR_TV:
+		display_type = ACPI_DISPLAY_TYPE_TV;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_DSI:
+		display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_VIRTUAL:
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	default:
+		MISSING_CASE(connector->base.connector_type);
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	}
+
+	return display_type;
+}
+
+void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *drm_dev = &dev_priv->drm;
+	struct intel_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	u8 display_index[16] = {};
+
+	/* Populate the ACPI IDs for all connectors for a given drm_device */
+	drm_connector_list_iter_begin(drm_dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		u32 device_id, type;
+
+		device_id = acpi_display_type(connector);
+
+		/* Use display type specific display index. */
+		type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+			>> ACPI_DISPLAY_TYPE_SHIFT;
+		device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+		connector->acpi_device_id = device_id;
+	}
+	drm_connector_list_iter_end(&conn_iter);
+}
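
Worked through by hand, the packing above puts a per-type index in bits 0-3 and the _DOD display type in bits 8-11, so IDs stay stable within each connector type. A hedged sketch of the IDs an eDP panel plus two HDMI outputs would receive, assuming that enumeration order:

    /* Hedged example of the _DOD id packing (connector order assumed). */
    #include <stdio.h>

    #define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
    #define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)

    int main(void)
    {
            unsigned int edp   = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL | 0;
            unsigned int hdmi1 = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | 0;
            unsigned int hdmi2 = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | 1;

            /* Prints eDP=0x0400 HDMI-1=0x0300 HDMI-2=0x0301 */
            printf("eDP=0x%04x HDMI-1=0x%04x HDMI-2=0x%04x\n",
                   edp, hdmi1, hdmi2);
            return 0;
    }
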
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 1c576b3..e8b0686 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,12 +6,17 @@
 #ifndef __INTEL_ACPI_H__
 #define __INTEL_ACPI_H__
 
+struct drm_i915_private;
+
 #ifdef CONFIG_ACPI
 void intel_register_dsm_handler(void);
 void intel_unregister_dsm_handler(void);
+void intel_acpi_device_id_update(struct drm_i915_private *i915);
 #else
 static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
+static inline
+void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
 #endif /* CONFIG_ACPI */
 
 #endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c362eec..d043057 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,9 @@
 #include <drm/drm_plane_helper.h>
 
 #include "intel_atomic.h"
+#include "intel_cdclk.h"
 #include "intel_display_types.h"
+#include "intel_global_state.h"
 #include "intel_hdcp.h"
 #include "intel_psr.h"
 #include "intel_sprite.h"
@@ -64,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
 	else if (property == dev_priv->broadcast_rgb_property)
 		*val = intel_conn_state->broadcast_rgb;
 	else {
-		DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-				 property->base.id, property->name);
+		drm_dbg_atomic(&dev_priv->drm,
+			       "Unknown property [PROP:%d:%s]\n",
+			       property->base.id, property->name);
 		return -EINVAL;
 	}
 
@@ -101,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
 		return 0;
 	}
 
-	DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
-			 property->base.id, property->name);
+	drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+		       property->base.id, property->name);
 	return -EINVAL;
 }
 
@@ -178,6 +181,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
 
 /**
  * intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
  */
 bool
 intel_connector_needs_modeset(struct intel_atomic_state *state,
@@ -314,7 +319,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
 		}
 	}
 
-	if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+	if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+		     "Cannot find scaler for %s:%d\n", name, idx))
 		return;
 
 	/* set scaler mode */
@@ -357,8 +363,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
 		mode = SKL_PS_SCALER_MODE_DYN;
 	}
 
-	DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-		      intel_crtc->pipe, *scaler_id, name, idx);
+	drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+		    intel_crtc->pipe, *scaler_id, name, idx);
 	scaler_state->scalers[*scaler_id].mode = mode;
 }
 
@@ -409,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 
 	/* fail if required scalers > available scalers */
 	if (num_scalers_need > intel_crtc->num_scalers){
-		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
-			num_scalers_need, intel_crtc->num_scalers);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Too many scaling requests %d > %d\n",
+			    num_scalers_need, intel_crtc->num_scalers);
 		return -EINVAL;
 	}
 
@@ -455,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 				plane = drm_plane_from_index(&dev_priv->drm, i);
 				state = drm_atomic_get_plane_state(drm_state, plane);
 				if (IS_ERR(state)) {
-					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
-						plane->base.id);
+					drm_dbg_kms(&dev_priv->drm,
+						    "Failed to add [PLANE:%d] to drm_state\n",
+						    plane->base.id);
 					return PTR_ERR(state);
 				}
 			}
@@ -465,7 +473,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			idx = plane->base.id;
 
 			/* plane on different crtc cannot be a scaler user of this crtc */
-			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+			if (drm_WARN_ON(&dev_priv->drm,
+					intel_plane->pipe != intel_crtc->pipe))
 				continue;
 
 			plane_state = intel_atomic_get_new_plane_state(intel_state,
@@ -494,18 +503,28 @@ intel_atomic_state_alloc(struct drm_device *dev)
 	return &state->base;
 }
 
+void intel_atomic_state_free(struct drm_atomic_state *_state)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(_state);
+
+	drm_atomic_state_default_release(&state->base);
+	kfree(state->global_objs);
+
+	i915_sw_fence_fini(&state->commit_ready);
+
+	kfree(state);
+}
+
 void intel_atomic_state_clear(struct drm_atomic_state *s)
 {
 	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
 	drm_atomic_state_default_clear(&state->base);
+	intel_atomic_clear_global_state(state);
+
 	state->dpll_set = state->modeset = false;
 	state->global_state_changed = false;
 	state->active_pipes = 0;
-	memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
-	memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
-	memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
-	memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
-	state->cdclk.pipe = INVALID_PIPE;
 }
 
 struct intel_crtc_state *
@@ -520,7 +539,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 	return to_intel_crtc_state(crtc_state);
 }
 
-int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
@@ -539,7 +558,7 @@ int intel_atomic_lock_global_state(struct intel_atomic_state *state)
 	return 0;
 }
 
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 74c749d..1114629 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -45,6 +45,7 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
 void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
 void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
 struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_free(struct drm_atomic_state *state);
 void intel_atomic_state_clear(struct drm_atomic_state *state);
 
 struct intel_crtc_state *
@@ -55,8 +56,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
 			       struct intel_crtc *intel_crtc,
 			       struct intel_crtc_state *crtc_state);
 
-int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
 
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
 
 #endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3e97af6..457b258 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -37,6 +37,7 @@
 
 #include "i915_trace.h"
 #include "intel_atomic_plane.h"
+#include "intel_cdclk.h"
 #include "intel_display_types.h"
 #include "intel_pm.h"
 #include "intel_sprite.h"
@@ -132,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
 	kfree(plane_state);
 }
 
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+				    const struct intel_plane_state *plane_state)
+{
+	unsigned int src_w, src_h, dst_w, dst_h;
+	unsigned int pixel_rate = crtc_state->pixel_rate;
+
+	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+	dst_w = drm_rect_width(&plane_state->uapi.dst);
+	dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+	/* Downscaling limits the maximum pixel rate */
+	dst_w = min(src_w, dst_w);
+	dst_h = min(src_h, dst_h);
+
+	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+				dst_w * dst_h);
+}
+
 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int cpp;
+	unsigned int pixel_rate;
 
 	if (!plane_state->uapi.visible)
 		return 0;
 
+	pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
 	cpp = fb->format->cpp[0];
 
 	/*
@@ -152,45 +175,67 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
 	if (fb->format->is_yuv && fb->format->num_planes > 1)
 		cpp *= 4;
 
-	return cpp * crtc_state->pixel_rate;
+	return pixel_rate * cpp;
 }
 
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
-				struct intel_plane *plane)
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+			       struct intel_plane *plane,
+			       bool *need_cdclk_calc)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct intel_plane_state *plane_state =
 		intel_atomic_get_new_plane_state(state, plane);
 	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
-	struct intel_crtc_state *crtc_state;
+	const struct intel_cdclk_state *cdclk_state;
+	const struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
 
 	if (!plane_state->uapi.visible || !plane->min_cdclk)
-		return false;
+		return 0;
 
-	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 
-	crtc_state->min_cdclk[plane->id] =
-		plane->min_cdclk(crtc_state, plane_state);
+	new_crtc_state->min_cdclk[plane->id] =
+		plane->min_cdclk(new_crtc_state, plane_state);
 
 	/*
-	 * Does the cdclk need to be bumbed up?
+	 * No need to check against the cdclk state if
+	 * the min cdclk for the plane doesn't increase.
 	 *
-	 * Note: we obviously need to be called before the new
-	 * cdclk frequency is calculated so state->cdclk.logical
-	 * hasn't been populated yet. Hence we look at the old
-	 * cdclk state under dev_priv->cdclk.logical. This is
-	 * safe as long we hold at least one crtc mutex (which
-	 * must be true since we have crtc_state).
+	 * I.e. we only ever increase the cdclk due to plane
+	 * requirements. This can reduce back-and-forth
+	 * display blinking due to constant cdclk changes.
 	 */
-	if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
-		DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
-			      plane->base.base.id, plane->base.name,
-			      crtc_state->min_cdclk[plane->id],
-			      dev_priv->cdclk.logical.cdclk);
-		return true;
-	}
+	if (new_crtc_state->min_cdclk[plane->id] <=
+	    old_crtc_state->min_cdclk[plane->id])
+		return 0;
 
-	return false;
+	cdclk_state = intel_atomic_get_cdclk_state(state);
+	if (IS_ERR(cdclk_state))
+		return PTR_ERR(cdclk_state);
+
+	/*
+	 * No need to recalculate the cdclk state if
+	 * the min cdclk for the pipe doesn't increase.
+	 *
+	 * I.e. we only ever increase the cdclk due to plane
+	 * requirements. This can reduce back-and-forth
+	 * display blinking due to constant cdclk changes.
+	 */
+	if (new_crtc_state->min_cdclk[plane->id] <=
+	    cdclk_state->min_cdclk[crtc->pipe])
+		return 0;
+
+	drm_dbg_kms(&dev_priv->drm,
+		    "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
+		    plane->base.base.id, plane->base.name,
+		    new_crtc_state->min_cdclk[plane->id],
+		    crtc->base.base.id, crtc->base.name,
+		    cdclk_state->min_cdclk[crtc->pipe]);
+	*need_cdclk_calc = true;
+
+	return 0;
 }
 
 static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
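
Two behavioural points above are worth pinning down: intel_plane_pixel_rate() scales the crtc pixel rate by the downscale ratio (the min()s clamp out upscaling, which never raises the fetch rate), and the reworked intel_plane_calc_min_cdclk() only asks for a cdclk recomputation when a plane's requirement actually rises. A hedged worked example of the rate calculation with illustrative numbers:

    /* Worked example of the downscale-adjusted pixel rate. */
    #include <stdio.h>

    static unsigned int plane_pixel_rate(unsigned long long crtc_rate_khz,
                                         unsigned int src_w, unsigned int src_h,
                                         unsigned int dst_w, unsigned int dst_h)
    {
            /* Upscaling does not raise the rate: clamp dst to src. */
            if (dst_w > src_w)
                    dst_w = src_w;
            if (dst_h > src_h)
                    dst_h = src_h;

            /* DIV_ROUND_UP(crtc_rate * src area, dst area) */
            unsigned long long num = crtc_rate_khz * src_w * src_h;
            unsigned long long den = (unsigned long long)dst_w * dst_h;

            return (unsigned int)((num + den - 1) / den);
    }

    int main(void)
    {
            /* A 3840x2160 plane downscaled to 1920x1080 on a 594000 kHz crtc
             * fetches 4x the pixels per output pixel -> 2376000 kHz.
             */
            printf("%u kHz\n", plane_pixel_rate(594000, 3840, 2160, 1920, 1080));
            return 0;
    }

(The kernel code works on 16.16 fixed-point source coordinates; the >> 16 in the original drops the fraction before this math.)
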
@@ -225,12 +270,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
 					struct intel_plane_state *new_plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
-	const struct drm_framebuffer *fb;
+	const struct drm_framebuffer *fb = new_plane_state->hw.fb;
 	int ret;
 
-	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
-	fb = new_plane_state->hw.fb;
-
 	new_crtc_state->active_planes &= ~BIT(plane->id);
 	new_crtc_state->nv12_planes &= ~BIT(plane->id);
 	new_crtc_state->c8_planes &= ~BIT(plane->id);
@@ -292,6 +334,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
 	const struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 
+	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
 	new_plane_state->uapi.visible = false;
 	if (!crtc)
 		return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 5cedafd..a6bbf42 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
 
 extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
 
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+				    const struct intel_plane_state *plane_state);
+
 unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state);
 void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
@@ -46,7 +49,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 				    struct intel_crtc_state *crtc_state,
 				    const struct intel_plane_state *old_plane_state,
 				    struct intel_plane_state *plane_state);
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
-				struct intel_plane *plane);
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+			       struct intel_plane *plane,
+			       bool *need_cdclk_calc);
 
 #endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b180407..62f234f 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
 #include "i915_drv.h"
 #include "intel_atomic.h"
 #include "intel_audio.h"
+#include "intel_cdclk.h"
 #include "intel_display_types.h"
 #include "intel_lpe_audio.h"
 
@@ -148,6 +149,10 @@ static const struct {
 	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
 	{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
 	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+	{ 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+	{ 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+	{ 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+	{ 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
 };
 
 /* HDMI N/CTS table */
@@ -233,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
 static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	int i;
@@ -242,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
 			break;
 	}
 
+	if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+		i = ARRAY_SIZE(hdmi_audio_clock);
+
 	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
 		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
 			      adjusted_mode->crtc_clock);
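
The four table entries added above cover the 4k (297 MHz) and 8k
(594 MHz) HDMI pixel clocks plus their 1000/1001 variants, but only
gen12+ hardware can drive HDMI audio at those rates; on older platforms
the new check forces the lookup index past the end of the table so the
existing "not found, falling back to defaults" path is taken. Put
together, the lookup now behaves like this (reconstructed sketch; the
`clock` field name is assumed from the table initializers above):

    for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++)
            if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
                    break;

    /* pre-gen12 hardware cannot do HDMI audio above 148.5 MHz */
    if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
            i = ARRAY_SIZE(hdmi_audio_clock);       /* force the fallback */
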
@@ -291,18 +300,18 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
 	u32 tmp;
 	int i;
 
-	tmp = I915_READ(reg_eldv);
+	tmp = intel_de_read(dev_priv, reg_eldv);
 	tmp &= bits_eldv;
 
 	if (!tmp)
 		return false;
 
-	tmp = I915_READ(reg_elda);
+	tmp = intel_de_read(dev_priv, reg_elda);
 	tmp &= ~bits_elda;
-	I915_WRITE(reg_elda, tmp);
+	intel_de_write(dev_priv, reg_elda, tmp);
 
 	for (i = 0; i < drm_eld_size(eld) / 4; i++)
-		if (I915_READ(reg_edid) != *((const u32 *)eld + i))
+		if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
 			return false;
 
 	return true;
@@ -315,18 +324,18 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 eldv, tmp;
 
-	DRM_DEBUG_KMS("Disable audio codec\n");
+	drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n");
 
-	tmp = I915_READ(G4X_AUD_VID_DID);
+	tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
 	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
 		eldv = G4X_ELDV_DEVCL_DEVBLC;
 	else
 		eldv = G4X_ELDV_DEVCTG;
 
 	/* Invalidate ELD */
-	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
 	tmp &= ~eldv;
-	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+	intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
 }
 
 static void g4x_audio_codec_enable(struct intel_encoder *encoder,
@@ -340,9 +349,10 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
 	u32 tmp;
 	int len, i;
 
-	DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
+	drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n",
+		    drm_eld_size(eld));
 
-	tmp = I915_READ(G4X_AUD_VID_DID);
+	tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
 	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
 		eldv = G4X_ELDV_DEVCL_DEVBLC;
 	else
@@ -354,19 +364,20 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
 			       G4X_HDMIW_HDMIEDID))
 		return;
 
-	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
 	tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
 	len = (tmp >> 9) & 0x1f;		/* ELD buffer size */
-	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+	intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
 
 	len = min(drm_eld_size(eld) / 4, len);
-	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	drm_dbg(&dev_priv->drm, "ELD size %d\n", len);
 	for (i = 0; i < len; i++)
-		I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
+		intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
+			       *((const u32 *)eld + i));
 
-	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
 	tmp |= eldv;
-	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+	intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
 }
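
The bulk of this file's changes (and of the intel_bios.c changes
below) is a mechanical conversion from the implicit-dev_priv
I915_READ()/I915_WRITE() macros to the explicit
intel_de_read()/intel_de_write() display-engine accessors, plus
DRM_DEBUG_*/DRM_ERROR moving to the device-aware drm_dbg_*/drm_err
variants so log lines carry the drm device name. The recurring
read-modify-write idiom is, in the new spelling (representative
sketch, not any specific hunk):

    u32 tmp;

    tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
    tmp &= ~G4X_ELD_ADDR_MASK;              /* clear the field */
    tmp |= eldv;                            /* set the new bits */
    intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
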
 
 static void
@@ -384,11 +395,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
 	rate = acomp ? acomp->aud_sample_rate[port] : 0;
 	nm = audio_config_dp_get_n_m(crtc_state, rate);
 	if (nm)
-		DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+		drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+			    nm->n);
 	else
-		DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+		drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
 
-	tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -400,9 +412,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
 		tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	}
 
-	I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
 
-	tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
 	tmp &= ~AUD_CONFIG_M_MASK;
 	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
 	tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -413,7 +425,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
 		tmp |= AUD_M_CTS_M_PROG_ENABLE;
 	}
 
-	I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
 }
 
 static void
@@ -429,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
 
 	rate = acomp ? acomp->aud_sample_rate[port] : 0;
 
-	tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -437,25 +449,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
 
 	n = audio_config_hdmi_get_n(crtc_state, rate);
 	if (n != 0) {
-		DRM_DEBUG_KMS("using N %d\n", n);
+		drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
 
 		tmp &= ~AUD_CONFIG_N_MASK;
 		tmp |= AUD_CONFIG_N(n);
 		tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	} else {
-		DRM_DEBUG_KMS("using automatic N\n");
+		drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
 	}
 
-	I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
 
 	/*
 	 * Let's disable "Enable CTS or M Prog bit"
 	 * and let HW calculate the value
 	 */
-	tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
 	tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
 	tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
-	I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
 }
 
 static void
@@ -476,26 +488,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 	u32 tmp;
 
-	DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
-		      transcoder_name(cpu_transcoder));
+	drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
+		    transcoder_name(cpu_transcoder));
 
 	mutex_lock(&dev_priv->av_mutex);
 
 	/* Disable timestamps */
-	tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
 	if (intel_crtc_has_dp_encoder(old_crtc_state))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
-	I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
 
 	/* Invalidate ELD */
-	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
 	tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
 	tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
-	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
 	mutex_unlock(&dev_priv->av_mutex);
 }
@@ -511,16 +523,17 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
 	u32 tmp;
 	int len, i;
 
-	DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
-		      transcoder_name(cpu_transcoder), drm_eld_size(eld));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Enable audio codec on transcoder %s, %u bytes ELD\n",
+		     transcoder_name(cpu_transcoder), drm_eld_size(eld));
 
 	mutex_lock(&dev_priv->av_mutex);
 
 	/* Enable audio presence detect, invalidate ELD */
-	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
 	tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
 	tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
-	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
 	/*
 	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -530,19 +543,20 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
 	 */
 
 	/* Reset ELD write address */
-	tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
 	tmp &= ~IBX_ELD_ADDRESS_MASK;
-	I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+	intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
 
 	/* Up to 84 bytes of hw ELD buffer */
 	len = min(drm_eld_size(eld), 84);
 	for (i = 0; i < len / 4; i++)
-		I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
+		intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
+			       *((const u32 *)eld + i));
 
 	/* ELD valid */
-	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
 	tmp |= AUDIO_ELD_VALID(cpu_transcoder);
-	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
 	/* Enable timestamps */
 	hsw_audio_config_update(encoder, crtc_state);
@@ -561,11 +575,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
 	u32 tmp, eldv;
 	i915_reg_t aud_config, aud_cntrl_st2;
 
-	DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
-		      encoder->base.base.id, encoder->base.name,
-		      pipe_name(pipe));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+		     encoder->base.base.id, encoder->base.name,
+		     pipe_name(pipe));
 
-	if (WARN_ON(port == PORT_A))
+	if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
 		return;
 
 	if (HAS_PCH_IBX(dev_priv)) {
@@ -580,21 +595,21 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
 	}
 
 	/* Disable timestamps */
-	tmp = I915_READ(aud_config);
+	tmp = intel_de_read(dev_priv, aud_config);
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
 	if (intel_crtc_has_dp_encoder(old_crtc_state))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
-	I915_WRITE(aud_config, tmp);
+	intel_de_write(dev_priv, aud_config, tmp);
 
 	eldv = IBX_ELD_VALID(port);
 
 	/* Invalidate ELD */
-	tmp = I915_READ(aud_cntrl_st2);
+	tmp = intel_de_read(dev_priv, aud_cntrl_st2);
 	tmp &= ~eldv;
-	I915_WRITE(aud_cntrl_st2, tmp);
+	intel_de_write(dev_priv, aud_cntrl_st2, tmp);
 }
 
 static void ilk_audio_codec_enable(struct intel_encoder *encoder,
@@ -611,11 +626,12 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
 	int len, i;
 	i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
-	DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
-		      encoder->base.base.id, encoder->base.name,
-		      pipe_name(pipe), drm_eld_size(eld));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+		    encoder->base.base.id, encoder->base.name,
+		    pipe_name(pipe), drm_eld_size(eld));
 
-	if (WARN_ON(port == PORT_A))
+	if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
 		return;
 
 	/*
@@ -646,27 +662,28 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
 	eldv = IBX_ELD_VALID(port);
 
 	/* Invalidate ELD */
-	tmp = I915_READ(aud_cntrl_st2);
+	tmp = intel_de_read(dev_priv, aud_cntrl_st2);
 	tmp &= ~eldv;
-	I915_WRITE(aud_cntrl_st2, tmp);
+	intel_de_write(dev_priv, aud_cntrl_st2, tmp);
 
 	/* Reset ELD write address */
-	tmp = I915_READ(aud_cntl_st);
+	tmp = intel_de_read(dev_priv, aud_cntl_st);
 	tmp &= ~IBX_ELD_ADDRESS_MASK;
-	I915_WRITE(aud_cntl_st, tmp);
+	intel_de_write(dev_priv, aud_cntl_st, tmp);
 
 	/* Up to 84 bytes of hw ELD buffer */
 	len = min(drm_eld_size(eld), 84);
 	for (i = 0; i < len / 4; i++)
-		I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
+		intel_de_write(dev_priv, hdmiw_hdmiedid,
+			       *((const u32 *)eld + i));
 
 	/* ELD valid */
-	tmp = I915_READ(aud_cntrl_st2);
+	tmp = intel_de_read(dev_priv, aud_cntrl_st2);
 	tmp |= eldv;
-	I915_WRITE(aud_cntrl_st2, tmp);
+	intel_de_write(dev_priv, aud_cntrl_st2, tmp);
 
 	/* Enable timestamps */
-	tmp = I915_READ(aud_config);
+	tmp = intel_de_read(dev_priv, aud_config);
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
@@ -674,7 +691,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	else
 		tmp |= audio_config_hdmi_pixel_clock(crtc_state);
-	I915_WRITE(aud_config, tmp);
+	intel_de_write(dev_priv, aud_config, tmp);
 }
 
 /**
@@ -701,14 +718,15 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
 
 	/* FIXME precompute the ELD in .compute_config() */
 	if (!connector->eld[0])
-		DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
-			      connector->base.id, connector->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Bogus ELD on [CONNECTOR:%d:%s]\n",
+			    connector->base.id, connector->name);
 
-	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-			 connector->base.id,
-			 connector->name,
-			 encoder->base.base.id,
-			 encoder->base.name);
+	drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		connector->base.id,
+		connector->name,
+		encoder->base.base.id,
+		encoder->base.name);
 
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
 
@@ -800,37 +818,61 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 	}
 }
 
+static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
+					struct intel_crtc *crtc,
+					bool enable)
+{
+	struct intel_cdclk_state *cdclk_state;
+	int ret;
+
+	/* need to hold at least one crtc lock for the global state */
+	ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx);
+	if (ret)
+		return ret;
+
+	cdclk_state = intel_atomic_get_cdclk_state(state);
+	if (IS_ERR(cdclk_state))
+		return PTR_ERR(cdclk_state);
+
+	cdclk_state->force_min_cdclk_changed = true;
+	cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+
+	ret = intel_atomic_lock_global_state(&cdclk_state->base);
+	if (ret)
+		return ret;
+
+	return drm_atomic_commit(&state->base);
+}
+
 static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
 				  bool enable)
 {
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_atomic_state *state;
+	struct intel_crtc *crtc;
 	int ret;
 
+	crtc = intel_get_first_crtc(dev_priv);
+	if (!crtc)
+		return;
+
 	drm_modeset_acquire_init(&ctx, 0);
 	state = drm_atomic_state_alloc(&dev_priv->drm);
-	if (WARN_ON(!state))
+	if (drm_WARN_ON(&dev_priv->drm, !state))
 		return;
 
 	state->acquire_ctx = &ctx;
 
 retry:
-	to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
-	to_intel_atomic_state(state)->cdclk.force_min_cdclk =
-		enable ? 2 * 96000 : 0;
-
-	/* Protects dev_priv->cdclk.force_min_cdclk */
-	ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
-	if (!ret)
-		ret = drm_atomic_commit(state);
-
+	ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
+					   enable);
 	if (ret == -EDEADLK) {
 		drm_atomic_state_clear(state);
 		drm_modeset_backoff(&ctx);
 		goto retry;
 	}
 
-	WARN_ON(ret);
+	drm_WARN_ON(&dev_priv->drm, ret);
 
 	drm_atomic_state_put(state);
 
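
glk_force_audio_cdclk() now delegates the real work to the commit
helper above and keeps only the standard atomic deadlock-backoff loop:
on -EDEADLK the state is cleared, the held modeset locks are dropped
and reacquired via drm_modeset_backoff(), and the commit is retried.
Stripped to its skeleton the pattern is (generic sketch using the core
DRM API; do_the_commit() stands in for any locking-plus-commit step):

    struct drm_modeset_acquire_ctx ctx;

    drm_modeset_acquire_init(&ctx, 0);
    state->acquire_ctx = &ctx;
retry:
    ret = do_the_commit(state);
    if (ret == -EDEADLK) {
            drm_atomic_state_clear(state);
            drm_modeset_backoff(&ctx);      /* drop locks, wait, retry */
            goto retry;
    }
    drm_atomic_state_put(state);
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);
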
@@ -850,9 +892,11 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
 
 	if (dev_priv->audio_power_refcount++ == 0) {
 		if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
-			I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
-			DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
-				      dev_priv->audio_freq_cntrl);
+			intel_de_write(dev_priv, AUD_FREQ_CNTRL,
+				       dev_priv->audio_freq_cntrl);
+			drm_dbg_kms(&dev_priv->drm,
+				    "restored AUD_FREQ_CNTRL to 0x%x\n",
+				    dev_priv->audio_freq_cntrl);
 		}
 
 		/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -860,9 +904,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
 			glk_force_audio_cdclk(dev_priv, true);
 
 		if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-			I915_WRITE(AUD_PIN_BUF_CTL,
-				   (I915_READ(AUD_PIN_BUF_CTL) |
-				    AUD_PIN_BUF_ENABLE));
+			intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
+				       (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
 	}
 
 	return ret;
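
The GLK path above exists because Geminilake audio needs CDCLK to run
at no less than twice BCLK (2 * 96 MHz) for as long as the audio power
domain is held. The refcount makes sure the forced-cdclk commit is
issued only for the first get, and in the matching put path (not
visible in this hunk) it is presumably undone only by the last put:

    if (dev_priv->audio_power_refcount++ == 0)      /* first user */
            glk_force_audio_cdclk(dev_priv, true);
    /* ... */
    if (--dev_priv->audio_power_refcount == 0)      /* last user */
            glk_force_audio_cdclk(dev_priv, false);
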
@@ -897,15 +940,15 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	 * Enable/disable generating the codec wake signal, overriding the
 	 * internal logic to generate the codec wake to controller.
 	 */
-	tmp = I915_READ(HSW_AUD_CHICKENBIT);
+	tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
 	tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
-	I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+	intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
 	usleep_range(1000, 1500);
 
 	if (enable) {
-		tmp = I915_READ(HSW_AUD_CHICKENBIT);
+		tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
 		tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
-		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+		intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
 		usleep_range(1000, 1500);
 	}
 
@@ -917,7 +960,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 
-	if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
 		return -ENODEV;
 
 	return dev_priv->cdclk.hw.cdclk;
@@ -940,7 +983,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 
 	/* MST */
 	if (pipe >= 0) {
-		if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+		if (drm_WARN_ON(&dev_priv->drm,
+				pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
 			return NULL;
 
 		encoder = dev_priv->av_enc_map[pipe];
@@ -992,7 +1036,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
 	/* 1. get the pipe */
 	encoder = get_saved_enc(dev_priv, port, pipe);
 	if (!encoder || !encoder->base.crtc) {
-		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+		drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+			    port_name(port));
 		err = -ENODEV;
 		goto unlock;
 	}
@@ -1023,7 +1068,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
 
 	intel_encoder = get_saved_enc(dev_priv, port, pipe);
 	if (!intel_encoder) {
-		DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+		drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+			    port_name(port));
 		mutex_unlock(&dev_priv->av_mutex);
 		return ret;
 	}
@@ -1057,10 +1103,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 	int i;
 
-	if (WARN_ON(acomp->base.ops || acomp->base.dev))
+	if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
 		return -EEXIST;
 
-	if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			!device_link_add(hda_kdev, i915_kdev,
+					 DL_FLAG_STATELESS)))
 		return -ENOMEM;
 
 	drm_modeset_lock_all(&dev_priv->drm);
@@ -1119,15 +1167,18 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
 				  &i915_audio_component_bind_ops,
 				  I915_COMPONENT_AUDIO);
 	if (ret < 0) {
-		DRM_ERROR("failed to add audio component (%d)\n", ret);
+		drm_err(&dev_priv->drm,
+			"failed to add audio component (%d)\n", ret);
 		/* continue with reduced functionality */
 		return;
 	}
 
 	if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
-		dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
-		DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
-			      dev_priv->audio_freq_cntrl);
+		dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
+							   AUD_FREQ_CNTRL);
+		drm_dbg_kms(&dev_priv->drm,
+			    "init value of AUD_FREQ_CNTRL of 0x%x\n",
+			    dev_priv->audio_freq_cntrl);
 	}
 
 	dev_priv->audio_component_registered = true;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index ef4017a..8391246 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_display.h"
 #include "display/intel_display_types.h"
@@ -228,17 +227,20 @@ parse_panel_options(struct drm_i915_private *dev_priv,
 
 	ret = intel_opregion_get_panel_type(dev_priv);
 	if (ret >= 0) {
-		WARN_ON(ret > 0xf);
+		drm_WARN_ON(&dev_priv->drm, ret > 0xf);
 		panel_type = ret;
-		DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+		drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n",
+			    panel_type);
 	} else {
 		if (lvds_options->panel_type > 0xf) {
-			DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
-				      lvds_options->panel_type);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Invalid VBT panel type 0x%x\n",
+				    lvds_options->panel_type);
 			return;
 		}
 		panel_type = lvds_options->panel_type;
-		DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+		drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n",
+			    panel_type);
 	}
 
 	dev_priv->vbt.panel_type = panel_type;
@@ -253,15 +255,17 @@ parse_panel_options(struct drm_i915_private *dev_priv,
 	switch (drrs_mode) {
 	case 0:
 		dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
-		DRM_DEBUG_KMS("DRRS supported mode is static\n");
+		drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n");
 		break;
 	case 2:
 		dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
-		DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS supported mode is seamless\n");
 		break;
 	default:
 		dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
-		DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS not supported (VBT input)\n");
 		break;
 	}
 }
@@ -298,7 +302,8 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
 
 	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
 
-	DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "Found panel mode in BIOS VBT legacy lfp table:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -309,8 +314,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
 		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
 		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
 			dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
-			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
-				      dev_priv->vbt.bios_lvds_val);
+			drm_dbg_kms(&dev_priv->drm,
+				    "VBT initial LVDS value %x\n",
+				    dev_priv->vbt.bios_lvds_val);
 		}
 	}
 }
@@ -329,20 +335,22 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
 		return;
 
 	if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
-		DRM_ERROR("GDTD size %u is too small.\n",
-			  generic_dtd->gdtd_size);
+		drm_err(&dev_priv->drm, "GDTD size %u is too small.\n",
+			generic_dtd->gdtd_size);
 		return;
 	} else if (generic_dtd->gdtd_size !=
 		   sizeof(struct generic_dtd_entry)) {
-		DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+		drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n",
+			generic_dtd->gdtd_size);
 		/* DTD has unknown fields, but keep going */
 	}
 
 	num_dtd = (get_blocksize(generic_dtd) -
 		   sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
 	if (dev_priv->vbt.panel_type >= num_dtd) {
-		DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
-			  dev_priv->vbt.panel_type, num_dtd);
+		drm_err(&dev_priv->drm,
+			"Panel type %d not found in table of %d DTD's\n",
+			dev_priv->vbt.panel_type, num_dtd);
 		return;
 	}
 
@@ -385,7 +393,8 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
 	else
 		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
 
-	DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "Found panel mode in BIOS VBT generic dtd table:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
 	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
@@ -422,8 +431,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 		return;
 
 	if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
-		DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
-			      backlight_data->entry_size);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Unsupported backlight data entry size %u\n",
+			    backlight_data->entry_size);
 		return;
 	}
 
@@ -431,8 +441,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 
 	dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
 	if (!dev_priv->vbt.backlight.present) {
-		DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
-			      entry->type);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PWM backlight not present in VBT (type %u)\n",
+			    entry->type);
 		return;
 	}
 
@@ -449,13 +460,14 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 	dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
 	dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
 	dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
-	DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
-		      "active %s, min brightness %u, level %u, controller %u\n",
-		      dev_priv->vbt.backlight.pwm_freq_hz,
-		      dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
-		      dev_priv->vbt.backlight.min_brightness,
-		      backlight_data->level[panel_type],
-		      dev_priv->vbt.backlight.controller);
+	drm_dbg_kms(&dev_priv->drm,
+		    "VBT backlight PWM modulation frequency %u Hz, "
+		    "active %s, min brightness %u, level %u, controller %u\n",
+		    dev_priv->vbt.backlight.pwm_freq_hz,
+		    dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+		    dev_priv->vbt.backlight.min_brightness,
+		    backlight_data->level[panel_type],
+		    dev_priv->vbt.backlight.controller);
 }
 
 /* Try to find sdvo panel data */
@@ -469,7 +481,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 
 	index = i915_modparams.vbt_sdvo_panel_type;
 	if (index == -2) {
-		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Ignore SDVO panel mode from BIOS VBT tables.\n");
 		return;
 	}
 
@@ -495,7 +508,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 
 	dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
 
-	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "Found SDVO panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 }
 
@@ -540,13 +554,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	} else {
 		dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
 	}
-	DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
-		      dev_priv->vbt.int_tv_support,
-		      dev_priv->vbt.int_crt_support,
-		      dev_priv->vbt.lvds_use_ssc,
-		      dev_priv->vbt.lvds_ssc_freq,
-		      dev_priv->vbt.display_clock_mode,
-		      dev_priv->vbt.fdi_rx_polarity_inverted);
+	drm_dbg_kms(&dev_priv->drm,
+		    "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+		    dev_priv->vbt.int_tv_support,
+		    dev_priv->vbt.int_crt_support,
+		    dev_priv->vbt.lvds_use_ssc,
+		    dev_priv->vbt.lvds_ssc_freq,
+		    dev_priv->vbt.display_clock_mode,
+		    dev_priv->vbt.fdi_rx_polarity_inverted);
 }
 
 static const struct child_device_config *
@@ -568,7 +583,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 	 * accurate and doesn't have to be, as long as it's not too strict.
 	 */
 	if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
-		DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
+		drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n");
 		return;
 	}
 
@@ -586,14 +601,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 		if (child->dvo_port != DEVICE_PORT_DVOB &&
 		    child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
-			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Incorrect SDVO port. Skip it\n");
 			continue;
 		}
-		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
-			      " %s port\n",
-			      child->slave_addr,
-			      (child->dvo_port == DEVICE_PORT_DVOB) ?
-			      "SDVOB" : "SDVOC");
+		drm_dbg_kms(&dev_priv->drm,
+			    "the SDVO device with slave addr %2x is found on"
+			    " %s port\n",
+			    child->slave_addr,
+			    (child->dvo_port == DEVICE_PORT_DVOB) ?
+			    "SDVOB" : "SDVOC");
 		mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
 		if (!mapping->initialized) {
 			mapping->dvo_port = child->dvo_port;
@@ -602,28 +619,30 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
 			mapping->ddc_pin = child->ddc_pin;
 			mapping->i2c_pin = child->i2c_pin;
 			mapping->initialized = 1;
-			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
-				      mapping->dvo_port,
-				      mapping->slave_addr,
-				      mapping->dvo_wiring,
-				      mapping->ddc_pin,
-				      mapping->i2c_pin);
+			drm_dbg_kms(&dev_priv->drm,
+				    "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+				    mapping->dvo_port, mapping->slave_addr,
+				    mapping->dvo_wiring, mapping->ddc_pin,
+				    mapping->i2c_pin);
 		} else {
-			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
-					 "two SDVO device.\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Maybe one SDVO port is shared by "
+				    "two SDVO devices.\n");
 		}
 		if (child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
-			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
-				" is a SDVO device with multiple inputs.\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "there exists the slave2_addr. Maybe this"
+				    " is an SDVO device with multiple inputs.\n");
 		}
 		count++;
 	}
 
 	if (!count) {
 		/* No SDVO device info is found */
-		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "No SDVO device info is found in VBT\n");
 	}
 }
 
@@ -664,7 +683,8 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 	}
 
 	if (bdb->version < 228) {
-		DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+		drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n",
+			    driver->drrs_enabled);
 		/*
 		 * If DRRS is not supported, drrs_type has to be set to 0.
 		 * This is because, VBT is configured in such a way that
@@ -688,7 +708,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
 	if (bdb->version < 228)
 		return;
 
-	power = find_section(bdb, BDB_LVDS_POWER);
+	power = find_section(bdb, BDB_LFP_POWER);
 	if (!power)
 		return;
 
@@ -742,8 +762,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
 		break;
 	default:
-		DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
-			      edp_link_params->rate);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT has unknown eDP link rate value %u\n",
+			     edp_link_params->rate);
 		break;
 	}
 
@@ -758,8 +779,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.edp.lanes = 4;
 		break;
 	default:
-		DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
-			      edp_link_params->lanes);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT has unknown eDP lane count value %u\n",
+			    edp_link_params->lanes);
 		break;
 	}
 
@@ -777,8 +799,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
 		break;
 	default:
-		DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
-			      edp_link_params->preemphasis);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT has unknown eDP pre-emphasis value %u\n",
+			    edp_link_params->preemphasis);
 		break;
 	}
 
@@ -796,8 +819,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 		break;
 	default:
-		DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
-			      edp_link_params->vswing);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT has unknown eDP voltage swing value %u\n",
+			    edp_link_params->vswing);
 		break;
 	}
 
@@ -824,7 +848,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 
 	psr = find_section(bdb, BDB_PSR);
 	if (!psr) {
-		DRM_DEBUG_KMS("No PSR BDB found.\n");
+		drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n");
 		return;
 	}
 
@@ -851,8 +875,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 		dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
 		break;
 	default:
-		DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
-			      psr_table->lines_to_wait);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT has unknown PSR lines to wait %u\n",
+			    psr_table->lines_to_wait);
 		break;
 	}
 
@@ -874,8 +899,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 			dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
 			break;
 		default:
-			DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
-					psr_table->tp1_wakeup_time);
+			drm_dbg_kms(&dev_priv->drm,
+				    "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+				    psr_table->tp1_wakeup_time);
 			/* fallthrough */
 		case 2:
 			dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
@@ -893,8 +919,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
 			break;
 		default:
-			DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
-					psr_table->tp2_tp3_wakeup_time);
+			drm_dbg_kms(&dev_priv->drm,
+				    "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+				    psr_table->tp2_tp3_wakeup_time);
 			/* fallthrough */
 		case 2:
 			dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
@@ -1000,12 +1027,12 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 	 */
 	start = find_section(bdb, BDB_MIPI_CONFIG);
 	if (!start) {
-		DRM_DEBUG_KMS("No MIPI config BDB found");
+		drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found");
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
-								panel_type);
+	drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n",
+		panel_type);
 
 	/*
 	 * get hold of the correct configuration block and pps data as per
@@ -1220,7 +1247,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
 	const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
 	int index, len;
 
-	if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+	if (drm_WARN_ON(&dev_priv->drm,
+			!data || dev_priv->vbt.dsi.seq_version != 1))
 		return 0;
 
 	/* index = 1 to skip sequence byte */
@@ -1273,7 +1301,8 @@ static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
 	if (!len)
 		return;
 
-	DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "Using init OTP fragment to deassert reset\n");
 
 	/* Copy the fragment, update seq byte and terminate it */
 	init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
@@ -1308,18 +1337,21 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 
 	sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
 	if (!sequence) {
-		DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "No MIPI Sequence found, parsing complete\n");
 		return;
 	}
 
 	/* Fail gracefully for forward incompatible sequence block. */
 	if (sequence->version >= 4) {
-		DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
-			  sequence->version);
+		drm_err(&dev_priv->drm,
+			"Unable to parse MIPI Sequence Block v%u\n",
+			sequence->version);
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+	drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n",
+		sequence->version);
 
 	seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
 	if (!seq_data)
@@ -1336,13 +1368,15 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 			break;
 
 		if (seq_id >= MIPI_SEQ_MAX) {
-			DRM_ERROR("Unknown sequence %u\n", seq_id);
+			drm_err(&dev_priv->drm, "Unknown sequence %u\n",
+				seq_id);
 			goto err;
 		}
 
 		/* Log about presence of sequences we won't run. */
 		if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
-			DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unsupported sequence %u\n", seq_id);
 
 		dev_priv->vbt.dsi.sequence[seq_id] = data + index;
 
@@ -1351,7 +1385,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 		else
 			index = goto_next_sequence(data, index, seq_size);
 		if (!index) {
-			DRM_ERROR("Invalid sequence %u\n", seq_id);
+			drm_err(&dev_priv->drm, "Invalid sequence %u\n",
+				seq_id);
 			goto err;
 		}
 	}
@@ -1362,7 +1397,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 
 	fixup_mipi_sequences(dev_priv);
 
-	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
+	drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n");
 	return;
 
 err:
@@ -1387,13 +1422,15 @@ parse_compression_parameters(struct drm_i915_private *i915,
 	if (params) {
 		/* Sanity checks */
 		if (params->entry_size != sizeof(params->data[0])) {
-			DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+			drm_dbg_kms(&i915->drm,
+				    "VBT: unsupported compression param entry size\n");
 			return;
 		}
 
 		block_size = get_blocksize(params);
 		if (block_size < sizeof(*params)) {
-			DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+			drm_dbg_kms(&i915->drm,
+				    "VBT: expected 16 compression param entries\n");
 			return;
 		}
 	}
@@ -1405,12 +1442,14 @@ parse_compression_parameters(struct drm_i915_private *i915,
 			continue;
 
 		if (!params) {
-			DRM_DEBUG_KMS("VBT: compression params not available\n");
+			drm_dbg_kms(&i915->drm,
+				    "VBT: compression params not available\n");
 			continue;
 		}
 
 		if (child->compression_method_cps) {
-			DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+			drm_dbg_kms(&i915->drm,
+				    "VBT: CPS compression not supported\n");
 			continue;
 		}
 
@@ -1458,10 +1497,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
 
 	p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
 	if (p != PORT_NONE) {
-		DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
-			      "disabling port %c DVI/HDMI support\n",
-			      port_name(port), info->alternate_ddc_pin,
-			      port_name(p), port_name(p));
+		drm_dbg_kms(&dev_priv->drm,
+			    "port %c trying to use the same DDC pin (0x%x) as port %c, "
+			    "disabling port %c DVI/HDMI support\n",
+			    port_name(port), info->alternate_ddc_pin,
+			    port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedly sharing the
@@ -1509,10 +1549,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 
 	p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
 	if (p != PORT_NONE) {
-		DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
-			      "disabling port %c DP support\n",
-			      port_name(port), info->alternate_aux_channel,
-			      port_name(p), port_name(p));
+		drm_dbg_kms(&dev_priv->drm,
+			    "port %c trying to use the same AUX CH (0x%x) as port %c, "
+			    "disabling port %c DP support\n",
+			    port_name(port), info->alternate_aux_channel,
+			    port_name(p), port_name(p));
 
 		/*
 		 * If we have multiple ports supposedly sharing the
@@ -1572,8 +1613,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 	if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
 		return ddc_pin_map[vbt_pin];
 
-	DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
-		      vbt_pin);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+		    vbt_pin);
 	return 0;
 }
 
@@ -1624,8 +1666,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	info = &dev_priv->vbt.ddi_port_info[port];
 
 	if (info->child) {
-		DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
-			      port_name(port));
+		drm_dbg_kms(&dev_priv->drm,
+			    "More than one child device for port %c in VBT, using the first.\n",
+			    port_name(port));
 		return;
 	}
 
@@ -1636,8 +1679,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
 
 	if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
-		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
-			      is_hdmi ? "/HDMI" : "");
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT claims port A supports DVI%s, ignoring\n",
+			    is_hdmi ? "/HDMI" : "");
 		is_dvi = false;
 		is_hdmi = false;
 	}
@@ -1653,11 +1697,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	if (bdb_version >= 209)
 		info->supports_tbt = child->tbt;
 
-	DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
-		      port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
-		      HAS_LSPCON(dev_priv) && child->lspcon,
-		      info->supports_typec_usb, info->supports_tbt,
-		      devdata->dsc != NULL);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+		    port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+		    HAS_LSPCON(dev_priv) && child->lspcon,
+		    info->supports_typec_usb, info->supports_tbt,
+		    devdata->dsc != NULL);
 
 	if (is_dvi) {
 		u8 ddc_pin;
@@ -1667,9 +1712,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 			info->alternate_ddc_pin = ddc_pin;
 			sanitize_ddc_pin(dev_priv, port);
 		} else {
-			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
-				      "sticking to defaults\n",
-				      port_name(port), ddc_pin);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Port %c has invalid DDC pin %d, "
+				    "sticking to defaults\n",
+				    port_name(port), ddc_pin);
 		}
 	}
 
@@ -1682,9 +1728,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 	if (bdb_version >= 158) {
 		/* The VBT HDMI level shift values match the table we have. */
 		u8 hdmi_level_shift = child->hdmi_level_shifter_value;
-		DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
-			      port_name(port),
-			      hdmi_level_shift);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT HDMI level shift for port %c: %d\n",
+			    port_name(port),
+			    hdmi_level_shift);
 		info->hdmi_level_shift = hdmi_level_shift;
 		info->hdmi_level_shift_set = true;
 	}
@@ -1708,19 +1755,22 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 		}
 
 		if (max_tmds_clock)
-			DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
-				      port_name(port), max_tmds_clock);
+			drm_dbg_kms(&dev_priv->drm,
+				    "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+				    port_name(port), max_tmds_clock);
 		info->max_tmds_clock = max_tmds_clock;
 	}
 
 	/* Parse the I_boost config for SKL and above */
 	if (bdb_version >= 196 && child->iboost) {
 		info->dp_boost_level = translate_iboost(child->dp_iboost_level);
-		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
-			      port_name(port), info->dp_boost_level);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT (e)DP boost level for port %c: %d\n",
+			    port_name(port), info->dp_boost_level);
 		info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
-		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
-			      port_name(port), info->hdmi_boost_level);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT HDMI boost level for port %c: %d\n",
+			    port_name(port), info->hdmi_boost_level);
 	}
 
 	/* DP max link rate for CNL+ */
@@ -1740,8 +1790,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
 			info->dp_max_link_rate = 162000;
 			break;
 		}
-		DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
-			      port_name(port), info->dp_max_link_rate);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT DP max link rate for port %c: %d\n",
+			    port_name(port), info->dp_max_link_rate);
 	}
 
 	info->child = child;
@@ -1775,19 +1826,21 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 
 	defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!defs) {
-		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "No general definition block is found, no devices defined.\n");
 		return;
 	}
 
 	block_size = get_blocksize(defs);
 	if (block_size < sizeof(*defs)) {
-		DRM_DEBUG_KMS("General definitions block too small (%u)\n",
-			      block_size);
+		drm_dbg_kms(&dev_priv->drm,
+			    "General definitions block too small (%u)\n",
+			    block_size);
 		return;
 	}
 
 	bus_pin = defs->crt_ddc_gmbus_pin;
-	DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+	drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
 	if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
 		dev_priv->vbt.crt_ddc_pin = bus_pin;
 
@@ -1806,19 +1859,22 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 	} else {
 		expected_size = sizeof(*child);
 		BUILD_BUG_ON(sizeof(*child) < 39);
-		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
-				 bdb->version, expected_size);
+		drm_dbg(&dev_priv->drm,
+			"Expected child device config size for VBT version %u not known; assuming %u\n",
+			bdb->version, expected_size);
 	}
 
 	/* Flag an error for unexpected size, but continue anyway. */
 	if (defs->child_dev_size != expected_size)
-		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
-			  defs->child_dev_size, expected_size, bdb->version);
+		drm_err(&dev_priv->drm,
+			"Unexpected child device config size %u (expected %u for VBT version %u)\n",
+			defs->child_dev_size, expected_size, bdb->version);
 
 	/* The legacy sized child device config is the minimum we need. */
 	if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
-		DRM_DEBUG_KMS("Child device config size %u is too small.\n",
-			      defs->child_dev_size);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Child device config size %u is too small.\n",
+			    defs->child_dev_size);
 		return;
 	}
 
@@ -1830,8 +1886,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		if (!child->device_type)
 			continue;
 
-		DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
-			      child->device_type);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Found VBT child device with type 0x%x\n",
+			    child->device_type);
 
 		devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
 		if (!devdata)
@@ -1849,7 +1906,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 	}
 
 	if (list_empty(&dev_priv->vbt.display_devices))
-		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "no child dev is parsed from VBT\n");
 }
 
 /* Common defaults which may be overridden by VBT. */
@@ -1882,7 +1940,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
 			!HAS_PCH_SPLIT(dev_priv));
-	DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+	drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n",
+		    dev_priv->vbt.lvds_ssc_freq);
 }
 
 /* Defaults to initialize only if there is no VBT. */
@@ -1992,13 +2051,14 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
 		goto err_unmap_oprom;
 
 	if (sizeof(struct vbt_header) > size) {
-		DRM_DEBUG_DRIVER("VBT header incomplete\n");
+		drm_dbg(&dev_priv->drm, "VBT header incomplete\n");
 		goto err_unmap_oprom;
 	}
 
 	vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
 	if (vbt_size > size) {
-		DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+		drm_dbg(&dev_priv->drm,
+			"VBT incomplete (vbt_size overflows)\n");
 		goto err_unmap_oprom;
 	}
 
@@ -2041,7 +2101,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 	INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
 
 	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
-		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Skipping VBT init due to disabled display.\n");
 		return;
 	}
 
@@ -2055,13 +2116,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 
 		vbt = oprom_vbt;
 
-		DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+		drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n");
 	}
 
 	bdb = get_bdb_header(vbt);
 
-	DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
-		      (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+	drm_dbg_kms(&dev_priv->drm,
+		    "VBT signature \"%.*s\", BDB version %d\n",
+		    (int)sizeof(vbt->signature), vbt->signature, bdb->version);
 
 	/* Grab useful general definitions */
 	parse_general_features(dev_priv, bdb);
@@ -2086,7 +2148,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
 
 out:
 	if (!vbt) {
-		DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+		drm_info(&dev_priv->drm,
+			 "Failed to find VBIOS tables (VBT)\n");
 		init_vbt_missing_defaults(dev_priv);
 	}
 
@@ -2238,13 +2301,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
 		const struct ddi_vbt_port_info *port_info =
 			&dev_priv->vbt.ddi_port_info[port];
 
-		return port_info->supports_dp ||
-		       port_info->supports_dvi ||
-		       port_info->supports_hdmi;
+		return port_info->child;
 	}
 
 	/* FIXME maybe deal with port A as well? */
-	if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+	if (drm_WARN_ON(&dev_priv->drm,
+			port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
 		return false;
 
 	list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
@@ -2373,8 +2435,9 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
 		} else if (dvo_port == DVO_PORT_MIPIB ||
 			   dvo_port == DVO_PORT_MIPIC ||
 			   dvo_port == DVO_PORT_MIPID) {
-			DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
-				      port_name(dvo_port - DVO_PORT_MIPIA));
+			drm_dbg_kms(&dev_priv->drm,
+				    "VBT has unsupported DSI port %c\n",
+				    port_name(dvo_port - DVO_PORT_MIPIA));
 		}
 	}
 
@@ -2493,7 +2556,7 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
 	const struct child_device_config *child =
 		i915->vbt.ddi_port_info[port].child;
 
-	if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
+	if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
 		return false;
 
 	return child && child->hpd_invert;
@@ -2526,8 +2589,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
 	if (!info->alternate_aux_channel) {
 		aux_ch = (enum aux_ch)port;
 
-		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
-			      aux_ch_name(aux_ch), port_name(port));
+		drm_dbg_kms(&dev_priv->drm,
+			    "using AUX %c for port %c (platform default)\n",
+			    aux_ch_name(aux_ch), port_name(port));
 		return aux_ch;
 	}
 
@@ -2559,8 +2623,78 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
-		      aux_ch_name(aux_ch), port_name(port));
+	drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n",
+		    aux_ch_name(aux_ch), port_name(port));
 
 	return aux_ch;
 }
+
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+}
+
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	const struct ddi_vbt_port_info *info =
+		&i915->vbt.ddi_port_info[encoder->port];
+
+	return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+}
+
+int intel_bios_dp_boost_level(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	return i915->vbt.ddi_port_info[encoder->port].dp_boost_level;
+}
+
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level;
+}
+
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+}
+
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+	return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+}
+
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port)
+{
+	return i915->vbt.ddi_port_info[port].supports_dvi;
+}
+
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port)
+{
+	return i915->vbt.ddi_port_info[port].supports_hdmi;
+}
+
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port)
+{
+	return i915->vbt.ddi_port_info[port].supports_dp;
+}
+
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915,
+					enum port port)
+{
+	return i915->vbt.ddi_port_info[port].supports_typec_usb;
+}
+
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port)
+{
+	return i915->vbt.ddi_port_info[port].supports_tbt;
+}
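
These new intel_bios_*() getters wrap the direct pokes at
i915->vbt.ddi_port_info[] behind intel_bios.c, so other files no
longer depend on the layout of struct ddi_vbt_port_info. A
hypothetical call site in an HDMI encoder setup path would then look
like (illustrative only; hdmi->max_tmds_clock and
use_vbt_level_shift() are assumed names, not i915 code):

    int vbt_max = intel_bios_max_tmds_clock(encoder);
    int level_shift = intel_bios_hdmi_level_shift(encoder); /* -1 = unset in VBT */

    if (vbt_max)
            hdmi->max_tmds_clock = min(hdmi->max_tmds_clock, vbt_max);
    if (level_shift >= 0)
            use_vbt_level_shift(encoder, level_shift);
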
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d6a0c29..e29e79f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
 
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 struct drm_i915_private;
 struct intel_crtc_state;
 struct intel_encoder;
@@ -247,5 +245,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port
 bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
 			       struct intel_crtc_state *crtc_state,
 			       int dsc_max_bpc);
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
+int intel_bios_dp_boost_level(struct intel_encoder *encoder);
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder);
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
 
 #endif /* _INTEL_BIOS_H_ */
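
[ The header now gets by with forward declarations: every prototype added
  above takes only pointers to struct intel_encoder or struct
  drm_i915_private, so the <drm/i915_drm.h> include can be dropped. A
  standalone sketch of the idiom (illustrative, not kernel code): ]

	/* example.h -- pointer-only interfaces need no complete type. */
	struct widget;				/* forward declaration */

	int widget_max_clock(struct widget *w);	/* OK: used via pointer only */
	/* struct widget w; */			/* would not compile here: the
						 * size of struct widget is
						 * unknown in this unit */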
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index b228671..58b264b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -122,7 +122,8 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
 	if (ret)
 		return ret;
 
-	if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			qi->num_points > ARRAY_SIZE(qi->points)))
 		qi->num_points = ARRAY_SIZE(qi->points);
 
 	for (i = 0; i < qi->num_points; i++) {
@@ -132,9 +133,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
 		if (ret)
 			return ret;
 
-		DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
-			      i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
-			      sp->t_rcd, sp->t_rc);
+		drm_dbg_kms(&dev_priv->drm,
+			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+			    sp->t_rcd, sp->t_rc);
 	}
 
 	return 0;
@@ -187,7 +189,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 
 	ret = icl_get_qgv_points(dev_priv, &qi);
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to get memory subsystem information, ignoring bandwidth limits\n");
 		return ret;
 	}
 	num_channels = qi.num_channels;
@@ -228,8 +231,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
 			bi->deratedbw[j] = min(maxdebw,
 					       bw * 9 / 10); /* 90% */
 
-			DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
-				      i, j, bi->num_planes, bi->deratedbw[j]);
+			drm_dbg_kms(&dev_priv->drm,
+				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+				    i, j, bi->num_planes, bi->deratedbw[j]);
 		}
 
 		if (bi->num_planes == 1)
@@ -374,10 +378,9 @@ static struct intel_bw_state *
 intel_atomic_get_bw_state(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct drm_private_state *bw_state;
+	struct intel_global_state *bw_state;
 
-	bw_state = drm_atomic_get_private_obj_state(&state->base,
-						    &dev_priv->bw_obj);
+	bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
 	if (IS_ERR(bw_state))
 		return ERR_CAST(bw_state);
 
@@ -392,7 +395,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 	unsigned int data_rate, max_data_rate;
 	unsigned int num_active_planes;
 	struct intel_crtc *crtc;
-	int i;
+	int i, ret;
 
 	/* FIXME earlier gens need some checks too */
 	if (INTEL_GEN(dev_priv) < 11)
@@ -424,15 +427,20 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 		bw_state->data_rate[crtc->pipe] = new_data_rate;
 		bw_state->num_active_planes[crtc->pipe] = new_active_planes;
 
-		DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
-			      pipe_name(crtc->pipe),
-			      bw_state->data_rate[crtc->pipe],
-			      bw_state->num_active_planes[crtc->pipe]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "pipe %c data rate %u num active planes %u\n",
+			    pipe_name(crtc->pipe),
+			    bw_state->data_rate[crtc->pipe],
+			    bw_state->num_active_planes[crtc->pipe]);
 	}
 
 	if (!bw_state)
 		return 0;
 
+	ret = intel_atomic_lock_global_state(&bw_state->base);
+	if (ret)
+		return ret;
+
 	data_rate = intel_bw_data_rate(dev_priv, bw_state);
 	num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
 
@@ -441,15 +449,17 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
 	data_rate = DIV_ROUND_UP(data_rate, 1000);
 
 	if (data_rate > max_data_rate) {
-		DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
-			      data_rate, max_data_rate, num_active_planes);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+			    data_rate, max_data_rate, num_active_planes);
 		return -EINVAL;
 	}
 
 	return 0;
 }
 
-static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+static struct intel_global_state *
+intel_bw_duplicate_state(struct intel_global_obj *obj)
 {
 	struct intel_bw_state *state;
 
@@ -457,18 +467,16 @@ static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj
 	if (!state)
 		return NULL;
 
-	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
 	return &state->base;
 }
 
-static void intel_bw_destroy_state(struct drm_private_obj *obj,
-				   struct drm_private_state *state)
+static void intel_bw_destroy_state(struct intel_global_obj *obj,
+				   struct intel_global_state *state)
 {
 	kfree(state);
 }
 
-static const struct drm_private_state_funcs intel_bw_funcs = {
+static const struct intel_global_state_funcs intel_bw_funcs = {
 	.atomic_duplicate_state = intel_bw_duplicate_state,
 	.atomic_destroy_state = intel_bw_destroy_state,
 };
@@ -481,13 +489,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
 	if (!state)
 		return -ENOMEM;
 
-	drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
-				    &state->base, &intel_bw_funcs);
+	intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+				     &state->base, &intel_bw_funcs);
 
 	return 0;
 }
-
-void intel_bw_cleanup(struct drm_i915_private *dev_priv)
-{
-	drm_atomic_private_obj_fini(&dev_priv->bw_obj);
-}
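
[ intel_bw's private-object state is converted to the driver-local
  intel_global_state machinery above; note that intel_bw_cleanup() goes
  away entirely, the global object presumably being torn down by the
  common global-state code rather than per-user. A hedged sketch of a
  consumer, mirroring intel_atomic_get_bw_state() and the locking added
  to intel_bw_atomic_check(); sketch_check() is a hypothetical name: ]

	static int sketch_check(struct intel_atomic_state *state)
	{
		struct intel_bw_state *bw_state;

		bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(bw_state))
			return PTR_ERR(bw_state);

		/* Serialize against other commits touching the same global
		 * object before reading aggregate state out of it. */
		return intel_atomic_lock_global_state(&bw_state->base);
	}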
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 20b9ad2..a8aa762 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,13 +9,14 @@
 #include <drm/drm_atomic.h>
 
 #include "intel_display.h"
+#include "intel_global_state.h"
 
 struct drm_i915_private;
 struct intel_atomic_state;
 struct intel_crtc_state;
 
 struct intel_bw_state {
-	struct drm_private_state base;
+	struct intel_global_state base;
 
 	unsigned int data_rate[I915_MAX_PIPES];
 	u8 num_active_planes[I915_MAX_PIPES];
@@ -25,7 +26,6 @@ struct intel_bw_state {
 
 void intel_bw_init_hw(struct drm_i915_private *dev_priv);
 int intel_bw_init(struct drm_i915_private *dev_priv);
-void intel_bw_cleanup(struct drm_i915_private *dev_priv);
 int intel_bw_atomic_check(struct intel_atomic_state *state);
 void intel_bw_crtc_update(struct intel_bw_state *bw_state,
 			  const struct intel_crtc_state *crtc_state);
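
[ The intel_cdclk.c diff that follows is dominated by two mechanical
  conversions: struct intel_cdclk_state is renamed to struct
  intel_cdclk_config (presumably to free the "state" name for atomic
  state proper), and the implicit I915_READ()/I915_WRITE() macros become
  explicit intel_de_read()/intel_de_write() display-engine accessors. A
  sketch of the accessor pattern, assuming dev_priv is in scope as in the
  hunks below: ]

	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);	/* was: I915_READ(LCPLL_CTL) */
	val |= LCPLL_CD_SOURCE_FCLK;
	intel_de_write(dev_priv, LCPLL_CTL, val);	/* was: I915_WRITE(LCPLL_CTL, val) */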
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0ce5926..979a024 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -55,43 +55,43 @@
  */
 
 static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 133333;
+	cdclk_config->cdclk = 133333;
 }
 
 static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 200000;
+	cdclk_config->cdclk = 200000;
 }
 
 static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 266667;
+	cdclk_config->cdclk = 266667;
 }
 
 static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 333333;
+	cdclk_config->cdclk = 333333;
 }
 
 static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 400000;
+	cdclk_config->cdclk = 400000;
 }
 
 static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
-				   struct intel_cdclk_state *cdclk_state)
+				   struct intel_cdclk_config *cdclk_config)
 {
-	cdclk_state->cdclk = 450000;
+	cdclk_config->cdclk = 450000;
 }
 
 static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
-			   struct intel_cdclk_state *cdclk_state)
+			   struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 hpllcc = 0;
@@ -102,7 +102,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
 	 * FIXME is this the right way to detect 852GM/852GMV?
 	 */
 	if (pdev->revision == 0x1) {
-		cdclk_state->cdclk = 133333;
+		cdclk_config->cdclk = 133333;
 		return;
 	}
 
@@ -116,24 +116,24 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
 	case GC_CLOCK_133_200:
 	case GC_CLOCK_133_200_2:
 	case GC_CLOCK_100_200:
-		cdclk_state->cdclk = 200000;
+		cdclk_config->cdclk = 200000;
 		break;
 	case GC_CLOCK_166_250:
-		cdclk_state->cdclk = 250000;
+		cdclk_config->cdclk = 250000;
 		break;
 	case GC_CLOCK_100_133:
-		cdclk_state->cdclk = 133333;
+		cdclk_config->cdclk = 133333;
 		break;
 	case GC_CLOCK_133_266:
 	case GC_CLOCK_133_266_2:
 	case GC_CLOCK_166_266:
-		cdclk_state->cdclk = 266667;
+		cdclk_config->cdclk = 266667;
 		break;
 	}
 }
 
 static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
-			     struct intel_cdclk_state *cdclk_state)
+			     struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 gcfgc = 0;
@@ -141,23 +141,23 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
 	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
-		cdclk_state->cdclk = 133333;
+		cdclk_config->cdclk = 133333;
 		return;
 	}
 
 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
 	case GC_DISPLAY_CLOCK_333_320_MHZ:
-		cdclk_state->cdclk = 333333;
+		cdclk_config->cdclk = 333333;
 		break;
 	default:
 	case GC_DISPLAY_CLOCK_190_200_MHZ:
-		cdclk_state->cdclk = 190000;
+		cdclk_config->cdclk = 190000;
 		break;
 	}
 }
 
 static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
-			     struct intel_cdclk_state *cdclk_state)
+			     struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 gcfgc = 0;
@@ -165,17 +165,17 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
 	pci_read_config_word(pdev, GCFGC, &gcfgc);
 
 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
-		cdclk_state->cdclk = 133333;
+		cdclk_config->cdclk = 133333;
 		return;
 	}
 
 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
 	case GC_DISPLAY_CLOCK_333_320_MHZ:
-		cdclk_state->cdclk = 320000;
+		cdclk_config->cdclk = 320000;
 		break;
 	default:
 	case GC_DISPLAY_CLOCK_190_200_MHZ:
-		cdclk_state->cdclk = 200000;
+		cdclk_config->cdclk = 200000;
 		break;
 	}
 }
@@ -237,20 +237,21 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
 	else
 		return 0;
 
-	tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
-			HPLLVCO_MOBILE : HPLLVCO);
+	tmp = intel_de_read(dev_priv,
+			    IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
 
 	vco = vco_table[tmp & 0x7];
 	if (vco == 0)
-		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+		drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+			tmp);
 	else
-		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+		drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
 
 	return vco;
 }
 
 static void g33_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
@@ -261,7 +262,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
 	unsigned int cdclk_sel;
 	u16 tmp = 0;
 
-	cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_config->vco = intel_hpll_vco(dev_priv);
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
 
@@ -270,7 +271,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
 		goto fail;
 
-	switch (cdclk_state->vco) {
+	switch (cdclk_config->vco) {
 	case 3200000:
 		div_table = div_3200;
 		break;
@@ -287,18 +288,19 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
 		goto fail;
 	}
 
-	cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
-					       div_table[cdclk_sel]);
+	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+						div_table[cdclk_sel]);
 	return;
 
 fail:
-	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
-		  cdclk_state->vco, tmp);
-	cdclk_state->cdclk = 190476;
+	drm_err(&dev_priv->drm,
+		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+		cdclk_config->vco, tmp);
+	cdclk_config->cdclk = 190476;
 }
 
 static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u16 gcfgc = 0;
@@ -307,31 +309,32 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
 
 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
-		cdclk_state->cdclk = 266667;
+		cdclk_config->cdclk = 266667;
 		break;
 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
-		cdclk_state->cdclk = 333333;
+		cdclk_config->cdclk = 333333;
 		break;
 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
-		cdclk_state->cdclk = 444444;
+		cdclk_config->cdclk = 444444;
 		break;
 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
-		cdclk_state->cdclk = 200000;
+		cdclk_config->cdclk = 200000;
 		break;
 	default:
-		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+		drm_err(&dev_priv->drm,
+			"Unknown pnv display core clock 0x%04x\n", gcfgc);
 		/* fall through */
 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
-		cdclk_state->cdclk = 133333;
+		cdclk_config->cdclk = 133333;
 		break;
 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
-		cdclk_state->cdclk = 166667;
+		cdclk_config->cdclk = 166667;
 		break;
 	}
 }
 
 static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
-			     struct intel_cdclk_state *cdclk_state)
+			     struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	static const u8 div_3200[] = { 16, 10,  8 };
@@ -341,7 +344,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
 	unsigned int cdclk_sel;
 	u16 tmp = 0;
 
-	cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_config->vco = intel_hpll_vco(dev_priv);
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
 
@@ -350,7 +353,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
 		goto fail;
 
-	switch (cdclk_state->vco) {
+	switch (cdclk_config->vco) {
 	case 3200000:
 		div_table = div_3200;
 		break;
@@ -364,62 +367,64 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
 		goto fail;
 	}
 
-	cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
-					       div_table[cdclk_sel]);
+	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+						div_table[cdclk_sel]);
 	return;
 
 fail:
-	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
-		  cdclk_state->vco, tmp);
-	cdclk_state->cdclk = 200000;
+	drm_err(&dev_priv->drm,
+		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+		cdclk_config->vco, tmp);
+	cdclk_config->cdclk = 200000;
 }
 
 static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
-			   struct intel_cdclk_state *cdclk_state)
+			   struct intel_cdclk_config *cdclk_config)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	unsigned int cdclk_sel;
 	u16 tmp = 0;
 
-	cdclk_state->vco = intel_hpll_vco(dev_priv);
+	cdclk_config->vco = intel_hpll_vco(dev_priv);
 
 	pci_read_config_word(pdev, GCFGC, &tmp);
 
 	cdclk_sel = (tmp >> 12) & 0x1;
 
-	switch (cdclk_state->vco) {
+	switch (cdclk_config->vco) {
 	case 2666667:
 	case 4000000:
 	case 5333333:
-		cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+		cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
 		break;
 	case 3200000:
-		cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+		cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
 		break;
 	default:
-		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
-			  cdclk_state->vco, tmp);
-		cdclk_state->cdclk = 222222;
+		drm_err(&dev_priv->drm,
+			"Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+			cdclk_config->vco, tmp);
+		cdclk_config->cdclk = 222222;
 		break;
 	}
 }
 
 static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
-	u32 lcpll = I915_READ(LCPLL_CTL);
+	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
 	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
-		cdclk_state->cdclk = 800000;
-	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-		cdclk_state->cdclk = 450000;
+		cdclk_config->cdclk = 800000;
+	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		cdclk_config->cdclk = 450000;
 	else if (freq == LCPLL_CLK_FREQ_450)
-		cdclk_state->cdclk = 450000;
+		cdclk_config->cdclk = 450000;
 	else if (IS_HSW_ULT(dev_priv))
-		cdclk_state->cdclk = 337500;
+		cdclk_config->cdclk = 337500;
 	else
-		cdclk_state->cdclk = 540000;
+		cdclk_config->cdclk = 540000;
 }
 
 static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
@@ -462,17 +467,17 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
 }
 
 static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
 	u32 val;
 
 	vlv_iosf_sb_get(dev_priv,
 			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 
-	cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
-	cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
-					       CCK_DISPLAY_CLOCK_CONTROL,
-					       cdclk_state->vco);
+	cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
+	cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+						CCK_DISPLAY_CLOCK_CONTROL,
+						cdclk_config->vco);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
 
@@ -480,10 +485,10 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
 			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 
 	if (IS_VALLEYVIEW(dev_priv))
-		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
 			DSPFREQGUAR_SHIFT;
 	else
-		cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
 			DSPFREQGUAR_SHIFT_CHV;
 }
 
@@ -510,25 +515,26 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 	 * WA - write default credits before re-programming
 	 * FIXME: should we also set the resend bit here?
 	 */
-	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-		   default_credits);
+	intel_de_write(dev_priv, GCI_CONTROL,
+		       VGA_FAST_MODE_DISABLE | default_credits);
 
-	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
-		   credits | PFI_CREDIT_RESEND);
+	intel_de_write(dev_priv, GCI_CONTROL,
+		       VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
 
 	/*
 	 * FIXME is this guaranteed to clear
 	 * immediately or should we poll for it?
 	 */
-	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
 static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe)
 {
-	int cdclk = cdclk_state->cdclk;
-	u32 val, cmd = cdclk_state->voltage_level;
+	int cdclk = cdclk_config->cdclk;
+	u32 val, cmd = cdclk_config->voltage_level;
 	intel_wakeref_t wakeref;
 
 	switch (cdclk) {
@@ -563,7 +569,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
 		     50)) {
-		DRM_ERROR("timed out waiting for CDclk change\n");
+		drm_err(&dev_priv->drm,
+			"timed out waiting for CDclk change\n");
 	}
 
 	if (cdclk == 400000) {
@@ -581,7 +588,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
 			     50))
-			DRM_ERROR("timed out waiting for CDclk change\n");
+			drm_err(&dev_priv->drm,
+				"timed out waiting for CDclk change\n");
 	}
 
 	/* adjust self-refresh exit latency value */
@@ -611,11 +619,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 }
 
 static void chv_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe)
 {
-	int cdclk = cdclk_state->cdclk;
-	u32 val, cmd = cdclk_state->voltage_level;
+	int cdclk = cdclk_config->cdclk;
+	u32 val, cmd = cdclk_config->voltage_level;
 	intel_wakeref_t wakeref;
 
 	switch (cdclk) {
@@ -645,7 +653,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
 		     50)) {
-		DRM_ERROR("timed out waiting for CDclk change\n");
+		drm_err(&dev_priv->drm,
+			"timed out waiting for CDclk change\n");
 	}
 
 	vlv_punit_put(dev_priv);
@@ -685,68 +694,70 @@ static u8 bdw_calc_voltage_level(int cdclk)
 }
 
 static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
-	u32 lcpll = I915_READ(LCPLL_CTL);
+	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
 	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
-		cdclk_state->cdclk = 800000;
-	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
-		cdclk_state->cdclk = 450000;
+		cdclk_config->cdclk = 800000;
+	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		cdclk_config->cdclk = 450000;
 	else if (freq == LCPLL_CLK_FREQ_450)
-		cdclk_state->cdclk = 450000;
+		cdclk_config->cdclk = 450000;
 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
-		cdclk_state->cdclk = 540000;
+		cdclk_config->cdclk = 540000;
 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
-		cdclk_state->cdclk = 337500;
+		cdclk_config->cdclk = 337500;
 	else
-		cdclk_state->cdclk = 675000;
+		cdclk_config->cdclk = 675000;
 
 	/*
 	 * Can't read this out :( Let's assume it's
 	 * at least what the CDCLK frequency requires.
 	 */
-	cdclk_state->voltage_level =
-		bdw_calc_voltage_level(cdclk_state->cdclk);
+	cdclk_config->voltage_level =
+		bdw_calc_voltage_level(cdclk_config->cdclk);
 }
 
 static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe)
 {
-	int cdclk = cdclk_state->cdclk;
+	int cdclk = cdclk_config->cdclk;
 	u32 val;
 	int ret;
 
-	if (WARN((I915_READ(LCPLL_CTL) &
-		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
-		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
-		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
-		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
-		 "trying to change cdclk frequency with cdclk not enabled\n"))
+	if (drm_WARN(&dev_priv->drm,
+		     (intel_de_read(dev_priv, LCPLL_CTL) &
+		      (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+		       LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+		       LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+		       LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+		     "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
 	ret = sandybridge_pcode_write(dev_priv,
 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
 	if (ret) {
-		DRM_ERROR("failed to inform pcode about cdclk change\n");
+		drm_err(&dev_priv->drm,
+			"failed to inform pcode about cdclk change\n");
 		return;
 	}
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 	val |= LCPLL_CD_SOURCE_FCLK;
-	I915_WRITE(LCPLL_CTL, val);
+	intel_de_write(dev_priv, LCPLL_CTL, val);
 
 	/*
 	 * According to the spec, it should be enough to poll for this 1 us.
 	 * However, extensive testing shows that this can take longer.
 	 */
-	if (wait_for_us(I915_READ(LCPLL_CTL) &
+	if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
 			LCPLL_CD_SOURCE_FCLK_DONE, 100))
-		DRM_ERROR("Switching to FCLK failed\n");
+		drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 	val &= ~LCPLL_CLK_FREQ_MASK;
 
 	switch (cdclk) {
@@ -767,20 +778,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	I915_WRITE(LCPLL_CTL, val);
+	intel_de_write(dev_priv, LCPLL_CTL, val);
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 	val &= ~LCPLL_CD_SOURCE_FCLK;
-	I915_WRITE(LCPLL_CTL, val);
+	intel_de_write(dev_priv, LCPLL_CTL, val);
 
-	if (wait_for_us((I915_READ(LCPLL_CTL) &
-			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-		DRM_ERROR("Switching back to LCPLL failed\n");
+	if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
 
 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				cdclk_state->voltage_level);
+				cdclk_config->voltage_level);
 
-	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+	intel_de_write(dev_priv, CDCLK_FREQ,
+		       DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
 
 	intel_update_cdclk(dev_priv);
 }
@@ -821,26 +833,27 @@ static u8 skl_calc_voltage_level(int cdclk)
 }
 
 static void skl_dpll0_update(struct drm_i915_private *dev_priv,
-			     struct intel_cdclk_state *cdclk_state)
+			     struct intel_cdclk_config *cdclk_config)
 {
 	u32 val;
 
-	cdclk_state->ref = 24000;
-	cdclk_state->vco = 0;
+	cdclk_config->ref = 24000;
+	cdclk_config->vco = 0;
 
-	val = I915_READ(LCPLL1_CTL);
+	val = intel_de_read(dev_priv, LCPLL1_CTL);
 	if ((val & LCPLL_PLL_ENABLE) == 0)
 		return;
 
-	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+	if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
 		return;
 
-	val = I915_READ(DPLL_CTRL1);
+	val = intel_de_read(dev_priv, DPLL_CTRL1);
 
-	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
-			    DPLL_CTRL1_SSC(SKL_DPLL0) |
-			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
-		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			(val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+				DPLL_CTRL1_SSC(SKL_DPLL0) |
+				DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+			DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
 		return;
 
 	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
@@ -848,11 +861,11 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
-		cdclk_state->vco = 8100000;
+		cdclk_config->vco = 8100000;
 		break;
 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
-		cdclk_state->vco = 8640000;
+		cdclk_config->vco = 8640000;
 		break;
 	default:
 		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -861,32 +874,32 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
 }
 
 static void skl_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
 	u32 cdctl;
 
-	skl_dpll0_update(dev_priv, cdclk_state);
+	skl_dpll0_update(dev_priv, cdclk_config);
 
-	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+	cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
 
-	if (cdclk_state->vco == 0)
+	if (cdclk_config->vco == 0)
 		goto out;
 
-	cdctl = I915_READ(CDCLK_CTL);
+	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
 
-	if (cdclk_state->vco == 8640000) {
+	if (cdclk_config->vco == 8640000) {
 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
 		case CDCLK_FREQ_450_432:
-			cdclk_state->cdclk = 432000;
+			cdclk_config->cdclk = 432000;
 			break;
 		case CDCLK_FREQ_337_308:
-			cdclk_state->cdclk = 308571;
+			cdclk_config->cdclk = 308571;
 			break;
 		case CDCLK_FREQ_540:
-			cdclk_state->cdclk = 540000;
+			cdclk_config->cdclk = 540000;
 			break;
 		case CDCLK_FREQ_675_617:
-			cdclk_state->cdclk = 617143;
+			cdclk_config->cdclk = 617143;
 			break;
 		default:
 			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -895,16 +908,16 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
 	} else {
 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
 		case CDCLK_FREQ_450_432:
-			cdclk_state->cdclk = 450000;
+			cdclk_config->cdclk = 450000;
 			break;
 		case CDCLK_FREQ_337_308:
-			cdclk_state->cdclk = 337500;
+			cdclk_config->cdclk = 337500;
 			break;
 		case CDCLK_FREQ_540:
-			cdclk_state->cdclk = 540000;
+			cdclk_config->cdclk = 540000;
 			break;
 		case CDCLK_FREQ_675_617:
-			cdclk_state->cdclk = 675000;
+			cdclk_config->cdclk = 675000;
 			break;
 		default:
 			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -917,8 +930,8 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
 	 * Can't read this out :( Let's assume it's
 	 * at least what the CDCLK frequency requires.
 	 */
-	cdclk_state->voltage_level =
-		skl_calc_voltage_level(cdclk_state->cdclk);
+	cdclk_config->voltage_level =
+		skl_calc_voltage_level(cdclk_config->cdclk);
 }
 
 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -942,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
 	u32 val;
 
-	WARN_ON(vco != 8100000 && vco != 8640000);
+	drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
 
 	/*
 	 * We always enable DPLL0 with the lowest link rate possible, but still
@@ -953,7 +966,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 	 * rate later on, with the constraint of choosing a frequency that
 	 * works with vco.
 	 */
-	val = I915_READ(DPLL_CTRL1);
+	val = intel_de_read(dev_priv, DPLL_CTRL1);
 
 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -965,13 +978,14 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
 					    SKL_DPLL0);
 
-	I915_WRITE(DPLL_CTRL1, val);
-	POSTING_READ(DPLL_CTRL1);
+	intel_de_write(dev_priv, DPLL_CTRL1, val);
+	intel_de_posting_read(dev_priv, DPLL_CTRL1);
 
-	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+	intel_de_write(dev_priv, LCPLL1_CTL,
+		       intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
 	if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
-		DRM_ERROR("DPLL0 not locked\n");
+		drm_err(&dev_priv->drm, "DPLL0 not locked\n");
 
 	dev_priv->cdclk.hw.vco = vco;
 
@@ -981,19 +995,20 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 
 static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+	intel_de_write(dev_priv, LCPLL1_CTL,
+		       intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
 	if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
-		DRM_ERROR("Couldn't disable DPLL0\n");
+		drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
 
 	dev_priv->cdclk.hw.vco = 0;
 }
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe)
 {
-	int cdclk = cdclk_state->cdclk;
-	int vco = cdclk_state->vco;
+	int cdclk = cdclk_config->cdclk;
+	int vco = cdclk_config->vco;
 	u32 freq_select, cdclk_ctl;
 	int ret;
 
@@ -1005,23 +1020,25 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	 * use the corresponding VCO freq as that always leads to using the
 	 * minimum 308MHz CDCLK.
 	 */
-	WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+	drm_WARN_ON_ONCE(&dev_priv->drm,
+			 IS_SKYLAKE(dev_priv) && vco == 8640000);
 
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
 	if (ret) {
-		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
-			  ret);
+		drm_err(&dev_priv->drm,
+			"Failed to inform PCU about cdclk change (%d)\n", ret);
 		return;
 	}
 
 	/* Choose frequency for this cdclk */
 	switch (cdclk) {
 	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
-		WARN_ON(vco != 0);
+		drm_WARN_ON(&dev_priv->drm,
+			    cdclk != dev_priv->cdclk.hw.bypass);
+		drm_WARN_ON(&dev_priv->drm, vco != 0);
 		/* fall through */
 	case 308571:
 	case 337500:
@@ -1044,38 +1061,38 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
 	    dev_priv->cdclk.hw.vco != vco)
 		skl_dpll0_disable(dev_priv);
 
-	cdclk_ctl = I915_READ(CDCLK_CTL);
+	cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
 
 	if (dev_priv->cdclk.hw.vco != vco) {
 		/* Wa Display #1183: skl,kbl,cfl */
 		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
 		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
-		I915_WRITE(CDCLK_CTL, cdclk_ctl);
+		intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
 	}
 
 	/* Wa Display #1183: skl,kbl,cfl */
 	cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
-	I915_WRITE(CDCLK_CTL, cdclk_ctl);
-	POSTING_READ(CDCLK_CTL);
+	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+	intel_de_posting_read(dev_priv, CDCLK_CTL);
 
 	if (dev_priv->cdclk.hw.vco != vco)
 		skl_dpll0_enable(dev_priv, vco);
 
 	/* Wa Display #1183: skl,kbl,cfl */
 	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
-	I915_WRITE(CDCLK_CTL, cdclk_ctl);
+	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
 
 	cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
-	I915_WRITE(CDCLK_CTL, cdclk_ctl);
+	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
 
 	/* Wa Display #1183: skl,kbl,cfl */
 	cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
-	I915_WRITE(CDCLK_CTL, cdclk_ctl);
-	POSTING_READ(CDCLK_CTL);
+	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+	intel_de_posting_read(dev_priv, CDCLK_CTL);
 
 	/* inform PCU of the change */
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-				cdclk_state->voltage_level);
+				cdclk_config->voltage_level);
 
 	intel_update_cdclk(dev_priv);
 }
@@ -1089,11 +1106,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	 * There is SWF18 scratchpad register defined which is set by the
 	 * pre-os which can be used by the OS drivers to check the status
 	 */
-	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+	if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
 		goto sanitize;
 
 	intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
 
 	/* Is PLL enabled and locked ? */
 	if (dev_priv->cdclk.hw.vco == 0 ||
@@ -1106,7 +1123,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	 * decimal part is programmed wrong from BIOS where pre-os does not
 	 * enable display. Verify the same as well.
 	 */
-	cdctl = I915_READ(CDCLK_CTL);
+	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
 	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
 		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
 	if (cdctl == expected)
@@ -1114,7 +1131,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 		return;
 
 sanitize:
-	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
 
 	/* force cdclk programming */
 	dev_priv->cdclk.hw.cdclk = 0;
@@ -1122,9 +1139,9 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	dev_priv->cdclk.hw.vco = -1;
 }
 
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
 {
-	struct intel_cdclk_state cdclk_state;
+	struct intel_cdclk_config cdclk_config;
 
 	skl_sanitize_cdclk(dev_priv);
 
@@ -1140,26 +1157,26 @@ static void skl_init_cdclk(struct drm_i915_private *dev_priv)
 		return;
 	}
 
-	cdclk_state = dev_priv->cdclk.hw;
+	cdclk_config = dev_priv->cdclk.hw;
 
-	cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
-	if (cdclk_state.vco == 0)
-		cdclk_state.vco = 8100000;
-	cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
-	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+	cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+	if (cdclk_config.vco == 0)
+		cdclk_config.vco = 8100000;
+	cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
+	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
 
-	skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
 }
 
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
 {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
 
-	cdclk_state.cdclk = cdclk_state.bypass;
-	cdclk_state.vco = 0;
-	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+	cdclk_config.cdclk = cdclk_config.bypass;
+	cdclk_config.vco = 0;
+	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
 
-	skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
 }
 
 static const struct intel_cdclk_vals bxt_cdclk_table[] = {
@@ -1223,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
 		    table[i].cdclk >= min_cdclk)
 			return table[i].cdclk;
 
-	WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
-	     min_cdclk, dev_priv->cdclk.hw.ref);
+	drm_WARN(&dev_priv->drm, 1,
+		 "Cannot satisfy minimum cdclk %d with refclk %u\n",
+		 min_cdclk, dev_priv->cdclk.hw.ref);
 	return 0;
 }
 
@@ -1241,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 		    table[i].cdclk == cdclk)
 			return dev_priv->cdclk.hw.ref * table[i].ratio;
 
-	WARN(1, "cdclk %d not valid for refclk %u\n",
-	     cdclk, dev_priv->cdclk.hw.ref);
+	drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+		 cdclk, dev_priv->cdclk.hw.ref);
 	return 0;
 }
 
@@ -1283,56 +1301,68 @@ static u8 ehl_calc_voltage_level(int cdclk)
 		return 0;
 }
 
-static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
-			       struct intel_cdclk_state *cdclk_state)
+static u8 tgl_calc_voltage_level(int cdclk)
 {
-	if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
-		cdclk_state->ref = 24000;
+	if (cdclk > 556800)
+		return 3;
+	else if (cdclk > 326400)
+		return 2;
+	else if (cdclk > 312000)
+		return 1;
 	else
-		cdclk_state->ref = 19200;
+		return 0;
+}
+
+static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
+			       struct intel_cdclk_config *cdclk_config)
+{
+	if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+		cdclk_config->ref = 24000;
+	else
+		cdclk_config->ref = 19200;
 }
 
 static void icl_readout_refclk(struct drm_i915_private *dev_priv,
-			       struct intel_cdclk_state *cdclk_state)
+			       struct intel_cdclk_config *cdclk_config)
 {
-	u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+	u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
 
 	switch (dssm) {
 	default:
 		MISSING_CASE(dssm);
 		/* fall through */
 	case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
-		cdclk_state->ref = 24000;
+		cdclk_config->ref = 24000;
 		break;
 	case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
-		cdclk_state->ref = 19200;
+		cdclk_config->ref = 19200;
 		break;
 	case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
-		cdclk_state->ref = 38400;
+		cdclk_config->ref = 38400;
 		break;
 	}
 }
 
 static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
-			       struct intel_cdclk_state *cdclk_state)
+			       struct intel_cdclk_config *cdclk_config)
 {
 	u32 val, ratio;
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		icl_readout_refclk(dev_priv, cdclk_state);
+		icl_readout_refclk(dev_priv, cdclk_config);
 	else if (IS_CANNONLAKE(dev_priv))
-		cnl_readout_refclk(dev_priv, cdclk_state);
+		cnl_readout_refclk(dev_priv, cdclk_config);
 	else
-		cdclk_state->ref = 19200;
+		cdclk_config->ref = 19200;
 
-	val = I915_READ(BXT_DE_PLL_ENABLE);
+	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
 	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
 	    (val & BXT_DE_PLL_LOCK) == 0) {
 		/*
 		 * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
 		 * setting it to zero is a way to signal that.
 		 */
-		cdclk_state->vco = 0;
+		cdclk_config->vco = 0;
 		return;
 	}
 
@@ -1343,47 +1373,49 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 10)
 		ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
 	else
-		ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+		ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
 
-	cdclk_state->vco = ratio * cdclk_state->ref;
+	cdclk_config->vco = ratio * cdclk_config->ref;
 }
 
 static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state)
+			  struct intel_cdclk_config *cdclk_config)
 {
 	u32 divider;
 	int div;
 
-	bxt_de_pll_readout(dev_priv, cdclk_state);
+	bxt_de_pll_readout(dev_priv, cdclk_config);
 
 	if (INTEL_GEN(dev_priv) >= 12)
-		cdclk_state->bypass = cdclk_state->ref / 2;
+		cdclk_config->bypass = cdclk_config->ref / 2;
 	else if (INTEL_GEN(dev_priv) >= 11)
-		cdclk_state->bypass = 50000;
+		cdclk_config->bypass = 50000;
 	else
-		cdclk_state->bypass = cdclk_state->ref;
+		cdclk_config->bypass = cdclk_config->ref;
 
-	if (cdclk_state->vco == 0) {
-		cdclk_state->cdclk = cdclk_state->bypass;
+	if (cdclk_config->vco == 0) {
+		cdclk_config->cdclk = cdclk_config->bypass;
 		goto out;
 	}
 
-	divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+	divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
 
 	switch (divider) {
 	case BXT_CDCLK_CD2X_DIV_SEL_1:
 		div = 2;
 		break;
 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
-		WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
-		     "Unsupported divider\n");
+		drm_WARN(&dev_priv->drm,
+			 IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+			 "Unsupported divider\n");
 		div = 3;
 		break;
 	case BXT_CDCLK_CD2X_DIV_SEL_2:
 		div = 4;
 		break;
 	case BXT_CDCLK_CD2X_DIV_SEL_4:
-		WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+		drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+			 "Unsupported divider\n");
 		div = 8;
 		break;
 	default:
@@ -1391,25 +1423,25 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
 
  out:
 	/*
 	 * Can't read this out :( Let's assume it's
 	 * at least what the CDCLK frequency requires.
 	 */
-	cdclk_state->voltage_level =
-		dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
+	cdclk_config->voltage_level =
+		dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
 }
 
 static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
 
 	/* Timeout 200us */
 	if (intel_de_wait_for_clear(dev_priv,
 				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
-		DRM_ERROR("timeout waiting for DE PLL unlock\n");
+		drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
 
 	dev_priv->cdclk.hw.vco = 0;
 }
@@ -1419,17 +1451,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
 	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
 	u32 val;
 
-	val = I915_READ(BXT_DE_PLL_CTL);
+	val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
 	val &= ~BXT_DE_PLL_RATIO_MASK;
 	val |= BXT_DE_PLL_RATIO(ratio);
-	I915_WRITE(BXT_DE_PLL_CTL, val);
+	intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);
 
-	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
 
 	/* Timeout 200us */
 	if (intel_de_wait_for_set(dev_priv,
 				  BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
-		DRM_ERROR("timeout waiting for DE PLL lock\n");
+		drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
 
 	dev_priv->cdclk.hw.vco = vco;
 }
@@ -1438,13 +1470,14 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(BXT_DE_PLL_ENABLE);
+	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
 	val &= ~BXT_DE_PLL_PLL_ENABLE;
-	I915_WRITE(BXT_DE_PLL_ENABLE, val);
+	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
 
 	/* Timeout 200us */
-	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
-		DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
+	if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+		drm_err(&dev_priv->drm,
+			"timeout waiting for CDCLK PLL unlock\n");
 
 	dev_priv->cdclk.hw.vco = 0;
 }
@@ -1455,14 +1488,15 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
 	u32 val;
 
 	val = CNL_CDCLK_PLL_RATIO(ratio);
-	I915_WRITE(BXT_DE_PLL_ENABLE, val);
+	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
 
 	val |= BXT_DE_PLL_PLL_ENABLE;
-	I915_WRITE(BXT_DE_PLL_ENABLE, val);
+	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
 
 	/* Timeout 200us */
-	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
-		DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
+	if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+		drm_err(&dev_priv->drm,
+			"timeout waiting for CDCLK PLL lock\n");
 
 	dev_priv->cdclk.hw.vco = vco;
 }
@@ -1488,11 +1522,11 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
 }
 
 static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe)
 {
-	int cdclk = cdclk_state->cdclk;
-	int vco = cdclk_state->vco;
+	int cdclk = cdclk_config->cdclk;
+	int vco = cdclk_config->vco;
 	u32 val, divider;
 	int ret;
 
@@ -1512,30 +1546,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 						      0x80000000, 150, 2);
 
 	if (ret) {
-		DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
-			  ret, cdclk);
+		drm_err(&dev_priv->drm,
+			"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+			ret, cdclk);
 		return;
 	}
 
 	/* cdclk = vco / 2 / div{1,1.5,2,4} */
 	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
 	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
-		WARN_ON(vco != 0);
+		drm_WARN_ON(&dev_priv->drm,
+			    cdclk != dev_priv->cdclk.hw.bypass);
+		drm_WARN_ON(&dev_priv->drm, vco != 0);
 		/* fall through */
 	case 2:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
 		break;
 	case 3:
-		WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
-		     "Unsupported divider\n");
+		drm_WARN(&dev_priv->drm,
+			 IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+			 "Unsupported divider\n");
 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
 		break;
 	case 4:
 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
 		break;
 	case 8:
-		WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+		drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+			 "Unsupported divider\n");
 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
 		break;
 	}
@@ -1566,14 +1604,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	 */
 	if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
 		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-	I915_WRITE(CDCLK_CTL, val);
+	intel_de_write(dev_priv, CDCLK_CTL, val);
 
 	if (pipe != INVALID_PIPE)
 		intel_wait_for_vblank(dev_priv, pipe);
 
 	if (INTEL_GEN(dev_priv) >= 10) {
 		ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
-					      cdclk_state->voltage_level);
+					      cdclk_config->voltage_level);
 	} else {
 		/*
 		 * The timeout isn't specified, the 2ms used here is based on
@@ -1583,13 +1621,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		 */
 		ret = sandybridge_pcode_write_timeout(dev_priv,
 						      HSW_PCODE_DE_WRITE_FREQ_REQ,
-						      cdclk_state->voltage_level,
+						      cdclk_config->voltage_level,
 						      150, 2);
 	}
 
 	if (ret) {
-		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-			  ret, cdclk);
+		drm_err(&dev_priv->drm,
+			"PCode CDCLK freq set failed, (err %d, freq %d)\n",
+			ret, cdclk);
 		return;
 	}
 
@@ -1600,7 +1639,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		 * Can't read out the voltage level :(
 		 * Let's just assume everything is as expected.
 		 */
-		dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+		dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
 }
 
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1609,7 +1648,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	int cdclk, vco;
 
 	intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
 
 	if (dev_priv->cdclk.hw.vco == 0 ||
 	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
@@ -1621,7 +1660,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
 	 * so sanitize this register.
 	 */
-	cdctl = I915_READ(CDCLK_CTL);
+	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
 	/*
 	 * Let's ignore the pipe field, since BIOS could have configured the
 	 * dividers both synching to an active pipe, or asynchronously
@@ -1672,7 +1711,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 		return;
 
 sanitize:
-	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
 
 	/* force cdclk programming */
 	dev_priv->cdclk.hw.cdclk = 0;
@@ -1681,9 +1720,9 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	dev_priv->cdclk.hw.vco = -1;
 }
 
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
 {
-	struct intel_cdclk_state cdclk_state;
+	struct intel_cdclk_config cdclk_config;
 
 	bxt_sanitize_cdclk(dev_priv);
 
@@ -1691,35 +1730,35 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
 	    dev_priv->cdclk.hw.vco != 0)
 		return;
 
-	cdclk_state = dev_priv->cdclk.hw;
+	cdclk_config = dev_priv->cdclk.hw;
 
 	/*
 	 * FIXME:
 	 * - The initial CDCLK needs to be read from VBT.
 	 *   Need to make this change after VBT has changes for BXT.
 	 */
-	cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
-	cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
-	cdclk_state.voltage_level =
-		dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+	cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
+	cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+	cdclk_config.voltage_level =
+		dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
 
-	bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
 }
 
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
 {
-	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
 
-	cdclk_state.cdclk = cdclk_state.bypass;
-	cdclk_state.vco = 0;
-	cdclk_state.voltage_level =
-		dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+	cdclk_config.cdclk = cdclk_config.bypass;
+	cdclk_config.vco = 0;
+	cdclk_config.voltage_level =
+		dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
 
-	bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+	bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
 }
 
 /**
- * intel_cdclk_init - Initialize CDCLK
+ * intel_cdclk_init_hw - Initialize CDCLK hardware
  * @i915: i915 device
  *
  * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
@@ -1727,39 +1766,41 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
  * during the display core initialization sequence, after which the DMC will
  * take care of turning CDCLK off/on as needed.
  */
-void intel_cdclk_init(struct drm_i915_private *i915)
+void intel_cdclk_init_hw(struct drm_i915_private *i915)
 {
 	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
-		bxt_init_cdclk(i915);
+		bxt_cdclk_init_hw(i915);
 	else if (IS_GEN9_BC(i915))
-		skl_init_cdclk(i915);
+		skl_cdclk_init_hw(i915);
 }
 
 /**
- * intel_cdclk_uninit - Uninitialize CDCLK
+ * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
  * @i915: i915 device
  *
  * Uninitialize CDCLK. This is done only during the display core
  * uninitialization sequence.
  */
-void intel_cdclk_uninit(struct drm_i915_private *i915)
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
 {
 	if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
-		bxt_uninit_cdclk(i915);
+		bxt_cdclk_uninit_hw(i915);
 	else if (IS_GEN9_BC(i915))
-		skl_uninit_cdclk(i915);
+		skl_cdclk_uninit_hw(i915);
 }
 
 /**
- * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
+ *                             configurations requires a modeset on all pipes
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
  *
  * Returns:
- * True if the CDCLK states require pipes to be off during reprogramming, false if not.
+ * True if changing between the two CDCLK configurations
+ * requires all pipes to be off, false if not.
  */
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
-			       const struct intel_cdclk_state *b)
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+			       const struct intel_cdclk_config *b)
 {
 	return a->cdclk != b->cdclk ||
 		a->vco != b->vco ||
@@ -1767,17 +1808,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
 }
 
 /**
- * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
+ *                               configurations requires only a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
  *
  * Returns:
- * True if the CDCLK states require just a cd2x divider update, false if not.
+ * True if changing between the two CDCLK configurations
+ * can be done with just a cd2x divider update, false if not.
  */
-static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
-					  const struct intel_cdclk_state *a,
-					  const struct intel_cdclk_state *b)
+static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+					const struct intel_cdclk_config *a,
+					const struct intel_cdclk_config *b)
 {
 	/* Older hw doesn't have the capability */
 	if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -1789,117 +1832,138 @@ static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
 }
 
 /**
- * intel_cdclk_changed - Determine if two CDCLK states are different
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_changed - Determine if two CDCLK configurations are different
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
  *
  * Returns:
- * True if the CDCLK states don't match, false if they do.
+ * True if the CDCLK configurations don't match, false if they do.
  */
-static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
-				const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
+				const struct intel_cdclk_config *b)
 {
 	return intel_cdclk_needs_modeset(a, b) ||
 		a->voltage_level != b->voltage_level;
 }
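
These predicates nest: any pair of configurations that trips intel_cdclk_needs_modeset() also trips intel_cdclk_changed(), while a bare voltage-level difference only trips the latter. A minimal sketch with hypothetical values (not part of the patch):

struct intel_cdclk_config a = {
	.cdclk = 480000, .vco = 8100000, .ref = 19200, .voltage_level = 0,
};
struct intel_cdclk_config b = a;

b.voltage_level = 1;
/* intel_cdclk_needs_modeset(&a, &b) -> false (clocks identical) */
/* intel_cdclk_changed(&a, &b)       -> true  (voltage differs)  */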
 
-/**
- * intel_cdclk_swap_state - make atomic CDCLK configuration effective
- * @state: atomic state
- *
- * This is the CDCLK version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * Similarly to the atomic helpers this function does a complete swap,
- * i.e. it also puts the old state into @state. This is used by the commit
- * code to determine how CDCLK has changed (for instance did it increase or
- * decrease).
- */
-void intel_cdclk_swap_state(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
-	swap(state->cdclk.logical, dev_priv->cdclk.logical);
-	swap(state->cdclk.actual, dev_priv->cdclk.actual);
-}
-
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
-			    const char *context)
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+			     const char *context)
 {
 	DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
-			 context, cdclk_state->cdclk, cdclk_state->vco,
-			 cdclk_state->ref, cdclk_state->bypass,
-			 cdclk_state->voltage_level);
+			 context, cdclk_config->cdclk, cdclk_config->vco,
+			 cdclk_config->ref, cdclk_config->bypass,
+			 cdclk_config->voltage_level);
 }
 
 /**
- * intel_set_cdclk - Push the CDCLK state to the hardware
+ * intel_set_cdclk - Push the CDCLK configuration to the hardware
  * @dev_priv: i915 device
- * @cdclk_state: new CDCLK state
+ * @cdclk_config: new CDCLK configuration
  * @pipe: pipe with which to synchronize the update
  *
  * Program the hardware based on the passed in CDCLK state,
  * if necessary.
  */
 static void intel_set_cdclk(struct drm_i915_private *dev_priv,
-			    const struct intel_cdclk_state *cdclk_state,
+			    const struct intel_cdclk_config *cdclk_config,
 			    enum pipe pipe)
 {
-	if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+	struct intel_encoder *encoder;
+
+	if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
 		return;
 
-	if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
 		return;
 
-	intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
+	intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
 
-	dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+	/*
+	 * Lock aux/gmbus while we change cdclk in case those
+	 * functions use cdclk. Not all platforms/ports do,
+	 * but we'll lock them all for simplicity.
+	 */
+	mutex_lock(&dev_priv->gmbus_mutex);
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-	if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
-		 "cdclk state doesn't match!\n")) {
-		intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
-		intel_dump_cdclk_state(cdclk_state, "[sw state]");
+		mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+				     &dev_priv->gmbus_mutex);
+	}
+
+	dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
+
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		mutex_unlock(&intel_dp->aux.hw_mutex);
+	}
+	mutex_unlock(&dev_priv->gmbus_mutex);
+
+	if (drm_WARN(&dev_priv->drm,
+		     intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+		     "cdclk state doesn't match!\n")) {
+		intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
+		intel_dump_cdclk_config(cdclk_config, "[sw state]");
 	}
 }
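
The new locking relies on mutex_lock_nest_lock(), which acquires a mutex while telling lockdep that an already-held outer lock serializes all locks of that class; without it, taking several same-class aux hw_mutex locks back to back would be flagged as a potential deadlock. A condensed sketch of the idiom (names hypothetical; outer plays the role of gmbus_mutex):

mutex_lock(&outer);
mutex_lock_nest_lock(&a->hw_mutex, &outer);
mutex_lock_nest_lock(&b->hw_mutex, &outer);	/* same lock class as a's */

/* ... reprogram cdclk with aux/gmbus transfers excluded ... */

mutex_unlock(&b->hw_mutex);
mutex_unlock(&a->hw_mutex);
mutex_unlock(&outer);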
 
 /**
  * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
  *
- * Program the hardware before updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware before updating the HW plane state based on the
+ * new CDCLK state, if necessary.
  */
 void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
-				 const struct intel_cdclk_state *old_state,
-				 const struct intel_cdclk_state *new_state,
-				 enum pipe pipe)
+intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
 {
-	if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
-		intel_set_cdclk(dev_priv, new_state, pipe);
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_cdclk_state *old_cdclk_state =
+		intel_atomic_get_old_cdclk_state(state);
+	const struct intel_cdclk_state *new_cdclk_state =
+		intel_atomic_get_new_cdclk_state(state);
+	enum pipe pipe = new_cdclk_state->pipe;
+
+	if (!intel_cdclk_changed(&old_cdclk_state->actual,
+				 &new_cdclk_state->actual))
+		return;
+
+	if (pipe == INVALID_PIPE ||
+	    old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+		intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+	}
 }
 
 /**
  * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
  *
- * Program the hardware after updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware after updating the HW plane state based on the
+ * new CDCLK state, if necessary.
  */
 void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
-				  const struct intel_cdclk_state *old_state,
-				  const struct intel_cdclk_state *new_state,
-				  enum pipe pipe)
+intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
 {
-	if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
-		intel_set_cdclk(dev_priv, new_state, pipe);
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_cdclk_state *old_cdclk_state =
+		intel_atomic_get_old_cdclk_state(state);
+	const struct intel_cdclk_state *new_cdclk_state =
+		intel_atomic_get_new_cdclk_state(state);
+	enum pipe pipe = new_cdclk_state->pipe;
+
+	if (!intel_cdclk_changed(&old_cdclk_state->actual,
+				 &new_cdclk_state->actual))
+		return;
+
+	if (pipe != INVALID_PIPE &&
+	    old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+		intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+	}
 }
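
Splitting the push across the plane update keeps cdclk at or above the demanded rate at every point of the commit: an increase is programmed before the planes are touched, a decrease only afterwards. Schematically (simplified; not the actual commit path):

intel_set_cdclk_pre_plane_update(state);	/* old <= new: raise cdclk now */
/* ... commit plane/pipe updates at the guaranteed-sufficient cdclk ... */
intel_set_cdclk_post_plane_update(state);	/* old > new: lower cdclk here */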
 
 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2017,25 +2081,24 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 		min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
 
 	if (min_cdclk > dev_priv->max_cdclk_freq) {
-		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
-			      min_cdclk, dev_priv->max_cdclk_freq);
+		drm_dbg_kms(&dev_priv->drm,
+			    "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+			    min_cdclk, dev_priv->max_cdclk_freq);
 		return -EINVAL;
 	}
 
 	return min_cdclk;
 }
 
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
 {
+	struct intel_atomic_state *state = cdclk_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *crtc_state;
 	int min_cdclk, i;
 	enum pipe pipe;
 
-	memcpy(state->min_cdclk, dev_priv->min_cdclk,
-	       sizeof(state->min_cdclk));
-
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		int ret;
 
@@ -2043,19 +2106,19 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
 		if (min_cdclk < 0)
 			return min_cdclk;
 
-		if (state->min_cdclk[i] == min_cdclk)
+		if (cdclk_state->min_cdclk[i] == min_cdclk)
 			continue;
 
-		state->min_cdclk[i] = min_cdclk;
+		cdclk_state->min_cdclk[i] = min_cdclk;
 
-		ret = intel_atomic_lock_global_state(state);
+		ret = intel_atomic_lock_global_state(&cdclk_state->base);
 		if (ret)
 			return ret;
 	}
 
-	min_cdclk = state->cdclk.force_min_cdclk;
+	min_cdclk = cdclk_state->force_min_cdclk;
 	for_each_pipe(dev_priv, pipe)
-		min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
+		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
 
 	return min_cdclk;
 }
@@ -2073,8 +2136,9 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
  * future platforms this code will need to be
  * adjusted.
  */
-static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
 {
+	struct intel_atomic_state *state = cdclk_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *crtc_state;
@@ -2082,9 +2146,6 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
 	int i;
 	enum pipe pipe;
 
-	memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
-	       sizeof(state->min_voltage_level));
-
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		int ret;
 
@@ -2093,57 +2154,58 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
 		else
 			min_voltage_level = 0;
 
-		if (state->min_voltage_level[i] == min_voltage_level)
+		if (cdclk_state->min_voltage_level[i] == min_voltage_level)
 			continue;
 
-		state->min_voltage_level[i] = min_voltage_level;
+		cdclk_state->min_voltage_level[i] = min_voltage_level;
 
-		ret = intel_atomic_lock_global_state(state);
+		ret = intel_atomic_lock_global_state(&cdclk_state->base);
 		if (ret)
 			return ret;
 	}
 
 	min_voltage_level = 0;
 	for_each_pipe(dev_priv, pipe)
-		min_voltage_level = max(state->min_voltage_level[pipe],
+		min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
 					min_voltage_level);
 
 	return min_voltage_level;
 }
 
-static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
 {
+	struct intel_atomic_state *state = cdclk_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	int min_cdclk, cdclk;
 
-	min_cdclk = intel_compute_min_cdclk(state);
+	min_cdclk = intel_compute_min_cdclk(cdclk_state);
 	if (min_cdclk < 0)
 		return min_cdclk;
 
 	cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
 
-	state->cdclk.logical.cdclk = cdclk;
-	state->cdclk.logical.voltage_level =
+	cdclk_state->logical.cdclk = cdclk;
+	cdclk_state->logical.voltage_level =
 		vlv_calc_voltage_level(dev_priv, cdclk);
 
-	if (!state->active_pipes) {
-		cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+	if (!cdclk_state->active_pipes) {
+		cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
 
-		state->cdclk.actual.cdclk = cdclk;
-		state->cdclk.actual.voltage_level =
+		cdclk_state->actual.cdclk = cdclk;
+		cdclk_state->actual.voltage_level =
 			vlv_calc_voltage_level(dev_priv, cdclk);
 	} else {
-		state->cdclk.actual = state->cdclk.logical;
+		cdclk_state->actual = cdclk_state->logical;
 	}
 
 	return 0;
 }
 
-static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
 {
 	int min_cdclk, cdclk;
 
-	min_cdclk = intel_compute_min_cdclk(state);
+	min_cdclk = intel_compute_min_cdclk(cdclk_state);
 	if (min_cdclk < 0)
 		return min_cdclk;
 
@@ -2153,31 +2215,32 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
 	 */
 	cdclk = bdw_calc_cdclk(min_cdclk);
 
-	state->cdclk.logical.cdclk = cdclk;
-	state->cdclk.logical.voltage_level =
+	cdclk_state->logical.cdclk = cdclk;
+	cdclk_state->logical.voltage_level =
 		bdw_calc_voltage_level(cdclk);
 
-	if (!state->active_pipes) {
-		cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
+	if (!cdclk_state->active_pipes) {
+		cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);
 
-		state->cdclk.actual.cdclk = cdclk;
-		state->cdclk.actual.voltage_level =
+		cdclk_state->actual.cdclk = cdclk;
+		cdclk_state->actual.voltage_level =
 			bdw_calc_voltage_level(cdclk);
 	} else {
-		state->cdclk.actual = state->cdclk.logical;
+		cdclk_state->actual = cdclk_state->logical;
 	}
 
 	return 0;
 }
 
-static int skl_dpll0_vco(struct intel_atomic_state *state)
+static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
 {
+	struct intel_atomic_state *state = cdclk_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *crtc_state;
 	int vco, i;
 
-	vco = state->cdclk.logical.vco;
+	vco = cdclk_state->logical.vco;
 	if (!vco)
 		vco = dev_priv->skl_preferred_vco_freq;
 
@@ -2206,15 +2269,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
 	return vco;
 }
 
-static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
 {
 	int min_cdclk, cdclk, vco;
 
-	min_cdclk = intel_compute_min_cdclk(state);
+	min_cdclk = intel_compute_min_cdclk(cdclk_state);
 	if (min_cdclk < 0)
 		return min_cdclk;
 
-	vco = skl_dpll0_vco(state);
+	vco = skl_dpll0_vco(cdclk_state);
 
 	/*
 	 * FIXME should also account for plane ratio
@@ -2222,57 +2285,58 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
 	 */
 	cdclk = skl_calc_cdclk(min_cdclk, vco);
 
-	state->cdclk.logical.vco = vco;
-	state->cdclk.logical.cdclk = cdclk;
-	state->cdclk.logical.voltage_level =
+	cdclk_state->logical.vco = vco;
+	cdclk_state->logical.cdclk = cdclk;
+	cdclk_state->logical.voltage_level =
 		skl_calc_voltage_level(cdclk);
 
-	if (!state->active_pipes) {
-		cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
+	if (!cdclk_state->active_pipes) {
+		cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);
 
-		state->cdclk.actual.vco = vco;
-		state->cdclk.actual.cdclk = cdclk;
-		state->cdclk.actual.voltage_level =
+		cdclk_state->actual.vco = vco;
+		cdclk_state->actual.cdclk = cdclk;
+		cdclk_state->actual.voltage_level =
 			skl_calc_voltage_level(cdclk);
 	} else {
-		state->cdclk.actual = state->cdclk.logical;
+		cdclk_state->actual = cdclk_state->logical;
 	}
 
 	return 0;
 }
 
-static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
 {
+	struct intel_atomic_state *state = cdclk_state->base.state;
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	int min_cdclk, min_voltage_level, cdclk, vco;
 
-	min_cdclk = intel_compute_min_cdclk(state);
+	min_cdclk = intel_compute_min_cdclk(cdclk_state);
 	if (min_cdclk < 0)
 		return min_cdclk;
 
-	min_voltage_level = bxt_compute_min_voltage_level(state);
+	min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
 	if (min_voltage_level < 0)
 		return min_voltage_level;
 
 	cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
 	vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
 
-	state->cdclk.logical.vco = vco;
-	state->cdclk.logical.cdclk = cdclk;
-	state->cdclk.logical.voltage_level =
+	cdclk_state->logical.vco = vco;
+	cdclk_state->logical.cdclk = cdclk;
+	cdclk_state->logical.voltage_level =
 		max_t(int, min_voltage_level,
 		      dev_priv->display.calc_voltage_level(cdclk));
 
-	if (!state->active_pipes) {
-		cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+	if (!cdclk_state->active_pipes) {
+		cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
 		vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
 
-		state->cdclk.actual.vco = vco;
-		state->cdclk.actual.cdclk = cdclk;
-		state->cdclk.actual.voltage_level =
+		cdclk_state->actual.vco = vco;
+		cdclk_state->actual.cdclk = cdclk;
+		cdclk_state->actual.voltage_level =
 			dev_priv->display.calc_voltage_level(cdclk);
 	} else {
-		state->cdclk.actual = state->cdclk.logical;
+		cdclk_state->actual = cdclk_state->logical;
 	}
 
 	return 0;
@@ -2317,7 +2381,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
 	return 0;
 }
 
-static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
 {
 	int min_cdclk;
 
@@ -2326,54 +2390,113 @@ static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
 	 * check that the required minimum frequency doesn't exceed
 	 * the actual cdclk frequency.
 	 */
-	min_cdclk = intel_compute_min_cdclk(state);
+	min_cdclk = intel_compute_min_cdclk(cdclk_state);
 	if (min_cdclk < 0)
 		return min_cdclk;
 
 	return 0;
 }
 
+static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
+{
+	struct intel_cdclk_state *cdclk_state;
+
+	cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
+	if (!cdclk_state)
+		return NULL;
+
+	cdclk_state->force_min_cdclk_changed = false;
+	cdclk_state->pipe = INVALID_PIPE;
+
+	return &cdclk_state->base;
+}
+
+static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
+				      struct intel_global_state *state)
+{
+	kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_cdclk_funcs = {
+	.atomic_duplicate_state = intel_cdclk_duplicate_state,
+	.atomic_destroy_state = intel_cdclk_destroy_state,
+};
+
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_global_state *cdclk_state;
+
+	cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+	if (IS_ERR(cdclk_state))
+		return ERR_CAST(cdclk_state);
+
+	return to_intel_cdclk_state(cdclk_state);
+}
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_cdclk_state *cdclk_state;
+
+	cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
+	if (!cdclk_state)
+		return -ENOMEM;
+
+	intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+				     &cdclk_state->base, &intel_cdclk_funcs);
+
+	return 0;
+}
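
With the cdclk state hung off an intel_global_obj, a check phase obtains its own copy on demand and the framework handles the old/new swap at commit time; writers still serialize explicitly, as intel_compute_min_cdclk() does above. A usage sketch (assuming a check hook with an intel_atomic_state *state in scope):

struct intel_cdclk_state *cdclk_state;

cdclk_state = intel_atomic_get_cdclk_state(state);
if (IS_ERR(cdclk_state))
	return PTR_ERR(cdclk_state);

/* hypothetical mutation; must be followed by lock/serialize as above */
cdclk_state->force_min_cdclk = 96000;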
+
 int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_cdclk_state *old_cdclk_state;
+	struct intel_cdclk_state *new_cdclk_state;
 	enum pipe pipe;
 	int ret;
 
-	ret = dev_priv->display.modeset_calc_cdclk(state);
+	new_cdclk_state = intel_atomic_get_cdclk_state(state);
+	if (IS_ERR(new_cdclk_state))
+		return PTR_ERR(new_cdclk_state);
+
+	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+	new_cdclk_state->active_pipes =
+		intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+	ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
 	if (ret)
 		return ret;
 
-	/*
-	 * Writes to dev_priv->cdclk.{actual,logical} must protected
-	 * by holding all the crtc mutexes even if we don't end up
-	 * touching the hardware
-	 */
-	if (intel_cdclk_changed(&dev_priv->cdclk.actual,
-				&state->cdclk.actual)) {
+	if (intel_cdclk_changed(&old_cdclk_state->actual,
+				&new_cdclk_state->actual)) {
 		/*
 		 * Also serialize commits across all crtcs
 		 * if the actual hw needs to be poked.
 		 */
-		ret = intel_atomic_serialize_global_state(state);
+		ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
 		if (ret)
 			return ret;
-	} else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
-				       &state->cdclk.logical)) {
-		ret = intel_atomic_lock_global_state(state);
+	} else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+		   intel_cdclk_changed(&old_cdclk_state->logical,
+				       &new_cdclk_state->logical)) {
+		ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
 		if (ret)
 			return ret;
 	} else {
 		return 0;
 	}
 
-	if (is_power_of_2(state->active_pipes) &&
-	    intel_cdclk_needs_cd2x_update(dev_priv,
-					  &dev_priv->cdclk.actual,
-					  &state->cdclk.actual)) {
+	if (is_power_of_2(new_cdclk_state->active_pipes) &&
+	    intel_cdclk_can_cd2x_update(dev_priv,
+					&old_cdclk_state->actual,
+					&new_cdclk_state->actual)) {
 		struct intel_crtc *crtc;
 		struct intel_crtc_state *crtc_state;
 
-		pipe = ilog2(state->active_pipes);
+		pipe = ilog2(new_cdclk_state->active_pipes);
 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -2387,28 +2510,32 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
 	}
 
 	if (pipe != INVALID_PIPE) {
-		state->cdclk.pipe = pipe;
+		new_cdclk_state->pipe = pipe;
 
-		DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
-			      pipe_name(pipe));
-	} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
-					     &state->cdclk.actual)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Can change cdclk with pipe %c active\n",
+			    pipe_name(pipe));
+	} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+					     &new_cdclk_state->actual)) {
 		/* All pipes must be switched off while we change the cdclk. */
 		ret = intel_modeset_all_pipes(state);
 		if (ret)
 			return ret;
 
-		state->cdclk.pipe = INVALID_PIPE;
+		new_cdclk_state->pipe = INVALID_PIPE;
 
-		DRM_DEBUG_KMS("Modeset required for cdclk change\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Modeset required for cdclk change\n");
 	}
 
-	DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
-		      state->cdclk.logical.cdclk,
-		      state->cdclk.actual.cdclk);
-	DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
-		      state->cdclk.logical.voltage_level,
-		      state->cdclk.actual.voltage_level);
+	drm_dbg_kms(&dev_priv->drm,
+		    "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+		    new_cdclk_state->logical.cdclk,
+		    new_cdclk_state->actual.cdclk);
+	drm_dbg_kms(&dev_priv->drm,
+		    "New voltage level calculated to be logical %u, actual %u\n",
+		    new_cdclk_state->logical.voltage_level,
+		    new_cdclk_state->actual.voltage_level);
 
 	return 0;
 }
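
Since active_pipes is a bitmask, is_power_of_2() is a compact test for exactly one active pipe, and ilog2() recovers that pipe's index for the synchronized cd2x update. For example (hypothetical mask):

u8 active_pipes = BIT(PIPE_B);		/* only pipe B is active */

if (is_power_of_2(active_pipes))
	pipe = ilog2(active_pipes);	/* -> 1, i.e. PIPE_B */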
@@ -2453,11 +2580,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		dev_priv->max_cdclk_freq = 528000;
 	} else if (IS_GEN9_BC(dev_priv)) {
-		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+		u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
 		int max_cdclk, vco;
 
 		vco = dev_priv->skl_preferred_vco_freq;
-		WARN_ON(vco != 8100000 && vco != 8640000);
+		drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
 
 		/*
 		 * Use the lower (vco 8640) cdclk values as a
@@ -2485,7 +2612,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 		 * How can we know if extra cooling is
 		 * available? PCI ID, VTB, something else?
 		 */
-		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
 			dev_priv->max_cdclk_freq = 450000;
 		else if (IS_BDW_ULX(dev_priv))
 			dev_priv->max_cdclk_freq = 450000;
@@ -2504,11 +2631,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
 
 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
 
-	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
-			 dev_priv->max_cdclk_freq);
+	drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
+		dev_priv->max_cdclk_freq);
 
-	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
-			 dev_priv->max_dotclk_freq);
+	drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
+		dev_priv->max_dotclk_freq);
 }
 
 /**
@@ -2528,8 +2655,8 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
 	 * generate GMBus clock. This will vary with the cdclk freq.
 	 */
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		I915_WRITE(GMBUSFREQ_VLV,
-			   DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+		intel_de_write(dev_priv, GMBUSFREQ_VLV,
+		               DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
 }
 
 static int cnp_rawclk(struct drm_i915_private *dev_priv)
@@ -2537,7 +2664,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
 	u32 rawclk;
 	int divider, fraction;
 
-	if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+	if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
 		/* 24 MHz */
 		divider = 24000;
 		fraction = 0;
@@ -2557,13 +2684,13 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
 			rawclk |= ICP_RAWCLK_NUM(numerator);
 	}
 
-	I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+	intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
 	return divider + fraction;
 }
 
 static int pch_rawclk(struct drm_i915_private *dev_priv)
 {
-	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+	return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
 }
 
 static int vlv_hrawclk(struct drm_i915_private *dev_priv)
@@ -2578,7 +2705,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
 	u32 clkcfg;
 
 	/* hrawclock is 1/4 the FSB frequency */
-	clkcfg = I915_READ(CLKCFG);
+	clkcfg = intel_de_read(dev_priv, CLKCFG);
 	switch (clkcfg & CLKCFG_FSB_MASK) {
 	case CLKCFG_FSB_400:
 		return 100000;
@@ -2600,27 +2727,29 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_update_rawclk - Determine the current RAWCLK frequency
+ * intel_read_rawclk - Determine the current RAWCLK frequency
  * @dev_priv: i915 device
  *
  * Determine the current RAWCLK frequency. RAWCLK is a fixed
  * frequency clock so this needs to be done only once.
  */
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
 {
+	u32 freq;
+
 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
-		dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
+		freq = cnp_rawclk(dev_priv);
 	else if (HAS_PCH_SPLIT(dev_priv))
-		dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+		freq = pch_rawclk(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+		freq = vlv_hrawclk(dev_priv);
 	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
-		dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+		freq = g4x_hrawclk(dev_priv);
 	else
 		/* no rawclk on other platforms, or no need to know it */
-		return;
+		return 0;
 
-	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+	return freq;
 }
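
The helper is now a pure query rather than a writer of dev_priv->rawclk_freq, so the storage and any logging move to the caller; the call site is outside this hunk, but presumably ends up as something like:

dev_priv->rawclk_freq = intel_read_rawclk(dev_priv);
drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", dev_priv->rawclk_freq);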
 
 /**
@@ -2629,7 +2758,12 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
  */
 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 {
-	if (IS_ELKHARTLAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 12) {
+		dev_priv->display.set_cdclk = bxt_set_cdclk;
+		dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+		dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+		dev_priv->cdclk.table = icl_cdclk_table;
+	} else if (IS_ELKHARTLAKE(dev_priv)) {
 		dev_priv->display.set_cdclk = bxt_set_cdclk;
 		dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
 		dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
@@ -2709,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 	else if (IS_I845G(dev_priv))
 		dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
 	else { /* 830 */
-		WARN(!IS_I830(dev_priv),
-		     "Unknown platform. Assuming 133 MHz CDCLK\n");
+		drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
+			 "Unknown platform. Assuming 133 MHz CDCLK\n");
 		dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
 	}
 }
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cf71394..5731806 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,11 +8,12 @@
 
 #include <linux/types.h>
 
+#include "i915_drv.h"
 #include "intel_display.h"
+#include "intel_global_state.h"
 
 struct drm_i915_private;
 struct intel_atomic_state;
-struct intel_cdclk_state;
 struct intel_crtc_state;
 
 struct intel_cdclk_vals {
@@ -22,28 +23,62 @@ struct intel_cdclk_vals {
 	u8 ratio;
 };
 
+struct intel_cdclk_state {
+	struct intel_global_state base;
+
+	/*
+	 * Logical configuration of cdclk (used for all scaling,
+	 * watermark, etc. calculations and checks). This is
+	 * computed as if all enabled crtcs were active.
+	 */
+	struct intel_cdclk_config logical;
+
+	/*
+	 * Actual configuration of cdclk, can be different from the
+	 * logical configuration only when all crtcs are DPMS off.
+	 */
+	struct intel_cdclk_config actual;
+
+	/* minimum acceptable cdclk for each pipe */
+	int min_cdclk[I915_MAX_PIPES];
+	/* minimum acceptable voltage level for each pipe */
+	u8 min_voltage_level[I915_MAX_PIPES];
+
+	/* pipe to which cd2x update is synchronized */
+	enum pipe pipe;
+
+	/* forced minimum cdclk for glk+ audio w/a */
+	int force_min_cdclk;
+	bool force_min_cdclk_changed;
+
+	/* bitmask of active pipes */
+	u8 active_pipes;
+};
+
 int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init(struct drm_i915_private *i915);
-void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_cdclk_init_hw(struct drm_i915_private *i915);
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
 void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
 void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
-			       const struct intel_cdclk_state *b);
-void intel_cdclk_swap_state(struct intel_atomic_state *state);
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
-				 const struct intel_cdclk_state *old_state,
-				 const struct intel_cdclk_state *new_state,
-				 enum pipe pipe);
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
-				  const struct intel_cdclk_state *old_state,
-				  const struct intel_cdclk_state *new_state,
-				  enum pipe pipe);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
-			    const char *context);
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+			       const struct intel_cdclk_config *b);
+void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
+void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+			     const char *context);
 int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
 
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+
+#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define intel_atomic_get_old_cdclk_state(state) \
+	to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+#define intel_atomic_get_new_cdclk_state(state) \
+	to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv);
+
 #endif /* __INTEL_CDCLK_H__ */
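
The old/new accessors mirror the per-crtc state helpers: within one atomic commit they return the pre- and post-swap snapshots of the same global object. A sketch of their use in some commit-path helper (surrounding context assumed, not from the patch):

const struct intel_cdclk_state *old_s =
	intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_s =
	intel_atomic_get_new_cdclk_state(state);

if (intel_cdclk_changed(&old_s->actual, &new_s->actual))
	drm_dbg_kms(&to_i915(state->base.dev)->drm, "cdclk will change\n");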
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 3980e8b..c1cce93 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -157,23 +157,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
-	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
-	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+	intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+	intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+	intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
 
-	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
-	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+		       coeff[0] << 16 | coeff[1]);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
 
-	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
-	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+		       coeff[3] << 16 | coeff[4]);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
 
-	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
-	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+		       coeff[6] << 16 | coeff[7]);
+	intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
 
 	if (INTEL_GEN(dev_priv) >= 7) {
-		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
-		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
-		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+		intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+			       postoff[0]);
+		intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+			       postoff[1]);
+		intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+			       postoff[2]);
 	}
 }
 
@@ -185,22 +191,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
-	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
-	I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
 
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+		       coeff[0] << 16 | coeff[1]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+		       coeff[2] << 16);
 
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+		       coeff[3] << 16 | coeff[4]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+		       coeff[5] << 16);
 
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
-	I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+		       coeff[6] << 16 | coeff[7]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+		       coeff[8] << 16);
 
-	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
-	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
-	I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+	intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
 }
 
 static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -297,14 +309,16 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 		 * LUT is needed but CSC is not we need to load an
 		 * identity matrix.
 		 */
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+			    !IS_GEMINILAKE(dev_priv));
 
 		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
 				    ilk_csc_coeff_identity,
 				    ilk_csc_off_zero);
 	}
 
-	I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+	intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+		       crtc_state->csc_mode);
 }
 
 static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -330,51 +344,74 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 				      ilk_csc_postoff_limited_range);
 	}
 
-	I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+	intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+		       crtc_state->csc_mode);
 }
 
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+			     const struct drm_property_blob *blob)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_color_ctm *ctm = blob->data;
 	enum pipe pipe = crtc->pipe;
+	u16 coeffs[9];
+	int i;
 
-	if (crtc_state->hw.ctm) {
-		const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
-		u16 coeffs[9] = {};
-		int i;
+	for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+		u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
 
-		for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
-			u64 abs_coeff =
-				((1ULL << 63) - 1) & ctm->matrix[i];
+		/* Round coefficient. */
+		abs_coeff += 1 << (32 - 13);
+		/* Clamp to hardware limits. */
+		abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
 
-			/* Round coefficient. */
-			abs_coeff += 1 << (32 - 13);
-			/* Clamp to hardware limits. */
-			abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+		coeffs[i] = 0;
 
-			/* Write coefficients in S3.12 format. */
-			if (ctm->matrix[i] & (1ULL << 63))
-				coeffs[i] = 1 << 15;
-			coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
-			coeffs[i] |= (abs_coeff >> 20) & 0xfff;
-		}
+		/* Write coefficients in S3.12 format. */
+		if (ctm->matrix[i] & (1ULL << 63))
+			coeffs[i] |= 1 << 15;
 
-		I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
-			   coeffs[1] << 16 | coeffs[0]);
-		I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
-			   coeffs[3] << 16 | coeffs[2]);
-		I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
-			   coeffs[5] << 16 | coeffs[4]);
-		I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
-			   coeffs[7] << 16 | coeffs[6]);
-		I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+		coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+		coeffs[i] |= (abs_coeff >> 20) & 0xfff;
 	}
 
-	I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+	intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+		       coeffs[1] << 16 | coeffs[0]);
+	intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+		       coeffs[3] << 16 | coeffs[2]);
+	intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+		       coeffs[5] << 16 | coeffs[4]);
+	intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+		       coeffs[7] << 16 | coeffs[6]);
+	intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+		       coeffs[8]);
+}
+
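drm_color_ctm coefficients arrive as sign-magnitude S31.32 fixed point; the loop rounds them to 12 fractional bits, clamps just below 8.0, and packs sign, integer and fraction into the hardware's S3.12 format. For instance, a positive identity coefficient of 1.0 packs to 0x1000 (sketch, not driver code):

u64 one = 1ULL << 32;			/* 1.0 in S31.32 */
u16 s3_12 = ((one >> 32) & 7) << 12 |	/* integer bits  -> 0x1000 */
	    ((one >> 20) & 0xfff);	/* fraction bits -> 0x000  */
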
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+	u32 max = 0xffff >> (16 - bit_precision);
+
+	val = clamp_val(val, 0, max);
+
+	if (bit_precision < 16)
+		val <<= 16 - bit_precision;
+
+	return val;
+}
+
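intel_color_lut_pack() is the readout-side inverse of drm_color_lut_extract(): a hardware value of bit_precision bits is clamped and left-shifted onto the 16-bit property scale. Note it is a plain shift, so a full-scale 10-bit value maps to 0xffc0 rather than 0xffff. For example:

u32 prop = intel_color_lut_pack(0x3ff, 10);	/* (0x3ff << 6) == 0xffc0 */
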
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+	return drm_color_lut_extract(color->red, 8) << 16 |
+		drm_color_lut_extract(color->green, 8) << 8 |
+		drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+	entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+	entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+	entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
 }
 
 /* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
@@ -393,6 +430,22 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
 		(color->blue >> 8);
 }
 
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+	entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+		REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+	entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+		REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+	entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+		REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
+}
+
+static u16 i965_lut_11p6_max_pack(u32 val)
+{
+	/* PIPEGCMAX is 11.6, clamp to 10.6 */
+	return clamp_val(val, 0, 0xffff);
+}
+
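In the i965 "10.6" interpolated layout each LUT entry spans two palette dwords, the even one holding the low 8 bits of each channel and the odd one the high 8 bits; i965_lut_10p6_pack() recombines them per channel. A sketch for one channel (hypothetical values, register field extraction elided):

u16 hi = 0xab, lo = 0xcd;	/* RED fields from udw and ldw */
u16 red = hi << 8 | lo;		/* -> 0xabcd */
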
 static u32 ilk_lut_10(const struct drm_color_lut *color)
 {
 	return drm_color_lut_extract(color->red, 10) << 20 |
@@ -400,42 +453,11 @@ static u32 ilk_lut_10(const struct drm_color_lut *color)
 		drm_color_lut_extract(color->blue, 10);
 }
 
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
-				    const struct drm_property_blob *blob)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
-	int i;
-
-	if (HAS_GMCH(dev_priv)) {
-		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-			assert_dsi_pll_enabled(dev_priv);
-		else
-			assert_pll_enabled(dev_priv, pipe);
-	}
-
-	if (blob) {
-		const struct drm_color_lut *lut = blob->data;
-
-		for (i = 0; i < 256; i++) {
-			u32 word =
-				(drm_color_lut_extract(lut[i].red, 8) << 16) |
-				(drm_color_lut_extract(lut[i].green, 8) << 8) |
-				drm_color_lut_extract(lut[i].blue, 8);
-
-			if (HAS_GMCH(dev_priv))
-				I915_WRITE(PALETTE(pipe, i), word);
-			else
-				I915_WRITE(LGC_PALETTE(pipe, i), word);
-		}
-	}
-}
-
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
-{
-	i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+	entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+	entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+	entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
 }
 
 static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -445,10 +467,10 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
 	enum pipe pipe = crtc->pipe;
 	u32 val;
 
-	val = I915_READ(PIPECONF(pipe));
+	val = intel_de_read(dev_priv, PIPECONF(pipe));
 	val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-	I915_WRITE(PIPECONF(pipe), val);
+	intel_de_write(dev_priv, PIPECONF(pipe), val);
 }
 
 static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
@@ -458,10 +480,10 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
 	enum pipe pipe = crtc->pipe;
 	u32 val;
 
-	val = I915_READ(PIPECONF(pipe));
+	val = intel_de_read(dev_priv, PIPECONF(pipe));
 	val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
-	I915_WRITE(PIPECONF(pipe), val);
+	intel_de_write(dev_priv, PIPECONF(pipe), val);
 
 	ilk_load_csc_matrix(crtc_state);
 }
@@ -471,7 +493,8 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+	intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+		       crtc_state->gamma_mode);
 
 	ilk_load_csc_matrix(crtc_state);
 }
@@ -492,9 +515,10 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
 		val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
 	if (crtc_state->csc_enable)
 		val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
-	I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+	intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val);
 
-	I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+	intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+		       crtc_state->gamma_mode);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_load_csc_matrix(crtc_state);
@@ -502,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
 		ilk_load_csc_matrix(crtc_state);
 }
 
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_color_lut *lut;
+	enum pipe pipe = crtc->pipe;
+	int i;
+
+	if (!blob)
+		return;
+
+	lut = blob->data;
+
+	for (i = 0; i < 256; i++)
+		intel_de_write(dev_priv, PALETTE(pipe, i),
+			       i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+	assert_pll_enabled(dev_priv, crtc->pipe);
+
+	i9xx_load_lut_8(crtc, gamma_lut);
+}
+
 static void i965_load_lut_10p6(struct intel_crtc *crtc,
 			       const struct drm_property_blob *blob)
 {
@@ -511,28 +564,52 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
 	enum pipe pipe = crtc->pipe;
 
 	for (i = 0; i < lut_size - 1; i++) {
-		I915_WRITE(PALETTE(pipe, 2 * i + 0),
-			   i965_lut_10p6_ldw(&lut[i]));
-		I915_WRITE(PALETTE(pipe, 2 * i + 1),
-			   i965_lut_10p6_udw(&lut[i]));
+		intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
+			       i965_lut_10p6_ldw(&lut[i]));
+		intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
+			       i965_lut_10p6_udw(&lut[i]));
 	}
 
-	I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
-	I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
-	I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
+	intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+	intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+	intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
 }
 
 static void i965_load_luts(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
 
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+		assert_dsi_pll_enabled(dev_priv);
+	else
+		assert_pll_enabled(dev_priv, crtc->pipe);
+
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		i9xx_load_luts(crtc_state);
+		i9xx_load_lut_8(crtc, gamma_lut);
 	else
 		i965_load_lut_10p6(crtc, gamma_lut);
 }
 
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+			   const struct drm_property_blob *blob)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_color_lut *lut;
+	enum pipe pipe = crtc->pipe;
+	int i;
+
+	if (!blob)
+		return;
+
+	lut = blob->data;
+
+	for (i = 0; i < 256; i++)
+		intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+			       i9xx_lut_8(&lut[i]));
+}
+
 static void ilk_load_lut_10(struct intel_crtc *crtc,
 			    const struct drm_property_blob *blob)
 {
@@ -542,7 +619,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
 	enum pipe pipe = crtc->pipe;
 
 	for (i = 0; i < lut_size; i++)
-		I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+		intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
+			       ilk_lut_10(&lut[i]));
 }
 
 static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -551,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
 	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		i9xx_load_luts(crtc_state);
+		ilk_load_lut_8(crtc, gamma_lut);
 	else
 		ilk_load_lut_10(crtc, gamma_lut);
 }
@@ -584,15 +662,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
 		const struct drm_color_lut *entry =
 			&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
 
-		I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
-		I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+		intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+		intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+			       ilk_lut_10(entry));
 	}
 
 	/*
 	 * Reset the index, otherwise it prevents the legacy palette to be
 	 * written properly.
 	 */
-	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+	intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
 }
 
 /* On BDW+ the index auto increment mode actually works */
@@ -606,22 +685,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
 	int i, lut_size = drm_color_lut_size(blob);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
-		   PAL_PREC_AUTO_INCREMENT);
+	intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+		       prec_index | PAL_PREC_AUTO_INCREMENT);
 
 	for (i = 0; i < hw_lut_size; i++) {
 		/* We discard half the user entries in split gamma mode */
 		const struct drm_color_lut *entry =
 			&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
 
-		I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+		intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+			       ilk_lut_10(entry));
 	}
 
 	/*
 	 * Reset the index, otherwise it prevents the legacy palette to be
 	 * written properly.
 	 */
-	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+	intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
 }
 
 static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
@@ -659,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
 	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-		i9xx_load_luts(crtc_state);
+		ilk_load_lut_8(crtc, gamma_lut);
 	} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
 		ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
 				PAL_PREC_INDEX_VALUE(0));
@@ -682,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
 	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-		i9xx_load_luts(crtc_state);
+		ilk_load_lut_8(crtc, gamma_lut);
 	} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
 		bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
 				PAL_PREC_INDEX_VALUE(0));
@@ -703,17 +783,17 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
-	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 	const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
-	u32 i;
 
 	/*
 	 * When setting the auto-increment bit, the hardware seems to
 	 * ignore the index bits, so we need to reset it to index 0
 	 * separately.
 	 */
-	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
-	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+	intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+	intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+		       PRE_CSC_GAMC_AUTO_INCREMENT);
 
 	for (i = 0; i < lut_size; i++) {
 		/*
@@ -729,12 +809,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 		 * ToDo: Extend to max 7.0. Enable 32 bit input value
 		 * as compared to just 16 to achieve this.
 		 */
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+		intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+		               lut[i].green);
 	}
 
 	/* Clamp values > 1.0. */
 	while (i++ < 35)
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+		intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
 }
 
 static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@@ -742,26 +823,26 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
-	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
-	u32 i;
+	int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
 
 	/*
 	 * When setting the auto-increment bit, the hardware seems to
 	 * ignore the index bits, so we need to reset it to index 0
 	 * separately.
 	 */
-	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
-	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+	intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+	intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+		       PRE_CSC_GAMC_AUTO_INCREMENT);
 
 	for (i = 0; i < lut_size; i++) {
 		u32 v = (i << 16) / (lut_size - 1);
 
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+		intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
 	}
 
 	/* Clamp values > 1.0. */
 	while (i++ < 35)
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+		intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
 }
 
 static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -783,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 		glk_load_degamma_lut_linear(crtc_state);
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
-		i9xx_load_luts(crtc_state);
+		ilk_load_lut_8(crtc, gamma_lut);
 	} else {
 		bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
 		ivb_load_lut_ext_max(crtc);
@@ -827,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
 	const struct drm_color_lut *lut = blob->data;
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
 	enum pipe pipe = crtc->pipe;
-	u32 i;
+	int i;
 
 	/*
 	 * Program Super Fine segment (let's call it seg1)...
@@ -860,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
 	const struct drm_color_lut *entry;
 	struct intel_dsb *dsb = intel_dsb_get(crtc);
 	enum pipe pipe = crtc->pipe;
-	u32 i;
+	int i;
 
 	/*
 	 * Program Fine segment (let's call it seg2)...
@@ -919,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
 
 	switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
 	case GAMMA_MODE_MODE_8BIT:
-		i9xx_load_luts(crtc_state);
+		ilk_load_lut_8(crtc, gamma_lut);
 		break;
 	case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
 		icl_program_gamma_superfine_segment(crtc_state);
@@ -945,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
 	return drm_color_lut_extract(color->red, 14);
 }
 
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+	entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+	entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+	entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
 static void chv_load_cgm_degamma(struct intel_crtc *crtc,
 				 const struct drm_property_blob *blob)
 {
@@ -954,10 +1042,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
 	enum pipe pipe = crtc->pipe;
 
 	for (i = 0; i < lut_size; i++) {
-		I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
-			   chv_cgm_degamma_ldw(&lut[i]));
-		I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
-			   chv_cgm_degamma_udw(&lut[i]));
+		intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+			       chv_cgm_degamma_ldw(&lut[i]));
+		intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+			       chv_cgm_degamma_udw(&lut[i]));
 	}
 }
 
@@ -981,31 +1069,34 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
 	enum pipe pipe = crtc->pipe;
 
 	for (i = 0; i < lut_size; i++) {
-		I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
-			   chv_cgm_gamma_ldw(&lut[i]));
-		I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
-			   chv_cgm_gamma_udw(&lut[i]));
+		intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+			       chv_cgm_gamma_ldw(&lut[i]));
+		intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+			       chv_cgm_gamma_udw(&lut[i]));
 	}
 }
 
 static void chv_load_luts(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+	const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+	const struct drm_property_blob *ctm = crtc_state->hw.ctm;
 
-	cherryview_load_csc_matrix(crtc_state);
+	if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+		chv_load_cgm_csc(crtc, ctm);
 
-	if (crtc_state_is_legacy_gamma(crtc_state)) {
-		i9xx_load_luts(crtc_state);
-		return;
-	}
-
-	if (degamma_lut)
+	if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
 		chv_load_cgm_degamma(crtc, degamma_lut);
 
-	if (gamma_lut)
+	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
 		chv_load_cgm_gamma(crtc, gamma_lut);
+	else
+		i965_load_luts(crtc_state);
+
+	intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+		       crtc_state->cgm_mode);
 }
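
The rewritten chv_load_luts() keys each stage off crtc_state->cgm_mode rather than the mere presence of the blobs, and writes CGM_PIPE_MODE only after every stage is programmed, so the pipe never runs with a half-loaded configuration. A minimal sketch of what each mode bit selects (the bit positions here are illustrative; i915_reg.h holds the real definitions):

#include <stdint.h>
#include <stdio.h>

#define CGM_MODE_DEGAMMA	(1u << 0)	/* illustrative bit values */
#define CGM_MODE_CSC		(1u << 1)
#define CGM_MODE_GAMMA		(1u << 2)

static void chv_describe_cgm(uint32_t cgm_mode)
{
	printf("degamma: %s\n", cgm_mode & CGM_MODE_DEGAMMA ? "CGM LUT" : "bypass");
	printf("csc:     %s\n", cgm_mode & CGM_MODE_CSC ? "CGM matrix" : "bypass");
	printf("gamma:   %s\n", cgm_mode & CGM_MODE_GAMMA ? "CGM LUT" : "legacy i965 LUT");
}

The else branch falling back to i965_load_luts() is the last line of that table: with the CGM gamma stage bypassed, the legacy palette is still in the path.
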
 
 void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1167,7 +1258,8 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
 
 	/* C8 relies on its palette being stored in the legacy LUT */
 	if (crtc_state->c8_planes) {
-		DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "C8 pixelformat requires the legacy LUT\n");
 		return -EINVAL;
 	}
 
@@ -1630,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
 	return true;
 }
 
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
 {
-	u32 max = 0xffff >> (16 - bit_precision);
-
-	val = clamp_val(val, 0, max);
-
-	if (bit_precision < 16)
-		val <<= 16 - bit_precision;
-
-	return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
-	struct drm_color_lut *blob_data;
-	u32 i, val;
+	struct drm_color_lut *lut;
+	int i;
 
 	blob = drm_property_create_blob(&dev_priv->drm,
 					sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1659,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
 	if (IS_ERR(blob))
 		return NULL;
 
-	blob_data = blob->data;
+	lut = blob->data;
 
 	for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
-		if (HAS_GMCH(dev_priv))
-			val = I915_READ(PALETTE(pipe, i));
-		else
-			val = I915_READ(LGC_PALETTE(pipe, i));
+		u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
 
-		blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
-							LGC_PALETTE_RED_MASK, val), 8);
-		blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
-							  LGC_PALETTE_GREEN_MASK, val), 8);
-		blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
-							 LGC_PALETTE_BLUE_MASK, val), 8);
+		i9xx_lut_8_pack(&lut[i], val);
 	}
 
 	return blob;
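
The new i9xx_lut_8_pack() helper is not shown in this hunk; judging from the removed lines it simply splits the 8:8:8 palette word and widens each channel. A hedged reconstruction, with the field positions inferred from the LGC_PALETTE_*_MASK usage being deleted above:

#include <stdint.h>

struct color_lut { uint16_t red, green, blue; };

static uint16_t widen8(uint32_t v)		/* lut_pack(v, 8) */
{
	return (uint16_t)(v << 8);
}

static void lut_8_unpack(struct color_lut *e, uint32_t val)
{
	e->red   = widen8((val >> 16) & 0xff);	/* LGC_PALETTE_RED_MASK   */
	e->green = widen8((val >>  8) & 0xff);	/* LGC_PALETTE_GREEN_MASK */
	e->blue  = widen8((val >>  0) & 0xff);	/* LGC_PALETTE_BLUE_MASK  */
}
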
@@ -1680,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
 
 static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
 	if (!crtc_state->gamma_enable)
 		return;
 
-	crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+	crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
 }
 
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
-	struct drm_color_lut *blob_data;
-	u32 i, val1, val2;
+	struct drm_color_lut *lut;
 
 	blob = drm_property_create_blob(&dev_priv->drm,
 					sizeof(struct drm_color_lut) * lut_size,
@@ -1703,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
 	if (IS_ERR(blob))
 		return NULL;
 
-	blob_data = blob->data;
+	lut = blob->data;
 
 	for (i = 0; i < lut_size - 1; i++) {
-		val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
-		val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
+		u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+		u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
 
-		blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
-						 REG_FIELD_GET(PALETTE_RED_MASK, val1);
-		blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
-						   REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
-		blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
-						  REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+		i965_lut_10p6_pack(&lut[i], ldw, udw);
 	}
 
-	blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
-					 I915_READ(PIPEGCMAX(pipe, 0)));
-	blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
-					   I915_READ(PIPEGCMAX(pipe, 1)));
-	blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
-					  I915_READ(PIPEGCMAX(pipe, 2)));
+	lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+	lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+	lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
 
 	return blob;
 }
 
 static void i965_read_luts(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
 	if (!crtc_state->gamma_enable)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
 	else
-		crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+		crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
 }
 
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
-	struct drm_color_lut *blob_data;
-	u32 i, val;
+	struct drm_color_lut *lut;
 
 	blob = drm_property_create_blob(&dev_priv->drm,
 					sizeof(struct drm_color_lut) * lut_size,
@@ -1755,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
 	if (IS_ERR(blob))
 		return NULL;
 
-	blob_data = blob->data;
+	lut = blob->data;
 
 	for (i = 0; i < lut_size; i++) {
-		val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
-		blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
-							  CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
-		blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
-							 CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
+		u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+		u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
 
-		val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
-		blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
-							CGM_PIPE_GAMMA_RED_MASK, val), 10);
+		chv_cgm_gamma_pack(&lut[i], ldw, udw);
 	}
 
 	return blob;
@@ -1774,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
 
 static void chv_read_luts(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
 	if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
-		crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+		crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
 	else
 		i965_read_luts(crtc_state);
 }
 
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
-	struct drm_color_lut *blob_data;
-	u32 i, val;
+	struct drm_color_lut *lut;
+	int i;
+
+	blob = drm_property_create_blob(&dev_priv->drm,
+					sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+					NULL);
+	if (IS_ERR(blob))
+		return NULL;
+
+	lut = blob->data;
+
+	for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+		u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+		i9xx_lut_8_pack(&lut[i], val);
+	}
+
+	return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+	enum pipe pipe = crtc->pipe;
+	struct drm_property_blob *blob;
+	struct drm_color_lut *lut;
 
 	blob = drm_property_create_blob(&dev_priv->drm,
 					sizeof(struct drm_color_lut) * lut_size,
@@ -1797,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
 	if (IS_ERR(blob))
 		return NULL;
 
-	blob_data = blob->data;
+	lut = blob->data;
 
 	for (i = 0; i < lut_size; i++) {
-		val = I915_READ(PREC_PALETTE(pipe, i));
+		u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
 
-		blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
-							PREC_PALETTE_RED_MASK, val), 10);
-		blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
-							  PREC_PALETTE_GREEN_MASK, val), 10);
-		blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
-							 PREC_PALETTE_BLUE_MASK, val), 10);
+		ilk_lut_10_pack(&lut[i], val);
 	}
 
 	return blob;
@@ -1815,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
 
 static void ilk_read_luts(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
 	if (!crtc_state->gamma_enable)
 		return;
 
@@ -1822,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
 	else
-		crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+		crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
 }
 
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+						 u32 prec_index)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	int hw_lut_size = ivb_lut_10_size(prec_index);
+	int i, hw_lut_size = ivb_lut_10_size(prec_index);
 	enum pipe pipe = crtc->pipe;
 	struct drm_property_blob *blob;
-	struct drm_color_lut *blob_data;
-	u32 i, val;
+	struct drm_color_lut *lut;
 
 	blob = drm_property_create_blob(&dev_priv->drm,
 					sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1844,36 +1917,33 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
 	if (IS_ERR(blob))
 		return NULL;
 
-	blob_data = blob->data;
+	lut = blob->data;
 
-	I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
-		   PAL_PREC_AUTO_INCREMENT);
+	intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+		       prec_index | PAL_PREC_AUTO_INCREMENT);
 
 	for (i = 0; i < hw_lut_size; i++) {
-		val = I915_READ(PREC_PAL_DATA(pipe));
+		u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
 
-		blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
-							PREC_PAL_DATA_RED_MASK, val), 10);
-		blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
-							PREC_PAL_DATA_GREEN_MASK, val), 10);
-		blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
-							PREC_PAL_DATA_BLUE_MASK, val), 10);
+		ilk_lut_10_pack(&lut[i], val);
 	}
 
-	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+	intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
 
 	return blob;
 }
 
 static void glk_read_luts(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
 	if (!crtc_state->gamma_enable)
 		return;
 
 	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
-		crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+		crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
 	else
-		crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+		crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
 }
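
glk_read_lut_10() depends on the hardware bumping PREC_PAL_INDEX automatically after each PREC_PAL_DATA access (PAL_PREC_AUTO_INCREMENT), which is why the loop issues no per-iteration index write and the index is reset to 0 on the way out. A toy model of the access pattern:

#include <stdint.h>

struct prec_pal {
	uint32_t index;			/* models PREC_PAL_INDEX */
	uint32_t data[1024];		/* backing for PREC_PAL_DATA */
};

/* With auto-increment enabled, every DATA read advances INDEX. */
static uint32_t prec_pal_read_data(struct prec_pal *pal)
{
	return pal->data[pal->index++ % 1024];
}
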
 
 void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 44bbc7e..9ff05ec 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -48,7 +48,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
 	const struct cnl_procmon *procmon;
 	u32 val;
 
-	val = I915_READ(ICL_PORT_COMP_DW3(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
 	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
 	default:
 		MISSING_CASE(val);
@@ -81,26 +81,27 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
 
 	procmon = cnl_get_procmon_ref_values(dev_priv, phy);
 
-	val = I915_READ(ICL_PORT_COMP_DW1(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
 	val &= ~((0xff << 16) | 0xff);
 	val |= procmon->dw1;
-	I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
 
-	I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
-	I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
+	intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+	intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
 }
 
 static bool check_phy_reg(struct drm_i915_private *dev_priv,
 			  enum phy phy, i915_reg_t reg, u32 mask,
 			  u32 expected_val)
 {
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 
 	if ((val & mask) != expected_val) {
-		DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
-				 "current %08x mask %08x expected %08x\n",
-				 phy_name(phy),
-				 reg.reg, val, mask, expected_val);
+		drm_dbg(&dev_priv->drm,
+			"Combo PHY %c reg %08x state mismatch: "
+			"current %08x mask %08x expected %08x\n",
+			phy_name(phy),
+			reg.reg, val, mask, expected_val);
 		return false;
 	}
 
@@ -127,8 +128,8 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
 
 static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
 {
-	return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
-		(I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+	return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+		(intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT);
 }
 
 static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
@@ -151,20 +152,20 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(CHICKEN_MISC_2);
+	val = intel_de_read(dev_priv, CHICKEN_MISC_2);
 	val &= ~CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
+	intel_de_write(dev_priv, CHICKEN_MISC_2, val);
 
 	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
 	cnl_set_procmon_ref_values(dev_priv, PHY_A);
 
-	val = I915_READ(CNL_PORT_COMP_DW0);
+	val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0);
 	val |= COMP_INIT;
-	I915_WRITE(CNL_PORT_COMP_DW0, val);
+	intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val);
 
-	val = I915_READ(CNL_PORT_CL1CM_DW5);
+	val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
 	val |= CL_POWER_DOWN_ENABLE;
-	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+	intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
 }
 
 static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
@@ -172,11 +173,12 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 	u32 val;
 
 	if (!cnl_combo_phy_verify_state(dev_priv))
-		DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+		drm_warn(&dev_priv->drm,
+			 "Combo PHY HW state changed unexpectedly.\n");
 
-	val = I915_READ(CHICKEN_MISC_2);
+	val = intel_de_read(dev_priv, CHICKEN_MISC_2);
 	val |= CNL_COMP_PWR_DOWN;
-	I915_WRITE(CHICKEN_MISC_2, val);
+	intel_de_write(dev_priv, CHICKEN_MISC_2, val);
 }
 
 static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
@@ -184,27 +186,65 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
 {
 	/* The PHY C added by EHL has no PHY_MISC register */
 	if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
-		return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+		return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
 	else
-		return !(I915_READ(ICL_PHY_MISC(phy)) &
+		return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
 			 ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
-			(I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+			(intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+}
+
+static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+{
+	bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
+	bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
+	bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+	/*
+	 * VBT's 'dvo port' field for child devices references the DDI, not
+	 * the PHY.  So if combo PHY A is wired up to drive an external
+	 * display, we should see a child device present on PORT_D and
+	 * nothing on PORT_A and no DSI.
+	 */
+	if (ddi_d_present && !ddi_a_present && !dsi_present)
+		return true;
+
+	/*
+	 * If we encounter a VBT that claims to have an external display on
+	 * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+	 * in the log and let the internal display win.
+	 */
+	if (ddi_d_present)
+		drm_err(&i915->drm,
+			"VBT claims to have both internal and external displays on PHY A.  Configuring for internal.\n");
+
+	return false;
 }
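
ehl_vbt_ddi_d_present() turns the old ehl_combo_phy_a_mux() value-munging into a plain predicate, so the verify path added below can share the decision with the init path. Its decision table as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

static bool phy_a_drives_ddi_d(bool ddi_a, bool ddi_d, bool dsi)
{
	/* An external display on DDI-D wins only when nothing internal
	 * (DDI-A or DSI) is wired to the same PHY. */
	if (ddi_d && !ddi_a && !dsi)
		return true;

	/* Contradictory VBT: complain and let the internal display win. */
	if (ddi_d)
		fprintf(stderr, "both internal and external displays on PHY A\n");

	return false;
}
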
 
 static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
 				       enum phy phy)
 {
 	bool ret;
+	u32 expected_val = 0;
 
 	if (!icl_combo_phy_enabled(dev_priv, phy))
 		return false;
 
 	ret = cnl_verify_procmon_ref_values(dev_priv, phy);
 
-	if (phy == PHY_A)
+	if (phy == PHY_A) {
 		ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
 				     IREFGEN, IREFGEN);
 
+		if (IS_ELKHARTLAKE(dev_priv)) {
+			if (ehl_vbt_ddi_d_present(dev_priv))
+				expected_val = ICL_PHY_MISC_MUX_DDID;
+
+			ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+					     ICL_PHY_MISC_MUX_DDID,
+					     expected_val);
+		}
+	}
+
 	ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
 			     CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
 
@@ -219,7 +259,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	if (is_dsi) {
-		WARN_ON(lane_reversal);
+		drm_WARN_ON(&dev_priv->drm, lane_reversal);
 
 		switch (lane_count) {
 		case 1:
@@ -257,36 +297,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	val = I915_READ(ICL_PORT_CL_DW10(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
 	val &= ~PWR_DOWN_LN_MASK;
 	val |= lane_mask << PWR_DOWN_LN_SHIFT;
-	I915_WRITE(ICL_PORT_CL_DW10(phy), val);
-}
-
-static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
-{
-	bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
-	bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
-	bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
-
-	/*
-	 * VBT's 'dvo port' field for child devices references the DDI, not
-	 * the PHY.  So if combo PHY A is wired up to drive an external
-	 * display, we should see a child device present on PORT_D and
-	 * nothing on PORT_A and no DSI.
-	 */
-	if (ddi_d_present && !ddi_a_present && !dsi_present)
-		return val | ICL_PHY_MISC_MUX_DDID;
-
-	/*
-	 * If we encounter a VBT that claims to have an external display on
-	 * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
-	 * in the log and let the internal display win.
-	 */
-	if (ddi_d_present)
-		DRM_ERROR("VBT claims to have both internal and external displays on PHY A.  Configuring for internal.\n");
-
-	return val & ~ICL_PHY_MISC_MUX_DDID;
+	intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
 }
 
 static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -297,8 +311,9 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
 		u32 val;
 
 		if (icl_combo_phy_verify_state(dev_priv, phy)) {
-			DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
-					 phy_name(phy));
+			drm_dbg(&dev_priv->drm,
+				"Combo PHY %c already enabled, won't reprogram it.\n",
+				phy_name(phy));
 			continue;
 		}
 
@@ -318,28 +333,33 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
 		 * based on whether our VBT indicates the presence of any
 		 * "internal" child devices.
 		 */
-		val = I915_READ(ICL_PHY_MISC(phy));
-		if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
-			val = ehl_combo_phy_a_mux(dev_priv, val);
+		val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+		if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) {
+			val &= ~ICL_PHY_MISC_MUX_DDID;
+
+			if (ehl_vbt_ddi_d_present(dev_priv))
+				val |= ICL_PHY_MISC_MUX_DDID;
+		}
+
 		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(phy), val);
+		intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
 
 skip_phy_misc:
 		cnl_set_procmon_ref_values(dev_priv, phy);
 
 		if (phy == PHY_A) {
-			val = I915_READ(ICL_PORT_COMP_DW8(phy));
+			val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
 			val |= IREFGEN;
-			I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
+			intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
 		}
 
-		val = I915_READ(ICL_PORT_COMP_DW0(phy));
+		val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
 		val |= COMP_INIT;
-		I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+		intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
 
-		val = I915_READ(ICL_PORT_CL_DW5(phy));
+		val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
 		val |= CL_POWER_DOWN_ENABLE;
-		I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+		intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
 	}
 }
 
@@ -352,7 +372,8 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 
 		if (phy == PHY_A &&
 		    !icl_combo_phy_verify_state(dev_priv, phy))
-			DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+			drm_warn(&dev_priv->drm,
+				 "Combo PHY %c HW state changed unexpectedly\n",
 				 phy_name(phy));
 
 		/*
@@ -363,14 +384,14 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
 		if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
 			goto skip_phy_misc;
 
-		val = I915_READ(ICL_PHY_MISC(phy));
+		val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
 		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
-		I915_WRITE(ICL_PHY_MISC(phy), val);
+		intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
 
 skip_phy_misc:
-		val = I915_READ(ICL_PORT_COMP_DW0(phy));
+		val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
 		val &= ~COMP_INIT;
-		I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+		intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
 	}
 }
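
Most of the churn in this file is the mechanical I915_READ()/I915_WRITE() to intel_de_read()/intel_de_write() conversion, which leaves the read-modify-write triples open-coded. A helper of roughly this shape would collapse each of them (hypothetical here; the tree did later grow an intel_de_rmw() along these lines, but this diff does not add it):

static inline u32 intel_de_rmw_sketch(struct drm_i915_private *i915,
				      i915_reg_t reg, u32 clear, u32 set)
{
	u32 old = intel_de_read(i915, reg);

	intel_de_write(i915, reg, (old & ~clear) | set);
	return old;
}

/* e.g. the COMP_INIT sequence above becomes:
 *	intel_de_rmw_sketch(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT);
 */
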
 
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1133c4e..903e496 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -153,7 +153,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 bool intel_connector_get_hw_state(struct intel_connector *connector)
 {
 	enum pipe pipe = 0;
-	struct intel_encoder *encoder = connector->encoder;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 
 	return encoder->get_hw_state(encoder, &pipe);
 }
@@ -162,7 +162,8 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
 
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	drm_WARN_ON(dev,
+		    !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
 	if (!connector->base.state->crtc)
 		return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index f976b80..78f9b6c 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_connector.h"
@@ -75,7 +74,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(adpa_reg);
+	val = intel_de_read(dev_priv, adpa_reg);
 
 	/* asserts want to know the pipe even if the port is disabled */
 	if (HAS_PCH_CPT(dev_priv))
@@ -112,7 +111,7 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
 	u32 tmp, flags = 0;
 
-	tmp = I915_READ(crt->adpa_reg);
+	tmp = intel_de_read(dev_priv, crt->adpa_reg);
 
 	if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -184,7 +183,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 		adpa |= ADPA_PIPE_SEL(crtc->pipe);
 
 	if (!HAS_PCH_SPLIT(dev_priv))
-		I915_WRITE(BCLRPAT(crtc->pipe), 0);
+		intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -201,7 +200,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
 		break;
 	}
 
-	I915_WRITE(crt->adpa_reg, adpa);
+	intel_de_write(dev_priv, crt->adpa_reg, adpa);
 }
 
 static void intel_disable_crt(struct intel_encoder *encoder,
@@ -230,7 +229,7 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	WARN_ON(!old_crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
@@ -258,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 
 	intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
 
-	WARN_ON(!old_crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 }
@@ -269,7 +268,7 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	WARN_ON(!crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
 
 	intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 }
@@ -282,7 +281,7 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
-	WARN_ON(!crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
@@ -299,7 +298,13 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	enum pipe pipe = crtc->pipe;
 
-	WARN_ON(!crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+	intel_enable_pipe(crtc_state);
+
+	lpt_pch_enable(crtc_state);
+
+	intel_crtc_vblank_on(crtc_state);
 
 	intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
 
@@ -414,7 +419,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
 	/* LPT FDI RX only supports 8bpc. */
 	if (HAS_PCH_LPT(dev_priv)) {
 		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
-			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "LPT only supports 24bpp\n");
 			return -EINVAL;
 		}
 
@@ -442,34 +448,37 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
 
 		crt->force_hotplug_required = false;
 
-		save_adpa = adpa = I915_READ(crt->adpa_reg);
-		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+		save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+		drm_dbg_kms(&dev_priv->drm,
+			    "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
 		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 		if (turn_off_dac)
 			adpa &= ~ADPA_DAC_ENABLE;
 
-		I915_WRITE(crt->adpa_reg, adpa);
+		intel_de_write(dev_priv, crt->adpa_reg, adpa);
 
 		if (intel_de_wait_for_clear(dev_priv,
 					    crt->adpa_reg,
 					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
 					    1000))
-			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+			drm_dbg_kms(&dev_priv->drm,
+				    "timed out waiting for FORCE_TRIGGER\n");
 
 		if (turn_off_dac) {
-			I915_WRITE(crt->adpa_reg, save_adpa);
-			POSTING_READ(crt->adpa_reg);
+			intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+			intel_de_posting_read(dev_priv, crt->adpa_reg);
 		}
 	}
 
 	/* Check the status to see if both blue and green are on now */
-	adpa = I915_READ(crt->adpa_reg);
+	adpa = intel_de_read(dev_priv, crt->adpa_reg);
 	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
-	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+	drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+		    adpa, ret);
 
 	return ret;
 }
@@ -498,27 +507,30 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 	 */
 	reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
 
-	save_adpa = adpa = I915_READ(crt->adpa_reg);
-	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+	save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+	drm_dbg_kms(&dev_priv->drm,
+		    "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
 	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 
-	I915_WRITE(crt->adpa_reg, adpa);
+	intel_de_write(dev_priv, crt->adpa_reg, adpa);
 
 	if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
 				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
-		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-		I915_WRITE(crt->adpa_reg, save_adpa);
+		drm_dbg_kms(&dev_priv->drm,
+			    "timed out waiting for FORCE_TRIGGER\n");
+		intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
 	}
 
 	/* Check the status to see if both blue and green are on now */
-	adpa = I915_READ(crt->adpa_reg);
+	adpa = intel_de_read(dev_priv, crt->adpa_reg);
 	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
 
-	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+	drm_dbg_kms(&dev_priv->drm,
+		    "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
 	if (reenable_hpd)
 		intel_hpd_enable(dev_priv, crt->base.hpd_pin);
@@ -558,15 +570,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 		/* wait for FORCE_DETECT to go off */
 		if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
 					    CRT_HOTPLUG_FORCE_DETECT, 1000))
-			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+			drm_dbg_kms(&dev_priv->drm,
+				    "timed out waiting for FORCE_DETECT to go off\n");
 	}
 
-	stat = I915_READ(PORT_HOTPLUG_STAT);
+	stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT);
 	if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
 		ret = true;
 
 	/* clear the interrupt we just generated, if any */
-	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+	intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
 
 	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
 
@@ -629,13 +642,16 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 		 * have to check the EDID input spec of the attached device.
 		 */
 		if (!is_digital) {
-			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "CRT detected via DDC:0x50 [EDID]\n");
 			ret = true;
 		} else {
-			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 		}
 	} else {
-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}
 
 	kfree(edid);
@@ -660,7 +676,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
 	u8 st00;
 	enum drm_connector_status status;
 
-	DRM_DEBUG_KMS("starting load-detect on CRT\n");
+	drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
 
 	bclrpat_reg = BCLRPAT(pipe);
 	vtotal_reg = VTOTAL(pipe);
@@ -706,7 +722,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
 		* Yes, this will flicker
 		*/
 		if (vblank_start <= vactive && vblank_end >= vtotal) {
-			u32 vsync = I915_READ(vsync_reg);
+			u32 vsync = intel_de_read(dev_priv, vsync_reg);
 			u32 vsync_start = (vsync & 0xffff) + 1;
 
 			vblank_start = vsync_start;
@@ -801,9 +817,9 @@ intel_crt_detect(struct drm_connector *connector,
 	int status, ret;
 	struct intel_load_detect_pipe tmp;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
-		      connector->base.id, connector->name,
-		      force);
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+		    connector->base.id, connector->name,
+		    force);
 
 	if (i915_modparams.load_detect_test) {
 		wakeref = intel_display_power_get(dev_priv,
@@ -824,11 +840,13 @@ intel_crt_detect(struct drm_connector *connector,
 		 * only trust an assertion that the monitor is connected.
 		 */
 		if (intel_crt_detect_hotplug(connector)) {
-			DRM_DEBUG_KMS("CRT detected via hotplug\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "CRT detected via hotplug\n");
 			status = connector_status_connected;
 			goto out;
 		} else
-			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "CRT not detected via hotplug\n");
 	}
 
 	if (intel_crt_detect_ddc(connector)) {
@@ -918,13 +936,13 @@ void intel_crt_reset(struct drm_encoder *encoder)
 	if (INTEL_GEN(dev_priv) >= 5) {
 		u32 adpa;
 
-		adpa = I915_READ(crt->adpa_reg);
+		adpa = intel_de_read(dev_priv, crt->adpa_reg);
 		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
 		adpa |= ADPA_HOTPLUG_BITS;
-		I915_WRITE(crt->adpa_reg, adpa);
-		POSTING_READ(crt->adpa_reg);
+		intel_de_write(dev_priv, crt->adpa_reg, adpa);
+		intel_de_posting_read(dev_priv, crt->adpa_reg);
 
-		DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
+		drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = true;
 	}
 
@@ -969,7 +987,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	else
 		adpa_reg = ADPA;
 
-	adpa = I915_READ(adpa_reg);
+	adpa = intel_de_read(dev_priv, adpa_reg);
 	if ((adpa & ADPA_DAC_ENABLE) == 0) {
 		/*
 		 * On some machines (some IVB at least) CRT can be
@@ -979,11 +997,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		 * take. So the only way to tell is attempt to enable
 		 * it and see what happens.
 		 */
-		I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
-			   ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
-		if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+		intel_de_write(dev_priv, adpa_reg,
+			       adpa | ADPA_DAC_ENABLE |
+			       ADPA_HSYNC_CNTL_DISABLE |
+			       ADPA_VSYNC_CNTL_DISABLE);
+		if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
 			return;
-		I915_WRITE(adpa_reg, adpa);
+		intel_de_write(dev_priv, adpa_reg, adpa);
 	}
 
 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
@@ -1027,6 +1045,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 	    !dmi_check_system(intel_spurious_crt_detect)) {
 		crt->base.hpd_pin = HPD_CRT;
 		crt->base.hotplug = intel_encoder_hotplug;
+		intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+	} else {
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 	}
 
 	if (HAS_DDI(dev_priv)) {
@@ -1057,14 +1078,6 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-	if (!I915_HAS_HOTPLUG(dev_priv))
-		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
-	/*
-	 * Configure the automatic hotplug detection stuff
-	 */
-	crt->force_hotplug_required = false;
-
 	/*
 	 * TODO: find a proper way to discover whether we need to set the
 	 * polarity and link reversal bits or not, instead of relying on the
@@ -1074,7 +1087,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
 		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
 				 FDI_RX_LINK_REVERSAL_OVERRIDE;
 
-		dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+		dev_priv->fdi_rx_config = intel_de_read(dev_priv,
+							FDI_RX_CTL(PIPE_A)) & fdi_config;
 	}
 
 	intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
similarity index 94%
rename from drivers/gpu/drm/i915/intel_csr.c
rename to drivers/gpu/drm/i915/display/intel_csr.c
index 09870a3..3112572 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -27,6 +27,7 @@
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_csr.h"
+#include "intel_de.h"
 
 /**
  * DOC: csr support for dmc
@@ -39,8 +40,8 @@
 
 #define GEN12_CSR_MAX_FW_SIZE		ICL_CSR_MAX_FW_SIZE
 
-#define TGL_CSR_PATH			"i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED	CSR_VERSION(2, 4)
+#define TGL_CSR_PATH			"i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED	CSR_VERSION(2, 6)
 #define TGL_CSR_MAX_FW_SIZE		0x6000
 MODULE_FIRMWARE(TGL_CSR_PATH);
 
@@ -276,11 +277,11 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
 		mask |= DC_STATE_DEBUG_MASK_CORES;
 
 	/* The below bit doesn't need to be cleared ever afterwards */
-	val = I915_READ(DC_STATE_DEBUG);
+	val = intel_de_read(dev_priv, DC_STATE_DEBUG);
 	if ((val & mask) != mask) {
 		val |= mask;
-		I915_WRITE(DC_STATE_DEBUG, val);
-		POSTING_READ(DC_STATE_DEBUG);
+		intel_de_write(dev_priv, DC_STATE_DEBUG, val);
+		intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
 	}
 }
 
@@ -298,12 +299,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
 	u32 i, fw_size;
 
 	if (!HAS_CSR(dev_priv)) {
-		DRM_ERROR("No CSR support available for this platform\n");
+		drm_err(&dev_priv->drm,
+			"No CSR support available for this platform\n");
 		return;
 	}
 
 	if (!dev_priv->csr.dmc_payload) {
-		DRM_ERROR("Tried to program CSR with empty payload\n");
+		drm_err(&dev_priv->drm,
+			"Tried to program CSR with empty payload\n");
 		return;
 	}
 
@@ -313,13 +316,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
 	preempt_disable();
 
 	for (i = 0; i < fw_size; i++)
-		I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+		intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+				      payload[i]);
 
 	preempt_enable();
 
 	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
-		I915_WRITE(dev_priv->csr.mmioaddr[i],
-			   dev_priv->csr.mmiodata[i]);
+		intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
+			       dev_priv->csr.mmiodata[i]);
 	}
 
 	dev_priv->csr.dc_state = 0;
@@ -607,7 +611,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
 
 static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-	WARN_ON(dev_priv->csr.wakeref);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
 	dev_priv->csr.wakeref =
 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 }
@@ -636,16 +640,16 @@ static void csr_load_work_fn(struct work_struct *work)
 		intel_csr_load_program(dev_priv);
 		intel_csr_runtime_pm_put(dev_priv);
 
-		DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
-			 dev_priv->csr.fw_path,
-			 CSR_VERSION_MAJOR(csr->version),
+		drm_info(&dev_priv->drm,
+			 "Finished loading DMC firmware %s (v%u.%u)\n",
+			 dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version));
 	} else {
-		dev_notice(dev_priv->drm.dev,
+		drm_notice(&dev_priv->drm,
 			   "Failed to load DMC firmware %s."
 			   " Disabling runtime power management.\n",
 			   csr->fw_path);
-		dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+		drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
 			   INTEL_UC_FIRMWARE_URL);
 	}
 
@@ -712,7 +716,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (i915_modparams.dmc_firmware_path) {
 		if (strlen(i915_modparams.dmc_firmware_path) == 0) {
 			csr->fw_path = NULL;
-			DRM_INFO("Disabling CSR firmware and runtime PM\n");
+			drm_info(&dev_priv->drm,
+				 "Disabling CSR firmware and runtime PM\n");
 			return;
 		}
 
@@ -722,11 +727,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	}
 
 	if (csr->fw_path == NULL) {
-		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "No known CSR firmware for platform, disabling runtime PM\n");
 		return;
 	}
 
-	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
 	schedule_work(&dev_priv->csr.work);
 }
 
@@ -783,7 +789,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
 		return;
 
 	intel_csr_ucode_suspend(dev_priv);
-	WARN_ON(dev_priv->csr.wakeref);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
 
 	kfree(dev_priv->csr.dmc_payload);
 }
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
similarity index 100%
rename from drivers/gpu/drm/i915/intel_csr.h
rename to drivers/gpu/drm/i915/display/intel_csr.h
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index d9a61f3..73d0f46 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,6 +568,20 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
 	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   850      3.0   */
 };
 
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x33, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x47, 0x36, 0x00, 0x09 },	/* 350   500      3.1   */
+	{ 0xC, 0x64, 0x30, 0x00, 0x0F },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2C, 0x00, 0x13 },	/* 350   900      8.2   */
+	{ 0xA, 0x46, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x64, 0x36, 0x00, 0x09 },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x30, 0x00, 0x0F },	/* 500   900      5.1   */
+	{ 0xC, 0x61, 0x3F, 0x00, 0x00 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x37, 0x00, 0x08 },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
 struct icl_mg_phy_ddi_buf_trans {
 	u32 cri_txdeemph_override_5_0;
 	u32 cri_txdeemph_override_11_6;
@@ -622,6 +636,34 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
 	{ 0x0, 0x0, 0xA },	/* 10		Full	-3 dB */
 };
 
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x32, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x71, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7D, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6C, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x63, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x47, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x63, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x61, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7B, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
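
The new EHL and TGL tables keep the layout every cnl_ddi_buf_trans DP table uses: ten rows, one per valid (voltage swing, pre-emphasis) pair, ordered swing-major, as the NT/Trans mV comments show. A sketch of the conventional row lookup (the index math is an assumption here, not code from this diff):

/* Rows 0-3: swing 0 with pre-emphasis 0-3; rows 4-6: swing 1; rows 7-8:
 * swing 2; row 9: swing 3.  Only pairs with vswing + preemph <= 3 are
 * valid, hence 4 + 3 + 2 + 1 = 10 rows. */
static int dp_buf_trans_index(int vswing, int preemph)
{
	static const int base[4] = { 0, 4, 7, 9 };

	return base[vswing] + preemph;
}
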
+
 static const struct ddi_buf_trans *
 bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
@@ -818,7 +860,7 @@ bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct cnl_ddi_buf_trans *
 cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
 	if (voltage == VOLTAGE_INFO_0_85V) {
 		*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
@@ -839,7 +881,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct cnl_ddi_buf_trans *
 cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
 	if (voltage == VOLTAGE_INFO_0_85V) {
 		*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
@@ -860,7 +902,7 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 static const struct cnl_ddi_buf_trans *
 cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
 	if (dev_priv->vbt.edp.low_vswing) {
 		if (voltage == VOLTAGE_INFO_0_85V) {
@@ -901,15 +943,42 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 	return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+			int *n_entries)
 {
-	struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
+	if (type == INTEL_OUTPUT_DP && rate > 270000) {
+		*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
+		return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+	}
+
+	return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+			int *n_entries)
+{
+	if (type != INTEL_OUTPUT_DP) {
+		return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+	} else if (rate > 270000) {
+		*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+		return tgl_combo_phy_ddi_translations_dp_hbr2;
+	}
+
+	*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+	return tgl_combo_phy_ddi_translations_dp_hbr;
+}
+
+static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int n_entries, level, default_entry;
-	enum phy phy = intel_port_to_phy(dev_priv, port);
+	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
 	if (INTEL_GEN(dev_priv) >= 12) {
 		if (intel_phy_is_combo(dev_priv, phy))
-			icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+			tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
 						0, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
@@ -937,19 +1006,18 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 		intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
 		default_entry = 6;
 	} else {
-		WARN(1, "ddi translation table missing\n");
+		drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
 		return 0;
 	}
 
-	if (WARN_ON_ONCE(n_entries == 0))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
 		return 0;
 
-	if (port_info->hdmi_level_shift_set)
-		level = port_info->hdmi_level_shift;
-	else
+	level = intel_bios_hdmi_level_shift(encoder);
+	if (level < 0)
 		level = default_entry;
 
-	if (WARN_ON_ONCE(level >= n_entries))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
 
 	return level;
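
The rate cut-off in ehl_get_combo_buf_trans() and tgl_get_combo_buf_trans() above is in kHz, the unit i915 uses for DP link rates (RBR 162000, HBR 270000, HBR2 540000, HBR3 810000), so the comparison picks the HBR2/HBR3 tuning for 5.4 and 8.1 GT/s links:

static inline bool dp_rate_needs_hbr2_trans(int link_rate_khz)
{
	return link_rate_khz > 270000;	/* true for HBR2 and HBR3 */
}
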
@@ -980,15 +1048,14 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
 							      &n_entries);
 
 	/* If we're boosting the current, set bit 31 of trans1 */
-	if (IS_GEN9_BC(dev_priv) &&
-	    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+	if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder))
 		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
 	for (i = 0; i < n_entries; i++) {
-		I915_WRITE(DDI_BUF_TRANS_LO(port, i),
-			   ddi_translations[i].trans1 | iboost_bit);
-		I915_WRITE(DDI_BUF_TRANS_HI(port, i),
-			   ddi_translations[i].trans2);
+		intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+			       ddi_translations[i].trans1 | iboost_bit);
+		intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+			       ddi_translations[i].trans2);
 	}
 }
 
@@ -1008,21 +1075,20 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
 
 	ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
 
-	if (WARN_ON_ONCE(!ddi_translations))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
 		return;
-	if (WARN_ON_ONCE(level >= n_entries))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
 
 	/* If we're boosting the current, set bit 31 of trans1 */
-	if (IS_GEN9_BC(dev_priv) &&
-	    dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+	if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder))
 		iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
 
 	/* Entry 9 is for HDMI: */
-	I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
-		   ddi_translations[level].trans1 | iboost_bit);
-	I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
-		   ddi_translations[level].trans2);
+	intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+		       ddi_translations[level].trans1 | iboost_bit);
+	intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+		       ddi_translations[level].trans2);
 }
 
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -1033,7 +1099,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 
 	for (i = 0; i < 16; i++) {
 		udelay(1);
-		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+		if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
 			return;
 	}
 	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
@@ -1124,70 +1190,64 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 	 *
 	 * WaFDIAutoLinkSetTimingOverrride:hsw
 	 */
-	I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
-				  FDI_RX_PWRDN_LANE0_VAL(2) |
-				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) |
+		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
 	/* Enable the PCH Receiver FDI PLL */
 	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
 		     FDI_RX_PLL_ENABLE |
 		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
-	POSTING_READ(FDI_RX_CTL(PIPE_A));
+	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 	udelay(220);
 
 	/* Switch from Rawclk to PCDclk */
 	rx_ctl_val |= FDI_PCDCLK;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
 
 	/* Configure Port Clock Select */
 	ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
-	I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
-	WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
+	intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+	drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
 
 	/* Start the training iterating through available voltages and emphasis,
 	 * testing each value twice. */
 	for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
 		/* Configure DP_TP_CTL with auto-training */
-		I915_WRITE(DP_TP_CTL(PORT_E),
-					DP_TP_CTL_FDI_AUTOTRAIN |
-					DP_TP_CTL_ENHANCED_FRAME_ENABLE |
-					DP_TP_CTL_LINK_TRAIN_PAT1 |
-					DP_TP_CTL_ENABLE);
+		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+			       DP_TP_CTL_FDI_AUTOTRAIN |
+			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+			       DP_TP_CTL_LINK_TRAIN_PAT1 |
+			       DP_TP_CTL_ENABLE);
 
 		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
 		 * DDI E does not support port reversal, the functionality is
 		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
 		 * port reversal bit */
-		I915_WRITE(DDI_BUF_CTL(PORT_E),
-			   DDI_BUF_CTL_ENABLE |
-			   ((crtc_state->fdi_lanes - 1) << 1) |
-			   DDI_BUF_TRANS_SELECT(i / 2));
-		POSTING_READ(DDI_BUF_CTL(PORT_E));
+		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+			       DDI_BUF_CTL_ENABLE |
+			       ((crtc_state->fdi_lanes - 1) << 1) |
+			       DDI_BUF_TRANS_SELECT(i / 2));
+		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 
 		udelay(600);
 
 		/* Program PCH FDI Receiver TU */
-		I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
 
 		/* Enable PCH FDI Receiver with auto-training */
 		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
-		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
-		POSTING_READ(FDI_RX_CTL(PIPE_A));
+		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 
 		/* Wait for FDI receiver lane calibration */
 		udelay(30);
 
 		/* Unset FDI_RX_MISC pwrdn lanes */
-		temp = I915_READ(FDI_RX_MISC(PIPE_A));
+		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
 		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
-		I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
-		POSTING_READ(FDI_RX_MISC(PIPE_A));
+		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
 
 		/* Wait for FDI auto training time */
 		udelay(5);
 
-		temp = I915_READ(DP_TP_STATUS(PORT_E));
+		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
 		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
 			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
 			break;
@@ -1203,37 +1263,34 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 		}
 
 		rx_ctl_val &= ~FDI_RX_ENABLE;
-		I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
-		POSTING_READ(FDI_RX_CTL(PIPE_A));
+		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
 
-		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
 		temp &= ~DDI_BUF_CTL_ENABLE;
-		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
-		POSTING_READ(DDI_BUF_CTL(PORT_E));
+		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
 
 		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
-		temp = I915_READ(DP_TP_CTL(PORT_E));
+		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
 		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
 		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
-		I915_WRITE(DP_TP_CTL(PORT_E), temp);
-		POSTING_READ(DP_TP_CTL(PORT_E));
+		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
 
 		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
 		/* Reset FDI_RX_MISC pwrdn lanes */
-		temp = I915_READ(FDI_RX_MISC(PIPE_A));
+		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
 		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
 		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-		I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
-		POSTING_READ(FDI_RX_MISC(PIPE_A));
+		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
 	}
 
 	/* Enable normal pixel sending for FDI */
-	I915_WRITE(DP_TP_CTL(PORT_E),
-		   DP_TP_CTL_FDI_AUTOTRAIN |
-		   DP_TP_CTL_LINK_TRAIN_NORMAL |
-		   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
-		   DP_TP_CTL_ENABLE);
+	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+		       DP_TP_CTL_FDI_AUTOTRAIN |
+		       DP_TP_CTL_LINK_TRAIN_NORMAL |
+		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+		       DP_TP_CTL_ENABLE);
 }
 
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1260,175 +1317,18 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
 	}
 
 	if (num_encoders != 1)
-		WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
-		     pipe_name(crtc->pipe));
+		drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
+			 num_encoders,
+			 pipe_name(crtc->pipe));
 
 	BUG_ON(ret == NULL);
 	return ret;
 }
 
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
-				   i915_reg_t reg)
-{
-	int refclk;
-	int n, p, r;
-	u32 wrpll;
-
-	wrpll = I915_READ(reg);
-	switch (wrpll & WRPLL_REF_MASK) {
-	case WRPLL_REF_SPECIAL_HSW:
-		/*
-		 * muxed-SSC for BDW.
-		 * non-SSC for non-ULT HSW. Check FUSE_STRAP3
-		 * for the non-SSC reference frequency.
-		 */
-		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
-			if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
-				refclk = 24;
-			else
-				refclk = 135;
-			break;
-		}
-		/* fall through */
-	case WRPLL_REF_PCH_SSC:
-		/*
-		 * We could calculate spread here, but our checking
-		 * code only cares about 5% accuracy, and spread is a max of
-		 * 0.5% downspread.
-		 */
-		refclk = 135;
-		break;
-	case WRPLL_REF_LCPLL:
-		refclk = 2700;
-		break;
-	default:
-		MISSING_CASE(wrpll);
-		return 0;
-	}
-
-	r = wrpll & WRPLL_DIVIDER_REF_MASK;
-	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
-	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
-	/* Convert to KHz, p & r have a fixed point portion */
-	return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
-	u32 p0, p1, p2, dco_freq;
-
-	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
-	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
-	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
-		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
-	else
-		p1 = 1;
-
-
-	switch (p0) {
-	case DPLL_CFGCR2_PDIV_1:
-		p0 = 1;
-		break;
-	case DPLL_CFGCR2_PDIV_2:
-		p0 = 2;
-		break;
-	case DPLL_CFGCR2_PDIV_3:
-		p0 = 3;
-		break;
-	case DPLL_CFGCR2_PDIV_7:
-		p0 = 7;
-		break;
-	}
-
-	switch (p2) {
-	case DPLL_CFGCR2_KDIV_5:
-		p2 = 5;
-		break;
-	case DPLL_CFGCR2_KDIV_2:
-		p2 = 2;
-		break;
-	case DPLL_CFGCR2_KDIV_3:
-		p2 = 3;
-		break;
-	case DPLL_CFGCR2_KDIV_1:
-		p2 = 1;
-		break;
-	}
-
-	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
-		* 24 * 1000;
-
-	dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
-		     * 24 * 1000) / 0x8000;
-
-	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
-		return 0;
-
-	return dco_freq / (p0 * p1 * p2 * 5);
-}
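
The deleted SKL helper reconstructs the DCO frequency from a 24 MHz reference, the integer divider and a 15-bit fraction (hence the /0x8000), then divides by p0 * p1 * p2 * 5 to land on the port clock in kHz. The same arithmetic as a standalone sketch, with hypothetical already-decoded field values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical decoded fields, not taken from real hardware. */
            uint32_t dco_int = 337, dco_frac = 0x4000; /* 15-bit fraction */
            uint32_t p0 = 2, p1 = 1, p2 = 2;           /* post dividers */

            uint32_t dco_khz = dco_int * 24 * 1000
                             + (dco_frac * 24 * 1000) / 0x8000;

            /* prints "DCO 8100000 kHz -> link 405000 kHz" */
            printf("DCO %u kHz -> link %u kHz\n",
                   dco_khz, dco_khz / (p0 * p1 * p2 * 5));
            return 0;
    }
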
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			struct intel_dpll_hw_state *pll_state)
-{
-	u32 p0, p1, p2, dco_freq, ref_clock;
-
-	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
-	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
-	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
-		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
-			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-	else
-		p1 = 1;
-
-
-	switch (p0) {
-	case DPLL_CFGCR1_PDIV_2:
-		p0 = 2;
-		break;
-	case DPLL_CFGCR1_PDIV_3:
-		p0 = 3;
-		break;
-	case DPLL_CFGCR1_PDIV_5:
-		p0 = 5;
-		break;
-	case DPLL_CFGCR1_PDIV_7:
-		p0 = 7;
-		break;
-	}
-
-	switch (p2) {
-	case DPLL_CFGCR1_KDIV_1:
-		p2 = 1;
-		break;
-	case DPLL_CFGCR1_KDIV_2:
-		p2 = 2;
-		break;
-	case DPLL_CFGCR1_KDIV_3:
-		p2 = 3;
-		break;
-	}
-
-	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
-	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
-		* ref_clock;
-
-	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
-		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
-	if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
-		return 0;
-
-	return dco_freq / (p0 * p1 * p2 * 5);
-}
-
 static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
 				 enum port port)
 {
-	u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+	u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
 
 	switch (val) {
 	case DDI_CLK_SEL_NONE:
@@ -1447,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
 	}
 }
 
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
-				const struct intel_dpll_hw_state *pll_state)
-{
-	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
-	u64 tmp;
-
-	ref_clock = dev_priv->cdclk.hw.ref;
-
-	if (INTEL_GEN(dev_priv) >= 12) {
-		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
-		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
-		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
-		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
-			m2_frac = pll_state->mg_pll_bias &
-				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
-			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
-		} else {
-			m2_frac = 0;
-		}
-	} else {
-		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
-		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
-		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
-			m2_frac = pll_state->mg_pll_div0 &
-				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
-			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
-		} else {
-			m2_frac = 0;
-		}
-	}
-
-	switch (pll_state->mg_clktop2_hsclkctl &
-		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
-	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
-		div1 = 2;
-		break;
-	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
-		div1 = 3;
-		break;
-	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
-		div1 = 5;
-		break;
-	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
-		div1 = 7;
-		break;
-	default:
-		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
-		return 0;
-	}
-
-	div2 = (pll_state->mg_clktop2_hsclkctl &
-		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
-		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
-	/* div2 value of 0 is same as 1 means no div */
-	if (div2 == 0)
-		div2 = 1;
-
-	/*
-	 * Adjust the original formula to delay the division by 2^22 in order to
-	 * minimize possible rounding errors.
-	 */
-	tmp = (u64)m1 * m2_int * ref_clock +
-	      (((u64)m1 * m2_frac * ref_clock) >> 22);
-	tmp = div_u64(tmp, 5 * div1 * div2);
-
-	return tmp;
-}
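
In the removed MG/DKL helper, m2 is a Q22 fixed-point feedback divider: the 22 fractional bits are shifted out only after the multiply, so truncation happens once, as the comment explains. The computation in isolation, with hypothetical decoded values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical decoded fields; ref_clock in kHz as in i915. */
            uint64_t m1 = 2, m2_int = 100, m2_frac = 1 << 21; /* .5 in Q22 */
            uint64_t ref_clock = 38400, div1 = 2, div2 = 1;

            /* m2 = m2_int + m2_frac / 2^22, with the shift deferred */
            uint64_t tmp = m1 * m2_int * ref_clock
                         + ((m1 * m2_frac * ref_clock) >> 22);

            /* 2 * 100.5 * 38400 / (5 * 2 * 1) = 771840 kHz */
            printf("link %llu kHz\n",
                   (unsigned long long)(tmp / (5 * div1 * div2)));
            return 0;
    }
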
-
 static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 {
 	int dotclock;
@@ -1543,214 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
 }
 
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
-			      struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
-	enum port port = encoder->port;
-	enum phy phy = intel_port_to_phy(dev_priv, port);
-	int link_clock;
-
-	if (intel_phy_is_combo(dev_priv, phy)) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
-	} else {
-		enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
-						pipe_config->shared_dpll);
-
-		if (pll_id == DPLL_ID_ICL_TBTPLL)
-			link_clock = icl_calc_tbt_pll_link(dev_priv, port);
-		else
-			link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
-	}
-
-	pipe_config->port_clock = link_clock;
-
-	ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
-			      struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
-	int link_clock;
-
-	if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
-		link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
-	} else {
-		link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
-		switch (link_clock) {
-		case DPLL_CFGCR0_LINK_RATE_810:
-			link_clock = 81000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_1080:
-			link_clock = 108000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_1350:
-			link_clock = 135000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_1620:
-			link_clock = 162000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_2160:
-			link_clock = 216000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_2700:
-			link_clock = 270000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_3240:
-			link_clock = 324000;
-			break;
-		case DPLL_CFGCR0_LINK_RATE_4050:
-			link_clock = 405000;
-			break;
-		default:
-			WARN(1, "Unsupported link rate\n");
-			break;
-		}
-		link_clock *= 2;
-	}
-
-	pipe_config->port_clock = link_clock;
-
-	ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
-			      struct intel_crtc_state *pipe_config)
-{
-	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
-	int link_clock;
-
-	/*
-	 * ctrl1 register is already shifted for each pll, just use 0 to get
-	 * the internal shift for each field
-	 */
-	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
-		link_clock = skl_calc_wrpll_link(pll_state);
-	} else {
-		link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
-		link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
-		switch (link_clock) {
-		case DPLL_CTRL1_LINK_RATE_810:
-			link_clock = 81000;
-			break;
-		case DPLL_CTRL1_LINK_RATE_1080:
-			link_clock = 108000;
-			break;
-		case DPLL_CTRL1_LINK_RATE_1350:
-			link_clock = 135000;
-			break;
-		case DPLL_CTRL1_LINK_RATE_1620:
-			link_clock = 162000;
-			break;
-		case DPLL_CTRL1_LINK_RATE_2160:
-			link_clock = 216000;
-			break;
-		case DPLL_CTRL1_LINK_RATE_2700:
-			link_clock = 270000;
-			break;
-		default:
-			WARN(1, "Unsupported link rate\n");
-			break;
-		}
-		link_clock *= 2;
-	}
-
-	pipe_config->port_clock = link_clock;
-
-	ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
-			      struct intel_crtc_state *pipe_config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	int link_clock = 0;
-	u32 val, pll;
-
-	val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
-	switch (val & PORT_CLK_SEL_MASK) {
-	case PORT_CLK_SEL_LCPLL_810:
-		link_clock = 81000;
-		break;
-	case PORT_CLK_SEL_LCPLL_1350:
-		link_clock = 135000;
-		break;
-	case PORT_CLK_SEL_LCPLL_2700:
-		link_clock = 270000;
-		break;
-	case PORT_CLK_SEL_WRPLL1:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
-		break;
-	case PORT_CLK_SEL_WRPLL2:
-		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
-		break;
-	case PORT_CLK_SEL_SPLL:
-		pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
-		if (pll == SPLL_FREQ_810MHz)
-			link_clock = 81000;
-		else if (pll == SPLL_FREQ_1350MHz)
-			link_clock = 135000;
-		else if (pll == SPLL_FREQ_2700MHz)
-			link_clock = 270000;
-		else {
-			WARN(1, "bad spll freq\n");
-			return;
-		}
-		break;
-	default:
-		WARN(1, "bad port clock sel\n");
-		return;
-	}
-
-	pipe_config->port_clock = link_clock * 2;
-
-	ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
-	struct dpll clock;
-
-	clock.m1 = 2;
-	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
-	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
-		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
-	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
-	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
-	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
-	return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
-			      struct intel_crtc_state *pipe_config)
-{
-	pipe_config->port_clock =
-		bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
-	ddi_dotclock_get(pipe_config);
-}
-
 static void intel_ddi_clock_get(struct intel_encoder *encoder,
 				struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_LP(dev_priv))
-		bxt_ddi_clock_get(encoder, pipe_config);
-	else if (IS_GEN9_BC(dev_priv))
-		skl_ddi_clock_get(encoder, pipe_config);
-	else if (INTEL_GEN(dev_priv) <= 8)
-		hsw_ddi_clock_get(encoder, pipe_config);
+	if (intel_phy_is_tc(dev_priv, phy) &&
+	    intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+	    DPLL_ID_ICL_TBTPLL)
+		pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+								encoder->port);
+	else
+		pipe_config->port_clock =
+			intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+	ddi_dotclock_get(pipe_config);
 }
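
The replacement collapses five platform-specific readout paths into one special case: a Type-C port clocked by the Thunderbolt PLL still needs the DDI_CLK_SEL decode above, and every other configuration just asks the shared DPLL for its frequency. intel_dpll_get_freq() is introduced elsewhere in this merge; a sketch of its assumed shape, a per-PLL-type vfunc dispatch:

    /* Sketch, assuming a ->get_freq hook on the shared-DPLL funcs. */
    int intel_dpll_get_freq(struct drm_i915_private *i915,
                            const struct intel_shared_dpll *pll)
    {
            if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
                    return 0;

            return pll->info->funcs->get_freq(i915, pll);
    }
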
 
 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -1764,7 +1401,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 	if (!intel_crtc_has_dp_encoder(crtc_state))
 		return;
 
-	WARN_ON(transcoder_is_dsi(cpu_transcoder));
+	drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
 
 	temp = DP_MSA_MISC_SYNC_CLOCK;
 
@@ -1787,8 +1424,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 	}
 
 	/* nonsense combination */
-	WARN_ON(crtc_state->limited_color_range &&
-		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 
 	if (crtc_state->limited_color_range)
 		temp |= DP_MSA_MISC_COLOR_CEA_RGB;
@@ -1810,7 +1447,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 	if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
 		temp |= DP_MSA_MISC_COLOR_VSC_SDP;
 
-	I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+	intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
 }
 
 /*
@@ -1904,7 +1541,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 			enum transcoder master;
 
 			master = crtc_state->mst_master_transcoder;
-			WARN_ON(master == INVALID_TRANSCODER);
+			drm_WARN_ON(&dev_priv->drm,
+				    master == INVALID_TRANSCODER);
 			temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
 		}
 	} else {
@@ -1925,7 +1563,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 	temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
 		temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
 /*
@@ -1942,7 +1580,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
 
 	temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
 	temp &= ~TRANS_DDI_FUNC_ENABLE;
-	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1952,16 +1590,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 val;
 
-	val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	val &= ~TRANS_DDI_FUNC_ENABLE;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		if (!intel_dp_mst_is_master_trans(crtc_state))
-			val &= ~TGL_TRANS_DDI_PORT_MASK;
+		if (!intel_dp_mst_is_master_trans(crtc_state)) {
+			val &= ~(TGL_TRANS_DDI_PORT_MASK |
+				 TRANS_DDI_MODE_SELECT_MASK);
+		}
 	} else {
-		val &= ~TRANS_DDI_PORT_MASK;
+		val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
 	}
-	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
 
 	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1983,20 +1623,21 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 
 	wakeref = intel_display_power_get_if_enabled(dev_priv,
 						     intel_encoder->power_domain);
-	if (WARN_ON(!wakeref))
+	if (drm_WARN_ON(dev, !wakeref))
 		return -ENXIO;
 
-	if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+	if (drm_WARN_ON(dev,
+			!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
 		ret = -EIO;
 		goto out;
 	}
 
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+	tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
 	if (enable)
 		tmp |= TRANS_DDI_HDCP_SIGNALLING;
 	else
 		tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
-	I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
 out:
 	intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
 	return ret;
@@ -2006,7 +1647,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 {
 	struct drm_device *dev = intel_connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_encoder *encoder = intel_connector->encoder;
+	struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
 	int type = intel_connector->base.connector_type;
 	enum port port = encoder->port;
 	enum transcoder cpu_transcoder;
@@ -2030,7 +1671,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	else
 		cpu_transcoder = (enum transcoder) pipe;
 
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
 	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
 	case TRANS_DDI_MODE_SELECT_HDMI:
@@ -2083,12 +1724,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
 	if (!wakeref)
 		return;
 
-	tmp = I915_READ(DDI_BUF_CTL(port));
+	tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
 	if (!(tmp & DDI_BUF_CTL_ENABLE))
 		goto out;
 
 	if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+		tmp = intel_de_read(dev_priv,
+				    TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 		default:
@@ -2128,7 +1770,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
 			ddi_select = TRANS_DDI_SELECT_PORT(port);
 		}
 
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		tmp = intel_de_read(dev_priv,
+				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
 		intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
 					trans_wakeref);
 
@@ -2162,7 +1805,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
 
 out:
 	if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
-		tmp = I915_READ(BXT_PHY_CTL(port));
+		tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
 		if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
 			    BXT_PHY_LANE_POWERDOWN_ACK |
 			    BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -2221,7 +1864,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 	 * happen since fake-MST encoders don't set their get_power_domains()
 	 * hook.
 	 */
-	if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
 		return;
 
 	dig_port = enc_to_dig_port(encoder);
@@ -2254,11 +1898,13 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
 
 	if (cpu_transcoder != TRANSCODER_EDP) {
 		if (INTEL_GEN(dev_priv) >= 12)
-			I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
-				   TGL_TRANS_CLK_SEL_PORT(port));
+			intel_de_write(dev_priv,
+				       TRANS_CLK_SEL(cpu_transcoder),
+				       TGL_TRANS_CLK_SEL_PORT(port));
 		else
-			I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
-				   TRANS_CLK_SEL_PORT(port));
+			intel_de_write(dev_priv,
+				       TRANS_CLK_SEL(cpu_transcoder),
+				       TRANS_CLK_SEL_PORT(port));
 	}
 }
 
@@ -2269,11 +1915,13 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
 
 	if (cpu_transcoder != TRANSCODER_EDP) {
 		if (INTEL_GEN(dev_priv) >= 12)
-			I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
-				   TGL_TRANS_CLK_SEL_DISABLED);
+			intel_de_write(dev_priv,
+				       TRANS_CLK_SEL(cpu_transcoder),
+				       TGL_TRANS_CLK_SEL_DISABLED);
 		else
-			I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
-				   TRANS_CLK_SEL_DISABLED);
+			intel_de_write(dev_priv,
+				       TRANS_CLK_SEL(cpu_transcoder),
+				       TRANS_CLK_SEL_DISABLED);
 	}
 }
 
@@ -2282,13 +1930,13 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 {
 	u32 tmp;
 
-	tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
 	tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
 	if (iboost)
 		tmp |= iboost << BALANCE_LEG_SHIFT(port);
 	else
 		tmp |= BALANCE_LEG_DISABLE(port);
-	I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+	intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
 }
 
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
@@ -2300,9 +1948,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 	u8 iboost;
 
 	if (type == INTEL_OUTPUT_HDMI)
-		iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+		iboost = intel_bios_hdmi_boost_level(encoder);
 	else
-		iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+		iboost = intel_bios_dp_boost_level(encoder);
 
 	if (iboost == 0) {
 		const struct ddi_buf_trans *ddi_translations;
@@ -2315,9 +1963,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 		else
 			ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
 
-		if (WARN_ON_ONCE(!ddi_translations))
+		if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
 			return;
-		if (WARN_ON_ONCE(level >= n_entries))
+		if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 			level = n_entries - 1;
 
 		iboost = ddi_translations[level].i_boost;
@@ -2350,9 +1998,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 	else
 		ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
 
-	if (WARN_ON_ONCE(!ddi_translations))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
 		return;
-	if (WARN_ON_ONCE(level >= n_entries))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
 
 	bxt_ddi_phy_set_signal_level(dev_priv, port,
@@ -2372,12 +2020,15 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 
 	if (INTEL_GEN(dev_priv) >= 12) {
 		if (intel_phy_is_combo(dev_priv, phy))
-			icl_get_combo_buf_trans(dev_priv, encoder->type,
+			tgl_get_combo_buf_trans(dev_priv, encoder->type,
 						intel_dp->link_rate, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
 	} else if (INTEL_GEN(dev_priv) == 11) {
-		if (intel_phy_is_combo(dev_priv, phy))
+		if (IS_ELKHARTLAKE(dev_priv))
+			ehl_get_combo_buf_trans(dev_priv, encoder->type,
+						intel_dp->link_rate, &n_entries);
+		else if (intel_phy_is_combo(dev_priv, phy))
 			icl_get_combo_buf_trans(dev_priv, encoder->type,
 						intel_dp->link_rate, &n_entries);
 		else
@@ -2399,9 +2050,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 			intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
 	}
 
-	if (WARN_ON(n_entries < 1))
+	if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
 		n_entries = 1;
-	if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
 		n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
 
 	return index_to_dp_signal_levels[n_entries - 1] &
@@ -2444,52 +2096,52 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
 	else
 		ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
 
-	if (WARN_ON_ONCE(!ddi_translations))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
 		return;
-	if (WARN_ON_ONCE(level >= n_entries))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
 		level = n_entries - 1;
 
 	/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
-	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
 	val &= ~SCALING_MODE_SEL_MASK;
 	val |= SCALING_MODE_SEL(2);
-	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 
 	/* Program PORT_TX_DW2 */
-	val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
 	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
 		 RCOMP_SCALAR_MASK);
 	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
 	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
 	/* Rcomp scalar is fixed as 0x98 for every table entry */
 	val |= RCOMP_SCALAR(0x98);
-	I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
 
 	/* Program PORT_TX_DW4 */
 	/* We cannot write to GRP. It would overwrite individual loadgen */
 	for (ln = 0; ln < 4; ln++) {
-		val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+		val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
 		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 			 CURSOR_COEFF_MASK);
 		val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
 		val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
 		val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
-		I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+		intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* Program PORT_TX_DW5 */
 	/* All DW5 values are fixed for every table entry */
-	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
 	val &= ~RTERM_SELECT_MASK;
 	val |= RTERM_SELECT(6);
 	val |= TAP3_DISABLE;
-	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 
 	/* Program PORT_TX_DW7 */
-	val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
 	val &= ~N_SCALAR_MASK;
 	val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
-	I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
 }
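
Note the asymmetry the comments point out: DW2, DW5 and DW7 go through the _GRP register, which broadcasts one value to all four lanes, while DW4 must be read-modify-written lane by lane because LOADGEN_SELECT legitimately differs per lane and a group write would wipe it. Schematically (a fragment reusing the names above, not new driver code; 'coeff' is a placeholder):

    /* Broadcast: one _GRP write lands in all four lanes at once. */
    intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);

    /* Per lane: a DW4 _GRP write would clobber per-lane LOADGEN_SELECT. */
    for (ln = 0; ln < 4; ln++) {
            val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
            val &= ~CURSOR_COEFF_MASK;
            val |= CURSOR_COEFF(coeff);
            intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
    }
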
 
 static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2515,12 +2167,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
 	 * else clear to 0b.
 	 */
-	val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
 	if (type != INTEL_OUTPUT_HDMI)
 		val |= COMMON_KEEPER_EN;
 	else
 		val &= ~COMMON_KEEPER_EN;
-	I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
 
 	/* 2. Program loadgen select */
 	/*
@@ -2530,33 +2182,33 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
 	 */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+		val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
 		val &= ~LOADGEN_SELECT;
 
 		if ((rate <= 600000 && width == 4 && ln >= 1)  ||
 		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
-		I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+		intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
 	}
 
 	/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
-	val = I915_READ(CNL_PORT_CL1CM_DW5);
+	val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
 	val |= SUS_CLOCK_CONFIG;
-	I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+	intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
 
 	/* 4. Clear training enable to change swing values */
-	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
 	val &= ~TX_TRAINING_EN;
-	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 
 	/* 5. Program swing and de-emphasis */
 	cnl_ddi_vswing_program(encoder, level, type);
 
 	/* 6. Set training enable to trigger update */
-	val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+	val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
 	val |= TX_TRAINING_EN;
-	I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+	intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
@@ -2567,8 +2219,15 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 	u32 n_entries, val;
 	int ln;
 
-	ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
-						   &n_entries);
+	if (INTEL_GEN(dev_priv) >= 12)
+		ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+							   &n_entries);
+	else if (IS_ELKHARTLAKE(dev_priv))
+		ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+							   &n_entries);
+	else
+		ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+							   &n_entries);
 	if (!ddi_translations)
 		return;
 
@@ -2578,41 +2237,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 	}
 
 	/* Set PORT_TX_DW5 */
-	val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 	val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
 		  TAP2_DISABLE | TAP3_DISABLE);
 	val |= SCALING_MODE_SEL(0x2);
 	val |= RTERM_SELECT(0x6);
 	val |= TAP3_DISABLE;
-	I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
 
 	/* Program PORT_TX_DW2 */
-	val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
 	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
 		 RCOMP_SCALAR_MASK);
 	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
 	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
 	/* Program Rcomp scalar for every table entry */
 	val |= RCOMP_SCALAR(0x98);
-	I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
 
 	/* Program PORT_TX_DW4 */
 	/* We cannot write to GRP. It would overwrite individual loadgen. */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+		val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
 		val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
 			 CURSOR_COEFF_MASK);
 		val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
 		val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
 		val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
-		I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
 	}
 
 	/* Program PORT_TX_DW7 */
-	val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
 	val &= ~N_SCALAR_MASK;
 	val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
-	I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
 }
 
 static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2641,12 +2300,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
 	 * else clear to 0b.
 	 */
-	val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
 	if (type == INTEL_OUTPUT_HDMI)
 		val &= ~COMMON_KEEPER_EN;
 	else
 		val |= COMMON_KEEPER_EN;
-	I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
 
 	/* 2. Program loadgen select */
 	/*
@@ -2656,33 +2315,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
 	 */
 	for (ln = 0; ln <= 3; ln++) {
-		val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+		val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
 		val &= ~LOADGEN_SELECT;
 
 		if ((rate <= 600000 && width == 4 && ln >= 1) ||
 		    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
 			val |= LOADGEN_SELECT;
 		}
-		I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+		intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
 	}
 
 	/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
-	val = I915_READ(ICL_PORT_CL_DW5(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
 	val |= SUS_CLOCK_CONFIG;
-	I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
 
 	/* 4. Clear training enable to change swing values */
-	val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 	val &= ~TX_TRAINING_EN;
-	I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
 
 	/* 5. Program swing and de-emphasis */
 	icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
 
 	/* 6. Set training enable to trigger update */
-	val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+	val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
 	val |= TX_TRAINING_EN;
-	I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+	intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
 }
 
 static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2706,33 +2365,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 
 	/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
 		val &= ~CRI_USE_FS32;
-		I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
 
-		val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
 		val &= ~CRI_USE_FS32;
-		I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
 	}
 
 	/* Program MG_TX_SWINGCTRL with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
 		val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
 		val |= CRI_TXDEEMPH_OVERRIDE_17_12(
 			ddi_translations[level].cri_txdeemph_override_17_12);
-		I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
 
-		val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
 		val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
 		val |= CRI_TXDEEMPH_OVERRIDE_17_12(
 			ddi_translations[level].cri_txdeemph_override_17_12);
-		I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
 	}
 
 	/* Program MG_TX_DRVCTRL with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
 		val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
 			 CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
 		val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2740,9 +2399,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			CRI_TXDEEMPH_OVERRIDE_11_6(
 				ddi_translations[level].cri_txdeemph_override_11_6) |
 			CRI_TXDEEMPH_OVERRIDE_EN;
-		I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
 
-		val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
 		val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
 			 CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
 		val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2750,7 +2409,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			CRI_TXDEEMPH_OVERRIDE_11_6(
 				ddi_translations[level].cri_txdeemph_override_11_6) |
 			CRI_TXDEEMPH_OVERRIDE_EN;
-		I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
 
 		/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
 	}
@@ -2761,17 +2420,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	 * values from table for which TX1 and TX2 enabled.
 	 */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_CLKHUB(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
 		if (link_clock < 300000)
 			val |= CFG_LOW_RATE_LKREN_EN;
 		else
 			val &= ~CFG_LOW_RATE_LKREN_EN;
-		I915_WRITE(MG_CLKHUB(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
 	}
 
 	/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_DCC(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
 		val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
 		if (link_clock <= 500000) {
 			val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2779,9 +2438,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
 				CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
 		}
-		I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
 
-		val = I915_READ(MG_TX2_DCC(ln, tc_port));
+		val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
 		val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
 		if (link_clock <= 500000) {
 			val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2789,18 +2448,22 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 			val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
 				CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
 		}
-		I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
 	}
 
 	/* Program MG_TX_PISO_READLOAD with values from vswing table */
 	for (ln = 0; ln < 2; ln++) {
-		val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
+		val = intel_de_read(dev_priv,
+				    MG_TX1_PISO_READLOAD(ln, tc_port));
 		val |= CRI_CALCINIT;
-		I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+			       val);
 
-		val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
+		val = intel_de_read(dev_priv,
+				    MG_TX2_PISO_READLOAD(ln, tc_port));
 		val |= CRI_CALCINIT;
-		I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
+		intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+			       val);
 	}
 }
 
@@ -2846,24 +2509,25 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
 	dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
 
 	for (ln = 0; ln < 2; ln++) {
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, ln));
 
-		I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+		intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
 
 		/* All the registers are RMW */
-		val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+		val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
 		val &= ~dpcnt_mask;
 		val |= dpcnt_val;
-		I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+		intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
 
-		val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+		val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
 		val &= ~dpcnt_mask;
 		val |= dpcnt_val;
-		I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+		intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
 
-		val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+		val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
 		val &= ~DKL_TX_DP20BITMODE;
-		I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+		intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
 	}
 }
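
The Dekel (DKL) PHY registers on Tigerlake are banked per lane: writing HIP_INDEX_REG selects which lane the DKL_* window decodes to, which is why the loop above re-points the window before each group of read-modify-writes. The access pattern as a fragment (names as above):

    for (ln = 0; ln < 2; ln++) {
            /* Point the DKL_* register window at lane 'ln'... */
            intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
                           HIP_INDEX_VAL(tc_port, ln));

            /* ...then every DKL_* access below targets that lane. */
            val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
            val &= ~dpcnt_mask;
            val |= dpcnt_val;
            intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
    }
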
 
@@ -2963,10 +2627,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	u32 val;
 
-	mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&dev_priv->dpll.lock);
 
-	val = I915_READ(ICL_DPCLKA_CFGCR0);
-	WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+	drm_WARN_ON(&dev_priv->drm,
+		    (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
 
 	if (intel_phy_is_combo(dev_priv, phy)) {
 		/*
@@ -2981,14 +2646,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
 		 */
 		val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 		val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
-		I915_WRITE(ICL_DPCLKA_CFGCR0, val);
-		POSTING_READ(ICL_DPCLKA_CFGCR0);
+		intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+		intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	}
 
 	val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
-	I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -2997,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 	u32 val;
 
-	mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&dev_priv->dpll.lock);
 
-	val = I915_READ(ICL_DPCLKA_CFGCR0);
+	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
-	I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3012,7 +2677,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
 	enum port port;
 	u32 val;
 
-	val = I915_READ(ICL_DPCLKA_CFGCR0);
+	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
 	for_each_port_masked(port, port_mask) {
 		enum phy phy = intel_port_to_phy(dev_priv, port);
 		bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
@@ -3025,13 +2690,13 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
 		 * Punt on the case now where clock is gated, but it would
 		 * be needed by the port. Something else is really broken then.
 		 */
-		if (WARN_ON(ddi_clk_needed))
+		if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
 			continue;
 
 		DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
 			 phy_name(phy));
 		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
-		I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+		intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
 	}
 }
 
@@ -3057,7 +2722,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 		 * In the unlikely case that BIOS enables DP in MST mode, just
 		 * warn since our MST HW readout is incomplete.
 		 */
-		if (WARN_ON(is_mst))
+		if (drm_WARN_ON(&dev_priv->drm, is_mst))
 			return;
 	}
 
@@ -3076,7 +2741,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
 			if (other_encoder == encoder)
 				continue;
 
-			if (WARN_ON(port_mask & BIT(other_encoder->port)))
+			if (drm_WARN_ON(&dev_priv->drm,
+					port_mask & BIT(other_encoder->port)))
 				return;
 		}
 		/*
@@ -3098,52 +2764,54 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	u32 val;
 	const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
-	if (WARN_ON(!pll))
+	if (drm_WARN_ON(&dev_priv->drm, !pll))
 		return;
 
-	mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&dev_priv->dpll.lock);
 
 	if (INTEL_GEN(dev_priv) >= 11) {
 		if (!intel_phy_is_combo(dev_priv, phy))
-			I915_WRITE(DDI_CLK_SEL(port),
-				   icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+			intel_de_write(dev_priv, DDI_CLK_SEL(port),
+				       icl_pll_to_ddi_clk_sel(encoder, crtc_state));
 		else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
 			/*
 			 * MG does not exist but the programming is required
 			 * to ungate DDIC and DDID
 			 */
-			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+			intel_de_write(dev_priv, DDI_CLK_SEL(port),
+				       DDI_CLK_SEL_MG);
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
-		val = I915_READ(DPCLKA_CFGCR0);
+		val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
 		val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 		val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
-		I915_WRITE(DPCLKA_CFGCR0, val);
+		intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
 
 		/*
 		 * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
 		 * This step and the step before must be done with separate
 		 * register writes.
 		 */
-		val = I915_READ(DPCLKA_CFGCR0);
+		val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
 		val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
-		I915_WRITE(DPCLKA_CFGCR0, val);
+		intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
 	} else if (IS_GEN9_BC(dev_priv)) {
 		/* DDI -> PLL mapping  */
-		val = I915_READ(DPLL_CTRL2);
+		val = intel_de_read(dev_priv, DPLL_CTRL2);
 
 		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
 			 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
 		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
 			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
-		I915_WRITE(DPLL_CTRL2, val);
+		intel_de_write(dev_priv, DPLL_CTRL2, val);
 
 	} else if (INTEL_GEN(dev_priv) < 9) {
-		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+		intel_de_write(dev_priv, PORT_CLK_SEL(port),
+			       hsw_pll_to_ddi_pll_sel(pll));
 	}
 
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3155,15 +2823,17 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 	if (INTEL_GEN(dev_priv) >= 11) {
 		if (!intel_phy_is_combo(dev_priv, phy) ||
 		    (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
-			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+			intel_de_write(dev_priv, DDI_CLK_SEL(port),
+				       DDI_CLK_SEL_NONE);
 	} else if (IS_CANNONLAKE(dev_priv)) {
-		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
-			   DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+		intel_de_write(dev_priv, DPCLKA_CFGCR0,
+			       intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
 	} else if (IS_GEN9_BC(dev_priv)) {
-		I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
-			   DPLL_CTRL2_DDI_CLK_OFF(port));
+		intel_de_write(dev_priv, DPLL_CTRL2,
+			       intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
 	} else if (INTEL_GEN(dev_priv) < 9) {
-		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+		intel_de_write(dev_priv, PORT_CLK_SEL(port),
+			       PORT_CLK_SEL_NONE);
 	}
 }
 
@@ -3180,13 +2850,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 		return;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
-		ln0 = I915_READ(DKL_DP_MODE(tc_port));
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
-		ln1 = I915_READ(DKL_DP_MODE(tc_port));
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, 0x0));
+		ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, 0x1));
+		ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
 	} else {
-		ln0 = I915_READ(MG_DP_MODE(0, tc_port));
-		ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+		ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
+		ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
 	}
 
 	ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
@@ -3198,7 +2870,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 
 	switch (pin_assignment) {
 	case 0x0:
-		WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+		drm_WARN_ON(&dev_priv->drm,
+			    intel_dig_port->tc_mode != TC_PORT_LEGACY);
 		if (width == 1) {
 			ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
 		} else {
@@ -3243,13 +2916,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 	}
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
-		I915_WRITE(DKL_DP_MODE(tc_port), ln0);
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
-		I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, 0x0));
+		intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, 0x1));
+		intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
 	} else {
-		I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
-		I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+		intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
+		intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
 	}
 }
 
@@ -3274,9 +2949,9 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
 		return;
 
 	intel_dp = enc_to_intel_dp(encoder);
-	val = I915_READ(intel_dp->regs.dp_tp_ctl);
+	val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 	val |= DP_TP_CTL_FEC_ENABLE;
-	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
 
 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
 				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
@@ -3294,90 +2969,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 		return;
 
 	intel_dp = enc_to_intel_dp(encoder);
-	val = I915_READ(intel_dp->regs.dp_tp_ctl);
+	val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 	val &= ~DP_TP_CTL_FEC_ENABLE;
-	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
-	POSTING_READ(intel_dp->regs.dp_tp_ctl);
-}
-
-static void
-tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
-	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-	u32 val;
-
-	if (!cstate->dc3co_exitline)
-		return;
-
-	val = I915_READ(EXITLINE(cstate->cpu_transcoder));
-	val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
-	I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void
-tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
-	u32 val, exit_scanlines;
-	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-
-	if (!cstate->dc3co_exitline)
-		return;
-
-	exit_scanlines = cstate->dc3co_exitline;
-	exit_scanlines <<= EXITLINE_SHIFT;
-	val = I915_READ(EXITLINE(cstate->cpu_transcoder));
-	val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
-	val |= exit_scanlines;
-	val |= EXITLINE_ENABLE;
-	I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
-					      struct intel_crtc_state *cstate)
-{
-	u32 exit_scanlines;
-	struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-	u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
-
-	cstate->dc3co_exitline = 0;
-
-	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
-		return;
-
-	/* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
-	if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
-	    encoder->port != PORT_A)
-		return;
-
-	if (!cstate->has_psr2 || !cstate->hw.active)
-		return;
-
-	/*
-	 * DC3CO Exit time 200us B.Spec 49196
-	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
-	 */
-	exit_scanlines =
-		intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
-
-	if (WARN_ON(exit_scanlines > crtc_vdisplay))
-		return;
-
-	cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
-	DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
-}
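
For the exit-line budget being removed here: with a hypothetical 1080p60 CEA timing (148.5 MHz pixel clock, htotal 2200) one scanline lasts about 14.8 us, so the 200 us DC3CO exit time becomes ROUNDUP(200 / 14.8) + 1 = 15 scanlines and dc3co_exitline = 1080 - 15 = 1065. The same arithmetic standalone:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical 1080p60 CEA timing */
            unsigned int crtc_clock = 148500; /* kHz */
            unsigned int htotal = 2200, vdisplay = 1080;
            unsigned int usecs = 200; /* DC3CO exit time, B.Spec 49196 */

            /* DIV_ROUND_UP(usecs * clock, 1000 * htotal), equivalent to
             * intel_usecs_to_scanlines() */
            unsigned int lines = (usecs * crtc_clock + 1000 * htotal - 1)
                               / (1000 * htotal);

            /* prints "exitline = 1065" */
            printf("exitline = %u\n", vdisplay - (lines + 1));
            return 0;
    }
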
-
-static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
-{
-	u32 val;
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
-	if (INTEL_GEN(dev_priv) < 12)
-		return;
-
-	val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
-
-	if (val & EXITLINE_ENABLE)
-		crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+	intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 }
 
 static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
@@ -3392,7 +2987,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	int level = intel_ddi_dp_level(intel_dp);
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
-	tgl_set_psr2_transcoder_exitline(crtc_state);
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
 
@@ -3534,9 +3128,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	int level = intel_ddi_dp_level(intel_dp);
 
 	if (INTEL_GEN(dev_priv) < 11)
-		WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+		drm_WARN_ON(&dev_priv->drm,
+			    is_mst && (port == PORT_A || port == PORT_E));
 	else
-		WARN_ON(is_mst && port == PORT_A);
+		drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
 
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
@@ -3607,8 +3202,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	/* MST will set the MSA after allocating the Virtual Channel
 	 * from the MST encoder pre_enable callback.
 	 */
-	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
 		intel_ddi_set_dp_msa(crtc_state, conn_state);
+
+		intel_dp_set_m_n(crtc_state, M1_N1);
+	}
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3618,8 +3216,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	enum port port = encoder->port;
-	int level = intel_ddi_hdmi_level(dev_priv, port);
+	int level = intel_ddi_hdmi_level(encoder);
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -3673,7 +3270,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 	 *   the DP link parameters
 	 */
 
-	WARN_ON(crtc_state->has_pch_encoder);
+	drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_map_plls_to_ports(encoder, crtc_state);
@@ -3706,20 +3303,20 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
 	bool wait = false;
 	u32 val;
 
-	val = I915_READ(DDI_BUF_CTL(port));
+	val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
 	if (val & DDI_BUF_CTL_ENABLE) {
 		val &= ~DDI_BUF_CTL_ENABLE;
-		I915_WRITE(DDI_BUF_CTL(port), val);
+		intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
 		wait = true;
 	}
 
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		val = I915_READ(intel_dp->regs.dp_tp_ctl);
+		val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
 		val |= DP_TP_CTL_LINK_TRAIN_PAT1;
-		I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+		intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
 	}
 
 	/* Disable FEC in DP Sink */
@@ -3751,9 +3348,13 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 			enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 			u32 val;
 
-			val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
-			val &= ~TGL_TRANS_DDI_PORT_MASK;
-			I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+			val = intel_de_read(dev_priv,
+					    TRANS_DDI_FUNC_CTL(cpu_transcoder));
+			val &= ~(TGL_TRANS_DDI_PORT_MASK |
+				 TRANS_DDI_MODE_SELECT_MASK);
+			intel_de_write(dev_priv,
+				       TRANS_DDI_FUNC_CTL(cpu_transcoder),
+				       val);
 		}
 	} else {
 		if (!is_mst)
@@ -3779,7 +3380,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 						  dig_port->ddi_io_power_domain);
 
 	intel_ddi_clk_disable(encoder);
-	tgl_clear_psr2_transcoder_exitline(old_crtc_state);
 }
 
 static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3816,7 +3416,8 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
 	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
 		      transcoder_name(old_crtc_state->cpu_transcoder));
 
-	I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+	intel_de_write(dev_priv,
+		       TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
 }
 
 static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3890,25 +3491,25 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	 * step 13 is the correct place for it. Step 18 is where it was
 	 * originally before the BUN.
 	 */
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
 	val &= ~FDI_RX_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
 
 	intel_disable_ddi_buf(encoder, old_crtc_state);
 	intel_ddi_clk_disable(encoder);
 
-	val = I915_READ(FDI_RX_MISC(PIPE_A));
+	val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
 	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
 	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
-	I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
 
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
 	val &= ~FDI_PCDCLK;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
 
-	val = I915_READ(FDI_RX_CTL(PIPE_A));
+	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
 	val &= ~FDI_RX_PLL_ENABLE;
-	I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
 }
 
 static void intel_enable_ddi_dp(struct intel_encoder *encoder,
@@ -3944,9 +3545,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
 		[PORT_E] = TRANSCODER_A,
 	};
 
-	WARN_ON(INTEL_GEN(dev_priv) < 9);
+	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
 
-	if (WARN_ON(port < PORT_A || port > PORT_E))
+	if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
 		port = PORT_A;
 
 	return CHICKEN_TRANS(trans[port]);
@@ -3964,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 	if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
 					       crtc_state->hdmi_high_tmds_clock_ratio,
 					       crtc_state->hdmi_scrambling))
-		DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
-			  connector->base.id, connector->name);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
+			      "scrambling/TMDS bit clock ratio\n",
+			      connector->base.id, connector->name);
 
 	/* Display WA #1143: skl,kbl,cfl */
 	if (IS_GEN9_BC(dev_priv)) {
@@ -3978,7 +3580,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 		i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
 		u32 val;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 
 		if (port == PORT_E)
 			val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3987,8 +3589,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 			val |= DDI_TRAINING_OVERRIDE_ENABLE |
 				DDI_TRAINING_OVERRIDE_VALUE;
 
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 
 		udelay(1);
 
@@ -3999,15 +3601,15 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 			val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
 				 DDI_TRAINING_OVERRIDE_VALUE);
 
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 
 	/* In HDMI/DVI mode, the port width, and swing/emphasis values
 	 * are ignored so nothing special needs to be done besides
 	 * enabling the port.
 	 */
-	I915_WRITE(DDI_BUF_CTL(port),
-		   dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+	intel_de_write(dev_priv, DDI_BUF_CTL(port),
+		       dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
 
 	if (crtc_state->has_audio)
 		intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -4017,6 +3619,12 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
 			     const struct intel_crtc_state *crtc_state,
 			     const struct drm_connector_state *conn_state)
 {
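+	/*
+	 * None of the encoders using this hook sit behind a PCH encoder;
+	 * enable the pipe and vblank handling before turning on the port.
+	 */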
+	WARN_ON(crtc_state->has_pch_encoder);
+
+	intel_enable_pipe(crtc_state);
+
+	intel_crtc_vblank_on(crtc_state);
+
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
 	else
@@ -4096,43 +3704,11 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state)
 {
-	struct intel_connector *connector =
-				to_intel_connector(conn_state->connector);
-	struct intel_hdcp *hdcp = &connector->hdcp;
-	bool content_protection_type_changed =
-			(conn_state->hdcp_content_type != hdcp->content_type &&
-			 conn_state->content_protection !=
-			 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
 
 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
 
-	/*
-	 * During the HDCP encryption session if Type change is requested,
-	 * disable the HDCP and reenable it with new TYPE value.
-	 */
-	if (conn_state->content_protection ==
-	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
-	    content_protection_type_changed)
-		intel_hdcp_disable(connector);
-
-	/*
-	 * Mark the hdcp state as DESIRED after the hdcp disable of type
-	 * change procedure.
-	 */
-	if (content_protection_type_changed) {
-		mutex_lock(&hdcp->mutex);
-		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-		schedule_work(&hdcp->prop_work);
-		mutex_unlock(&hdcp->mutex);
-	}
-
-	if (conn_state->content_protection ==
-	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
-	    content_protection_type_changed)
-		intel_hdcp_enable(connector,
-				  crtc_state->cpu_transcoder,
-				  (u8)conn_state->hdcp_content_type);
+	intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
 }
 
 static void
@@ -4197,20 +3773,20 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 	u32 dp_tp_ctl, ddi_buf_ctl;
 	bool wait = false;
 
-	dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+	dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 
 	if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
-		ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+		ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
 		if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
-			I915_WRITE(DDI_BUF_CTL(port),
-				   ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
+			intel_de_write(dev_priv, DDI_BUF_CTL(port),
+				       ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
 			wait = true;
 		}
 
 		dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
 		dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
-		I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
-		POSTING_READ(intel_dp->regs.dp_tp_ctl);
+		intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+		intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 
 		if (wait)
 			intel_wait_ddi_buf_idle(dev_priv, port);
@@ -4225,12 +3801,12 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
 	}
-	I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
-	POSTING_READ(intel_dp->regs.dp_tp_ctl);
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+	intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 
 	intel_dp->DP |= DDI_BUF_CTL_ENABLE;
-	I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
-	POSTING_READ(DDI_BUF_CTL(port));
+	intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+	intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 
 	udelay(600);
 }
@@ -4244,14 +3820,16 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
 		return false;
 
-	return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+	return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
 		AUDIO_OUTPUT_ENABLE(cpu_transcoder);
 }
 
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 					 struct intel_crtc_state *crtc_state)
 {
-	if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+	if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+		crtc_state->min_voltage_level = 2;
+	else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 3;
 	else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 1;
@@ -4268,12 +3846,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	u32 temp, flags = 0;
 
 	/* XXX: DSI transcoder paranoia */
-	if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+	if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
 		return;
 
 	intel_dsc_get_config(encoder, pipe_config);
 
-	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
 		flags |= DRM_MODE_FLAG_PHSYNC;
 	else
@@ -4342,7 +3920,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 				dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
 
 			pipe_config->fec_enable =
-				I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+				intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
 
 			DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
 				      encoder->base.base.id, encoder->base.name,
@@ -4365,9 +3943,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		break;
 	}
 
-	if (encoder->type == INTEL_OUTPUT_EDP)
-		tgl_dc3co_exitline_get_config(pipe_config);
-
 	pipe_config->has_audio =
 		intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
 
@@ -4449,7 +4024,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 		ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
 	} else {
 		ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
-		tgl_dc3co_exitline_compute_config(encoder, pipe_config);
 	}
 
 	if (ret)
@@ -4470,6 +4044,112 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 	return 0;
 }
 
+static bool mode_equal(const struct drm_display_mode *mode1,
+		       const struct drm_display_mode *mode2)
+{
+	return drm_mode_match(mode1, mode2,
+			      DRM_MODE_MATCH_TIMINGS |
+			      DRM_MODE_MATCH_FLAGS |
+			      DRM_MODE_MATCH_3D_FLAGS) &&
+		mode1->clock == mode2->clock; /* we want an exact match */
+}
+
+static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+		      const struct intel_link_m_n *m_n_2)
+{
+	return m_n_1->tu == m_n_2->tu &&
+		m_n_1->gmch_m == m_n_2->gmch_m &&
+		m_n_1->gmch_n == m_n_2->gmch_n &&
+		m_n_1->link_m == m_n_2->link_m &&
+		m_n_1->link_n == m_n_2->link_n;
+}
+
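+/*
+ * Transcoder port sync gangs multiple transcoders to drive one tiled
+ * display, which only works when every participating CRTC is active
+ * with identical output configuration, link parameters and timings;
+ * hence the exact-match checks below.
+ */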
+static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+				       const struct intel_crtc_state *crtc_state2)
+{
+	return crtc_state1->hw.active && crtc_state2->hw.active &&
+		crtc_state1->output_types == crtc_state2->output_types &&
+		crtc_state1->output_format == crtc_state2->output_format &&
+		crtc_state1->lane_count == crtc_state2->lane_count &&
+		crtc_state1->port_clock == crtc_state2->port_clock &&
+		mode_equal(&crtc_state1->hw.adjusted_mode,
+			   &crtc_state2->hw.adjusted_mode) &&
+		m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n);
+}
+
+static u8
+intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
+				int tile_group_id)
+{
+	struct drm_connector *connector;
+	const struct drm_connector_state *conn_state;
+	struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(ref_crtc_state->uapi.state);
+	u8 transcoders = 0;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
+
+	if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
+		return 0;
+
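+	/*
+	 * Collect the cpu transcoders of every CRTC in this atomic state
+	 * that drives a connector from the same tile group and is
+	 * link-compatible with the reference CRTC.
+	 */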
+	for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+		struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+		const struct intel_crtc_state *crtc_state;
+
+		if (!crtc)
+			continue;
+
+		if (!connector->has_tile ||
+		    connector->tile_group->id != tile_group_id)
+			continue;
+
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+		if (!crtcs_port_sync_compatible(ref_crtc_state, crtc_state))
+			continue;
+		transcoders |= BIT(crtc_state->cpu_transcoder);
+	}
+
+	return transcoders;
+}
+
+static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
+					 struct intel_crtc_state *crtc_state,
+					 struct drm_connector_state *conn_state)
+{
+	struct drm_connector *connector = conn_state->connector;
+	u8 port_sync_transcoders = 0;
+
+	DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]\n",
+		      encoder->base.base.id, encoder->base.name,
+		      crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+
+	if (connector->has_tile)
+		port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
+									connector->tile_group->id);
+
+	/*
+	 * eDP transcoders cannot be enslaved;
+	 * always make them a master when present.
+	 */
+	if (port_sync_transcoders & BIT(TRANSCODER_EDP))
+		crtc_state->master_transcoder = TRANSCODER_EDP;
+	else
+		crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1;
+
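+	/*
+	 * ffs() is 1-based, so the assignment above picks the lowest
+	 * numbered candidate transcoder as the master. If that turns out
+	 * to be our own transcoder, this state is the master: clear
+	 * master_transcoder and record the remaining candidates as slaves.
+	 */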
+	if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) {
+		crtc_state->master_transcoder = INVALID_TRANSCODER;
+		crtc_state->sync_mode_slaves_mask =
+			port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder);
+	}
+
+	return 0;
+}
+
 static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
 {
 	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -4569,7 +4249,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
 
 	crtc_state = to_intel_crtc_state(crtc->base.state);
 
-	WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+	drm_WARN_ON(&dev_priv->drm,
+		    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
 
 	if (!crtc_state->hw.active)
 		return 0;
@@ -4636,7 +4317,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
-	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+	drm_WARN(encoder->base.dev, ret,
+		 "Acquiring modeset locks failed with %i\n", ret);
 
 	/*
 	 * Unpowered type-c dongles can take some time to boot and be
@@ -4716,7 +4398,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
 		return max_lanes;
 
 	if (port == PORT_A || port == PORT_E) {
-		if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+		if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
 			max_lanes = port == PORT_A ? 4 : 0;
 		else
 			/* Both A and E share 2 lanes */
@@ -4739,15 +4421,14 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
 
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 {
-	struct ddi_vbt_port_info *port_info =
-		&dev_priv->vbt.ddi_port_info[port];
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *encoder;
 	bool init_hdmi, init_dp, init_lspcon = false;
 	enum phy phy = intel_port_to_phy(dev_priv, port);
 
-	init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
-	init_dp = port_info->supports_dp;
+	init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
+		intel_bios_port_supports_hdmi(dev_priv, port);
+	init_dp = intel_bios_port_supports_dp(dev_priv, port);
 
 	if (intel_bios_is_lspcon_present(dev_priv, port)) {
 		/*
@@ -4779,6 +4460,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	encoder->hotplug = intel_ddi_hotplug;
 	encoder->compute_output_type = intel_ddi_compute_output_type;
 	encoder->compute_config = intel_ddi_compute_config;
+	encoder->compute_config_late = intel_ddi_compute_config_late;
 	encoder->enable = intel_enable_ddi;
 	encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
 	encoder->pre_enable = intel_ddi_pre_enable;
@@ -4797,10 +4479,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	encoder->pipe_mask = ~0;
 
 	if (INTEL_GEN(dev_priv) >= 11)
-		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+		intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+								DDI_BUF_CTL(port)) &
 			DDI_BUF_PORT_REVERSAL;
 	else
-		intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+		intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+								DDI_BUF_CTL(port)) &
 			(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
 
 	intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
@@ -4808,8 +4492,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
 
 	if (intel_phy_is_tc(dev_priv, phy)) {
-		bool is_legacy = !port_info->supports_typec_usb &&
-				 !port_info->supports_tbt;
+		bool is_legacy =
+			!intel_bios_port_supports_typec_usb(dev_priv, port) &&
+			!intel_bios_port_supports_tbt(dev_priv, port);
 
 		intel_tc_port_init(intel_dig_port, is_legacy);
 
@@ -4817,7 +4502,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		encoder->update_complete = intel_ddi_update_complete;
 	}
 
-	WARN_ON(port > PORT_I);
+	drm_WARN_ON(&dev_priv->drm, port > PORT_I);
 	intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
 					      port - PORT_A;
 
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c657..55fd72b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
 #ifndef __INTEL_DDI_H__
 #define __INTEL_DDI_H__
 
-#include <drm/i915_drm.h>
-
 #include "intel_display.h"
 
 struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
 				     bool enable);
 void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
-			struct intel_dpll_hw_state *state);
 
 #endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
new file mode 100644
index 0000000..00da10b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DE_H__
+#define __INTEL_DE_H__
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_uncore.h"
+
+static inline u32
+intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	return intel_uncore_read(&i915->uncore, reg);
+}
+
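+/*
+ * A posting read reads a register back purely to flush preceding posted
+ * MMIO writes to the hardware; the value returned is discarded.
+ */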
+static inline void
+intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	intel_uncore_posting_read(&i915->uncore, reg);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+	return intel_uncore_read_fw(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+	intel_uncore_write(&i915->uncore, reg, val);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+	intel_uncore_write_fw(&i915->uncore, reg, val);
+}
+
+static inline void
+intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+{
+	intel_uncore_rmw(&i915->uncore, reg, clear, set);
+}
+
+static inline int
+intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+			   u32 mask, u32 value, unsigned int timeout)
+{
+	return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
+		      u32 mask, unsigned int timeout)
+{
+	return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+}
+
+static inline int
+intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+			u32 mask, unsigned int timeout)
+{
+	return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+}
+
+#endif /* __INTEL_DE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index aa45395..3468466 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_crt.h"
 #include "display/intel_ddi.h"
@@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 	val = vlv_cck_read(dev_priv, reg);
 	divider = val & CCK_FREQUENCY_VALUES;
 
-	WARN((val & CCK_FREQUENCY_STATUS) !=
-	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
-	     "%s change in progress\n", name);
+	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
+		 "%s change in progress\n", name);
 
 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
 }
@@ -235,7 +234,8 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
 						      CCK_CZ_CLOCK_CONTROL);
 
-	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
+		dev_priv->czclk_freq);
 }
 
 static inline u32 /* units of 100MHz */
@@ -518,13 +518,11 @@ static void
 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
 {
 	if (enable)
-		I915_WRITE(CLKGATE_DIS_PSL(pipe),
-			   I915_READ(CLKGATE_DIS_PSL(pipe)) |
-			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) |
+			       DUPS1_GATING_DIS | DUPS2_GATING_DIS);
 	else
-		I915_WRITE(CLKGATE_DIS_PSL(pipe),
-			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
-			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) &
+			       ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
 }
 
 /* Wa_2006604312:icl */
@@ -533,11 +531,11 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
 		       bool enable)
 {
 	if (enable)
-		I915_WRITE(CLKGATE_DIS_PSL(pipe),
-			   I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) |
+			       DPFR_GATING_DIS);
 	else
-		I915_WRITE(CLKGATE_DIS_PSL(pipe),
-			   I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) &
+			       ~DPFR_GATING_DIS);
 }
 
 static bool
@@ -883,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
 		return calculated_clock->p > best_clock->p;
 	}
 
-	if (WARN_ON_ONCE(!target_freq))
+	if (drm_WARN_ON_ONCE(dev, !target_freq))
 		return false;
 
 	*error_ppm = div_u64(1000000ULL *
@@ -1049,9 +1047,9 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
 	else
 		line_mask = DSL_LINEMASK_GEN3;
 
-	line1 = I915_READ(reg) & line_mask;
+	line1 = intel_de_read(dev_priv, reg) & line_mask;
 	msleep(5);
-	line2 = I915_READ(reg) & line_mask;
+	line2 = intel_de_read(dev_priv, reg) & line_mask;
 
 	return line1 != line2;
 }
@@ -1063,8 +1061,9 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
 
 	/* Wait for the display line to settle/start moving */
 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
-		DRM_ERROR("pipe %c scanline %s wait timed out\n",
-			  pipe_name(pipe), onoff(state));
+		drm_err(&dev_priv->drm,
+			"pipe %c scanline %s wait timed out\n",
+			pipe_name(pipe), onoff(state));
 }
 
 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
@@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
 		/* Wait for the Pipe State to go off */
 		if (intel_de_wait_for_clear(dev_priv, reg,
 					    I965_PIPECONF_ACTIVE, 100))
-			WARN(1, "pipe_off wait timed out\n");
+			drm_WARN(&dev_priv->drm, 1,
+				 "pipe_off wait timed out\n");
 	} else {
 		intel_wait_for_pipe_scanline_stopped(crtc);
 	}
@@ -1103,7 +1103,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;
 
-	val = I915_READ(DPLL(pipe));
+	val = intel_de_read(dev_priv, DPLL(pipe));
 	cur_state = !!(val & DPLL_VCO_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "PLL state assertion failure (expected %s, current %s)\n",
@@ -1139,10 +1139,11 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
 		 * so pipe->transcoder cast is fine here.
 		 */
 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
-		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		u32 val = intel_de_read(dev_priv,
+					TRANS_DDI_FUNC_CTL(cpu_transcoder));
 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
 	} else {
-		u32 val = I915_READ(FDI_TX_CTL(pipe));
+		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
 		cur_state = !!(val & FDI_TX_ENABLE);
 	}
 	I915_STATE_WARN(cur_state != state,
@@ -1158,7 +1159,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;
 
-	val = I915_READ(FDI_RX_CTL(pipe));
+	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
 	cur_state = !!(val & FDI_RX_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "FDI RX state assertion failure (expected %s, current %s)\n",
@@ -1180,7 +1181,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
 	if (HAS_DDI(dev_priv))
 		return;
 
-	val = I915_READ(FDI_TX_CTL(pipe));
+	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 }
 
@@ -1190,7 +1191,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool cur_state;
 
-	val = I915_READ(FDI_RX_CTL(pipe));
+	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
 	I915_STATE_WARN(cur_state != state,
 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
@@ -1204,14 +1205,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 	enum pipe panel_pipe = INVALID_PIPE;
 	bool locked = true;
 
-	if (WARN_ON(HAS_DDI(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
 		return;
 
 	if (HAS_PCH_SPLIT(dev_priv)) {
 		u32 port_sel;
 
 		pp_reg = PP_CONTROL(0);
-		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
 		switch (port_sel) {
 		case PANEL_PORT_SELECT_LVDS:
@@ -1238,13 +1239,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 		u32 port_sel;
 
 		pp_reg = PP_CONTROL(0);
-		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
-		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+		drm_WARN_ON(&dev_priv->drm,
+			    port_sel != PANEL_PORT_SELECT_LVDS);
 		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
 	}
 
-	val = I915_READ(pp_reg);
+	val = intel_de_read(dev_priv, pp_reg);
 	if (!(val & PANEL_POWER_ON) ||
 	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
 		locked = false;
@@ -1268,7 +1270,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
 	if (wakeref) {
-		u32 val = I915_READ(PIPECONF(cpu_transcoder));
+		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
 		cur_state = !!(val & PIPECONF_ENABLE);
 
 		intel_display_power_put(dev_priv, power_domain, wakeref);
@@ -1318,7 +1320,7 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
 	u32 val;
 	bool enabled;
 
-	val = I915_READ(PCH_TRANSCONF(pipe));
+	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
 	enabled = !!(val & TRANS_ENABLE);
 	I915_STATE_WARN(enabled,
 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
@@ -1392,12 +1394,12 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-	POSTING_READ(DPLL(pipe));
+	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 	udelay(150);
 
 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
-		DRM_ERROR("DPLL %d failed to lock\n", pipe);
+		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
 }
 
 static void vlv_enable_pll(struct intel_crtc *crtc,
@@ -1414,8 +1416,9 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
 	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
 		_vlv_enable_pll(crtc, pipe_config);
 
-	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-	POSTING_READ(DPLL_MD(pipe));
+	intel_de_write(dev_priv, DPLL_MD(pipe),
+		       pipe_config->dpll_hw_state.dpll_md);
+	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
 }
 
 
@@ -1442,11 +1445,11 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
 	udelay(1);
 
 	/* Enable PLL */
-	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
 	/* Check PLL is locked */
 	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
-		DRM_ERROR("PLL %d failed to lock\n", pipe);
+		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
 }
 
 static void chv_enable_pll(struct intel_crtc *crtc,
@@ -1470,19 +1473,23 @@ static void chv_enable_pll(struct intel_crtc *crtc,
 		 * DPLLCMD is AWOL. Use chicken bits to propagate
 		 * the value from DPLLBMD to either pipe B or C.
 		 */
-		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
-		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
-		I915_WRITE(CBR4_VLV, 0);
+		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+			       pipe_config->dpll_hw_state.dpll_md);
+		intel_de_write(dev_priv, CBR4_VLV, 0);
 		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
 
 		/*
 		 * DPLLB VGA mode also seems to cause problems.
 		 * We should always have it disabled.
 		 */
-		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+		drm_WARN_ON(&dev_priv->drm,
+			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+			     DPLL_VGA_MODE_DIS) == 0);
 	} else {
-		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
-		POSTING_READ(DPLL_MD(pipe));
+		intel_de_write(dev_priv, DPLL_MD(pipe),
+			       pipe_config->dpll_hw_state.dpll_md);
+		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
 	}
 }
 
@@ -1513,29 +1520,29 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
 	 * dividers, even though the register value does change.
 	 */
-	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
-	I915_WRITE(reg, dpll);
+	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+	intel_de_write(dev_priv, reg, dpll);
 
 	/* Wait for the clocks to stabilize. */
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	if (INTEL_GEN(dev_priv) >= 4) {
-		I915_WRITE(DPLL_MD(crtc->pipe),
-			   crtc_state->dpll_hw_state.dpll_md);
+		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+			       crtc_state->dpll_hw_state.dpll_md);
 	} else {
 		/* The pixel multiplier can only be updated once the
 		 * DPLL is enabled and the clocks are stable.
 		 *
 		 * So write it again.
 		 */
-		I915_WRITE(reg, dpll);
+		intel_de_write(dev_priv, reg, dpll);
 	}
 
 	/* We do this three times for luck */
 	for (i = 0; i < 3; i++) {
-		I915_WRITE(reg, dpll);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, dpll);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(150); /* wait for warmup */
 	}
 }
@@ -1553,8 +1560,8 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
 
-	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
-	POSTING_READ(DPLL(pipe));
+	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 }
 
 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1569,8 +1576,8 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-	I915_WRITE(DPLL(pipe), val);
-	POSTING_READ(DPLL(pipe));
+	intel_de_write(dev_priv, DPLL(pipe), val);
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 }
 
 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1586,8 +1593,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-	I915_WRITE(DPLL(pipe), val);
-	POSTING_READ(DPLL(pipe));
+	intel_de_write(dev_priv, DPLL(pipe), val);
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 
 	vlv_dpio_get(dev_priv);
 
@@ -1626,9 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 
 	if (intel_de_wait_for_register(dev_priv, dpll_reg,
 				       port_mask, expected_mask, 1000))
-		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
-		     dport->base.base.base.id, dport->base.base.name,
-		     I915_READ(dpll_reg) & port_mask, expected_mask);
+		drm_WARN(&dev_priv->drm, 1,
+			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+			 dport->base.base.base.id, dport->base.base.name,
+			 intel_de_read(dev_priv, dpll_reg) & port_mask,
+			 expected_mask);
 }
 
 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -1648,7 +1657,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 
 	if (HAS_PCH_CPT(dev_priv)) {
 		reg = TRANS_CHICKEN2(pipe);
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		/*
 		 * Workaround: Set the timing override bit
 		 * before enabling the pch transcoder.
@@ -1657,12 +1666,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 		/* Configure frame start delay to match the CPU */
 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 
 	reg = PCH_TRANSCONF(pipe);
-	val = I915_READ(reg);
-	pipeconf_val = I915_READ(PIPECONF(pipe));
+	val = intel_de_read(dev_priv, reg);
+	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
 
 	if (HAS_PCH_IBX(dev_priv)) {
 		/* Configure frame start delay to match the CPU */
@@ -1692,9 +1701,10 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
 		val |= TRANS_PROGRESSIVE;
 	}
 
-	I915_WRITE(reg, val | TRANS_ENABLE);
+	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
 	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
-		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+			pipe_name(pipe));
 }
 
 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1706,16 +1716,16 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
 
-	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
 	/* Workaround: set timing override bit. */
 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 	/* Configure frame start delay to match the CPU */
 	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
 	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
-	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
 
 	val = TRANS_ENABLE;
-	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
 
 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
 	    PIPECONF_INTERLACED_ILK)
@@ -1723,10 +1733,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
 	else
 		val |= TRANS_PROGRESSIVE;
 
-	I915_WRITE(LPT_TRANSCONF, val);
+	intel_de_write(dev_priv, LPT_TRANSCONF, val);
 	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
 				  TRANS_STATE_ENABLE, 100))
-		DRM_ERROR("Failed to enable PCH transcoder\n");
+		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
 }
 
 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1743,19 +1753,20 @@ static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
 	assert_pch_ports_disabled(dev_priv, pipe);
 
 	reg = PCH_TRANSCONF(pipe);
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	val &= ~TRANS_ENABLE;
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 	/* wait for PCH transcoder off, transcoder state */
 	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
-		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+			pipe_name(pipe));
 
 	if (HAS_PCH_CPT(dev_priv)) {
 		/* Workaround: Clear the timing override chicken bit again. */
 		reg = TRANS_CHICKEN2(pipe);
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 }
 
@@ -1763,18 +1774,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(LPT_TRANSCONF);
+	val = intel_de_read(dev_priv, LPT_TRANSCONF);
 	val &= ~TRANS_ENABLE;
-	I915_WRITE(LPT_TRANSCONF, val);
+	intel_de_write(dev_priv, LPT_TRANSCONF, val);
 	/* wait for PCH transcoder off, transcoder state */
 	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
 				    TRANS_STATE_ENABLE, 50))
-		DRM_ERROR("Failed to disable PCH transcoder\n");
+		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
 
 	/* Workaround: clear timing override bit. */
-	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
-	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
 }
 
 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
@@ -1807,7 +1818,7 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
 		return 0; /* Gen2 doesn't have a hardware frame counter */
 }
 
-static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 
@@ -1825,7 +1836,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
 	assert_vblank_disabled(&crtc->base);
 }
 
-static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1834,7 +1845,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 	i915_reg_t reg;
 	u32 val;
 
-	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
 
 	assert_planes_disabled(crtc);
 
@@ -1862,15 +1873,15 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
 	trace_intel_pipe_enable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	if (val & PIPECONF_ENABLE) {
 		/* we keep both pipes enabled on 830 */
-		WARN_ON(!IS_I830(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
 		return;
 	}
 
-	I915_WRITE(reg, val | PIPECONF_ENABLE);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+	intel_de_posting_read(dev_priv, reg);
 
 	/*
 	 * Until the pipe starts PIPEDSL reads will return a stale value,
@@ -1892,7 +1903,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	i915_reg_t reg;
 	u32 val;
 
-	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
 
 	/*
 	 * Make sure planes won't keep trying to pump pixels to us,
@@ -1903,7 +1914,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	trace_intel_pipe_disable(crtc);
 
 	reg = PIPECONF(cpu_transcoder);
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	if ((val & PIPECONF_ENABLE) == 0)
 		return;
 
@@ -1918,7 +1929,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
 	if (!IS_I830(dev_priv))
 		val &= ~PIPECONF_ENABLE;
 
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 	if ((val & PIPECONF_ENABLE) == 0)
 		intel_wait_for_pipe_off(old_crtc_state);
 }
@@ -2211,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 	unsigned int pinctl;
 	u32 alignment;
 
-	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
 		return ERR_PTR(-EINVAL);
 
 	alignment = intel_surf_alignment(fb, 0);
-	if (WARN_ON(alignment && !is_power_of_2(alignment)))
+	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
 		return ERR_PTR(-EINVAL);
 
 	/* Note that the w/a also requires 64 PTE of padding following the
@@ -2386,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 	unsigned int cpp = fb->format->cpp[color_plane];
 
-	WARN_ON(new_offset > old_offset);
+	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
 
 	if (!is_surface_linear(fb, color_plane)) {
 		unsigned int tile_size, tile_width, tile_height;
@@ -2537,8 +2548,9 @@ static int intel_fb_offset_to_xy(int *x, int *y,
 		alignment = 0;
 
 	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
-		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
-			      fb->offsets[color_plane], color_plane);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Misaligned offset 0x%08x for color plane %d\n",
+			    fb->offsets[color_plane], color_plane);
 		return -EINVAL;
 	}
 
@@ -2548,9 +2560,10 @@ static int intel_fb_offset_to_xy(int *x, int *y,
 	/* Catch potential overflows early */
 	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
 			    fb->offsets[color_plane])) {
-		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
-			      fb->offsets[color_plane], fb->pitches[color_plane],
-			      color_plane);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
+			    fb->offsets[color_plane], fb->pitches[color_plane],
+			    color_plane);
 		return -ERANGE;
 	}
 
@@ -2706,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
 
 	/*
 	 * We assume the primary plane for pipe A has
-	 * the highest stride limits of them all.
+	 * the highest stride limits of them all;
+	 * if pipe A is disabled, use the first pipe from pipe_mask.
 	 */
-	crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+	crtc = intel_get_first_crtc(dev_priv);
 	if (!crtc)
 		return 0;
 
@@ -3034,8 +3048,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 
 		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
 		if (ret) {
-			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
-				      i, fb->offsets[i]);
+			drm_dbg_kms(&dev_priv->drm,
+				    "bad fb plane %d offset: 0x%x\n",
+				    i, fb->offsets[i]);
 			return ret;
 		}
 
@@ -3054,8 +3069,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		 */
 		if (i == 0 && i915_gem_object_is_tiled(obj) &&
 		    (x + width) * cpp > fb->pitches[i]) {
-			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
-				      i, fb->offsets[i]);
+			drm_dbg_kms(&dev_priv->drm,
+				    "bad fb plane %d offset: 0x%x\n",
+				    i, fb->offsets[i]);
 			return -EINVAL;
 		}
 
@@ -3111,8 +3127,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 	}
 
 	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
-		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
-			      mul_u32_u32(max_size, tile_size), obj->base.size);
+		drm_dbg_kms(&dev_priv->drm,
+			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+			    mul_u32_u32(max_size, tile_size), obj->base.size);
 		return -EINVAL;
 	}
 
@@ -3143,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
 	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
 
-	WARN_ON(is_ccs_modifier(fb->modifier));
+	drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
 
 	/* Make src coordinates relative to the viewport */
 	drm_rect_translate(&plane_state->uapi.src,
@@ -3184,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
 						      DRM_MODE_ROTATE_0, tile_size);
 		offset /= tile_size;
 
-		WARN_ON(i >= ARRAY_SIZE(info->plane));
+		drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
 		info->plane[i].offset = offset;
 		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
 						     tile_width * cpp);
@@ -3377,6 +3394,67 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 	}
 }
 
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+		  struct intel_initial_plane_config *plane_config)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 base, size;
+
+	if (plane_config->size == 0)
+		return NULL;
+
+	base = round_down(plane_config->base,
+			  I915_GTT_MIN_ALIGNMENT);
+	size = round_up(plane_config->base + plane_config->size,
+			I915_GTT_MIN_ALIGNMENT);
+	size -= base;
+
+	/*
+	 * If the FB is too big, just don't use it since fbdev is not very
+	 * important and we should probably use that space with FBC or other
+	 * features.
+	 */
+	if (size * 2 > i915->stolen_usable_size)
+		return NULL;
+
+	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+	if (IS_ERR(obj))
+		return NULL;
+
+	switch (plane_config->tiling) {
+	case I915_TILING_NONE:
+		break;
+	case I915_TILING_X:
+	case I915_TILING_Y:
+		obj->tiling_and_stride =
+			plane_config->fb->base.pitches[0] |
+			plane_config->tiling;
+		break;
+	default:
+		MISSING_CASE(plane_config->tiling);
+		goto err_obj;
+	}
+
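+	/*
+	 * Bind the buffer at the exact GGTT offset the firmware already
+	 * programmed (PIN_OFFSET_FIXED | base) so scanout of the boot
+	 * framebuffer continues uninterrupted while the driver takes over.
+	 */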
+	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+	if (IS_ERR(vma))
+		goto err_obj;
+
+	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+		goto err_obj;
+
+	if (i915_gem_object_is_tiled(obj) &&
+	    !i915_vma_is_map_and_fenceable(vma))
+		goto err_obj;
+
+	return vma;
+
+err_obj:
+	i915_gem_object_put(obj);
+	return NULL;
+}
+
 static bool
 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 			      struct intel_initial_plane_config *plane_config)
@@ -3385,22 +3463,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct drm_framebuffer *fb = &plane_config->fb->base;
-	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
-	u32 size_aligned = round_up(plane_config->base + plane_config->size,
-				    PAGE_SIZE);
-	struct drm_i915_gem_object *obj;
-	bool ret = false;
-
-	size_aligned -= base_aligned;
-
-	if (plane_config->size == 0)
-		return false;
-
-	/* If the FB is too big, just don't use it since fbdev is not very
-	 * important and we should probably use that space with FBC or other
-	 * features. */
-	if (size_aligned * 2 > dev_priv->stolen_usable_size)
-		return false;
+	struct i915_vma *vma;
 
 	switch (fb->modifier) {
 	case DRM_FORMAT_MOD_LINEAR:
@@ -3408,30 +3471,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	case I915_FORMAT_MOD_Y_TILED:
 		break;
 	default:
-		DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
-				 fb->modifier);
+		drm_dbg(&dev_priv->drm,
+			"Unsupported modifier for initial FB: 0x%llx\n",
+			fb->modifier);
 		return false;
 	}
 
-	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
-							     base_aligned,
-							     base_aligned,
-							     size_aligned);
-	if (IS_ERR(obj))
+	vma = initial_plane_vma(dev_priv, plane_config);
+	if (!vma)
 		return false;
 
-	switch (plane_config->tiling) {
-	case I915_TILING_NONE:
-		break;
-	case I915_TILING_X:
-	case I915_TILING_Y:
-		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
-		break;
-	default:
-		MISSING_CASE(plane_config->tiling);
-		goto out;
-	}
-
 	mode_cmd.pixel_format = fb->format->format;
 	mode_cmd.width = fb->width;
 	mode_cmd.height = fb->height;
@@ -3439,17 +3488,18 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	mode_cmd.modifier[0] = fb->modifier;
 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
 
-	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
-		DRM_DEBUG_KMS("intel fb init failed\n");
-		goto out;
+	if (intel_framebuffer_init(to_intel_framebuffer(fb),
+				   vma->obj, &mode_cmd)) {
+		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+		goto err_vma;
 	}
 
+	plane_config->vma = vma;
+	return true;
 
-	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
-	ret = true;
-out:
-	i915_gem_object_put(obj);
-	return ret;
+err_vma:
+	i915_vma_put(vma);
+	return false;
 }
 
 static void
@@ -3493,9 +3543,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
 	struct intel_plane_state *plane_state =
 		to_intel_plane_state(plane->base.state);
 
-	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
-		      plane->base.base.id, plane->base.name,
-		      crtc->base.base.id, crtc->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+		    plane->base.base.id, plane->base.name,
+		    crtc->base.base.id, crtc->base.name);
 
 	intel_set_plane_visible(crtc_state, plane_state, false);
 	fixup_active_planes(crtc_state);
@@ -3547,17 +3598,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	struct intel_plane_state *intel_state =
 		to_intel_plane_state(plane_state);
 	struct drm_framebuffer *fb;
+	struct i915_vma *vma;
 
 	if (!plane_config->fb)
 		return;
 
 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
 		fb = &plane_config->fb->base;
+		vma = plane_config->vma;
 		goto valid_fb;
 	}
 
-	kfree(plane_config->fb);
-
 	/*
 	 * Failed to alloc the obj, check to see if we should share
 	 * an fb with another CRTC instead
@@ -3577,7 +3628,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 
 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
 			fb = state->hw.fb;
-			drm_framebuffer_get(fb);
+			vma = state->vma;
 			goto valid_fb;
 		}
 	}
@@ -3600,21 +3651,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 	intel_state->color_plane[0].stride =
 		intel_fb_pitch(fb, 0, intel_state->hw.rotation);
 
-	intel_state->vma =
-		intel_pin_and_fence_fb_obj(fb,
-					   &intel_state->view,
-					   intel_plane_uses_fence(intel_state),
-					   &intel_state->flags);
-	if (IS_ERR(intel_state->vma)) {
-		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
-			  intel_crtc->pipe, PTR_ERR(intel_state->vma));
-
-		intel_state->vma = NULL;
-		drm_framebuffer_put(fb);
-		return;
-	}
-
-	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
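+	/*
+	 * The vma is already pinned, either by initial_plane_vma() or as
+	 * part of another CRTC's plane state; take an extra pin plus a
+	 * reference for this plane state, and treat the fence as
+	 * best-effort.
+	 */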
+	__i915_vma_pin(vma);
+	intel_state->vma = i915_vma_get(vma);
+	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
+		if (vma->fence)
+			intel_state->flags |= PLANE_HAS_FENCE;
 
 	plane_state->src_x = 0;
 	plane_state->src_y = 0;
@@ -3633,9 +3674,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		dev_priv->preserve_bios_swizzle = true;
 
 	plane_state->fb = fb;
+	drm_framebuffer_get(fb);
+
 	plane_state->crtc = &intel_crtc->base;
 	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
 
+	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
 		  &to_intel_frontbuffer(fb)->bits);
 }
@@ -3798,15 +3843,16 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 		max_height = skl_max_plane_height();
 
 	if (w > max_width || h > max_height) {
-		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
-			      w, h, max_width, max_height);
+		drm_dbg_kms(&dev_priv->drm,
+			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+			    w, h, max_width, max_height);
 		return -EINVAL;
 	}
 
 	intel_add_fb_offsets(&x, &y, plane_state, 0);
 	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
 	alignment = intel_surf_alignment(fb, 0);
-	if (WARN_ON(alignment && !is_power_of_2(alignment)))
+	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
 		return -EINVAL;
 
 	/*
@@ -3829,7 +3875,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 
 		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
 			if (offset == 0) {
-				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
+				drm_dbg_kms(&dev_priv->drm,
+					    "Unable to find suitable display surface offset due to X-tiling\n");
 				return -EINVAL;
 			}
 
@@ -3854,7 +3901,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 
 		if (x != plane_state->color_plane[aux_plane].x ||
 		    y != plane_state->color_plane[aux_plane].y) {
-			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unable to find suitable display surface offset due to CCS\n");
 			return -EINVAL;
 		}
 	}
@@ -3875,6 +3923,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
 
 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 {
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	unsigned int rotation = plane_state->hw.rotation;
 	int uv_plane = 1;
@@ -3892,8 +3941,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 
 	/* FIXME not quite sure how/if these apply to the chroma plane */
 	if (w > max_width || h > max_height) {
-		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
-			      w, h, max_width, max_height);
+		drm_dbg_kms(&i915->drm,
+			    "CbCr source size %dx%d too big (limit %dx%d)\n",
+			    w, h, max_width, max_height);
 		return -EINVAL;
 	}
 
@@ -3922,7 +3972,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
 
 		if (x != plane_state->color_plane[ccs_plane].x ||
 		    y != plane_state->color_plane[ccs_plane].y) {
-			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+			drm_dbg_kms(&i915->drm,
+				    "Unable to find suitable display surface offset due to CCS\n");
 			return -EINVAL;
 		}
 	}
@@ -4331,7 +4382,8 @@ static void i9xx_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+			  plane_state->color_plane[0].stride);
 
 	if (INTEL_GEN(dev_priv) < 4) {
 		/*
@@ -4339,21 +4391,26 @@ static void i9xx_update_plane(struct intel_plane *plane,
 		 * generator but let's assume we still need to
 		 * program whatever is there.
 		 */
-		I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
-		I915_WRITE_FW(DSPSIZE(i9xx_plane),
-			      ((crtc_h - 1) << 16) | (crtc_w - 1));
+		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+				  (crtc_y << 16) | crtc_x);
+		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+				  ((crtc_h - 1) << 16) | (crtc_w - 1));
 	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
-		I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
-		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
-			      ((crtc_h - 1) << 16) | (crtc_w - 1));
-		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
+		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+				  (crtc_y << 16) | crtc_x);
+		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+				  ((crtc_h - 1) << 16) | (crtc_w - 1));
+		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
 	}
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
+		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+				  (y << 16) | x);
 	} else if (INTEL_GEN(dev_priv) >= 4) {
-		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
-		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+				  linear_offset);
+		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+				  (y << 16) | x);
 	}
 
 	/*
@@ -4361,15 +4418,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
 	 * disabled. Try to make the plane enable atomic by writing
 	 * the control register just before the surface register.
 	 */
-	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
 	if (INTEL_GEN(dev_priv) >= 4)
-		I915_WRITE_FW(DSPSURF(i9xx_plane),
-			      intel_plane_ggtt_offset(plane_state) +
-			      dspaddr_offset);
+		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
 	else
-		I915_WRITE_FW(DSPADDR(i9xx_plane),
-			      intel_plane_ggtt_offset(plane_state) +
-			      dspaddr_offset);
+		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -4396,11 +4451,11 @@ static void i9xx_disable_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
 	if (INTEL_GEN(dev_priv) >= 4)
-		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
+		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
 	else
-		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -4425,7 +4480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(DSPCNTR(i9xx_plane));
+	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
 
 	ret = val & DISPLAY_PLANE_ENABLE;
 
@@ -4444,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	unsigned long irqflags;
 
-	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
-	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
-	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 /*
@@ -4791,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev,
 
 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
 
-	WARN_ON(ret == -EDEADLK);
+	drm_WARN_ON(dev, ret == -EDEADLK);
 	return ret;
 }
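
drm_WARN_ON(dev, cond) works like WARN_ON(cond) but prefixes the splat with the drm device, which is what makes the warning attributable on multi-GPU systems; the same goes for the drm_WARN()/drm_WARN_ON() conversions later in this series. It also evaluates to the condition, so it can sit in an if, assuming a struct drm_device *dev in scope as above:

	if (drm_WARN_ON(dev, ret == -EDEADLK))	/* splat names the device */
		return ret;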
 
@@ -4819,7 +4879,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
 
 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
-		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Modeset potentially stuck, unbreaking through wedging\n");
 		intel_gt_set_wedged(&dev_priv->gt);
 	}
 
@@ -4843,13 +4904,15 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	state = drm_atomic_helper_duplicate_state(dev, ctx);
 	if (IS_ERR(state)) {
 		ret = PTR_ERR(state);
-		DRM_ERROR("Duplicating state failed with %i\n", ret);
+		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+			ret);
 		return;
 	}
 
 	ret = drm_atomic_helper_disable_all(dev, ctx);
 	if (ret) {
-		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+			ret);
 		drm_atomic_state_put(state);
 		return;
 	}
@@ -4878,7 +4941,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 		/* for testing only restore the display */
 		ret = __intel_display_resume(dev, state, ctx);
 		if (ret)
-			DRM_ERROR("Restoring old state failed with %i\n", ret);
+			drm_err(&dev_priv->drm,
+				"Restoring old state failed with %i\n", ret);
 	} else {
 		/*
 		 * The display has been reset as well,
@@ -4895,7 +4959,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 
 		ret = __intel_display_resume(dev, state, ctx);
 		if (ret)
-			DRM_ERROR("Restoring old state failed with %i\n", ret);
+			drm_err(&dev_priv->drm,
+				"Restoring old state failed with %i\n", ret);
 
 		intel_hpd_init(dev_priv);
 	}
@@ -4915,7 +4980,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 	enum pipe pipe = crtc->pipe;
 	u32 tmp;
 
-	tmp = I915_READ(PIPE_CHICKEN(pipe));
+	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
 
 	/*
 	 * Display WA #1153: icl
@@ -4930,7 +4995,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 	 * across pipe
 	 */
 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
-	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }
 
 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
@@ -4959,8 +5024,9 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
 	/* Enable Transcoder Port Sync */
 	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
 
-	I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
-		   trans_ddi_func_ctl2_val);
+	intel_de_write(dev_priv,
+		       TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+		       trans_ddi_func_ctl2_val);
 }
 
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4973,7 +5039,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
 
 	/* enable normal train */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if (IS_IVYBRIDGE(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
@@ -4981,10 +5047,10 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
 	}
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -4992,16 +5058,16 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_NONE;
 	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
 
 	/* wait one idle pattern time */
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(1000);
 
 	/* IVB wants error correction enabled */
 	if (IS_IVYBRIDGE(dev_priv))
-		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
-			   FDI_FE_ERRC_ENABLE);
+		intel_de_write(dev_priv, reg,
+		               intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
@@ -5020,81 +5086,83 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
 	reg = FDI_RX_IMR(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_RX_SYMBOL_LOCK;
 	temp &= ~FDI_RX_BIT_LOCK;
-	I915_WRITE(reg, temp);
-	I915_READ(reg);
+	intel_de_write(dev_priv, reg, temp);
+	intel_de_read(dev_priv, reg);
 	udelay(150);
 
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	/* Ironlake workaround, enable clock pointer after FDI enable */
-	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
-		   FDI_RX_PHASE_SYNC_POINTER_EN);
+	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+		       FDI_RX_PHASE_SYNC_POINTER_OVR);
+	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
 
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
-		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+		temp = intel_de_read(dev_priv, reg);
+		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 
 		if ((temp & FDI_RX_BIT_LOCK)) {
-			DRM_DEBUG_KMS("FDI train 1 done.\n");
-			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
 			break;
 		}
 	}
 	if (tries == 5)
-		DRM_ERROR("FDI train 1 fail!\n");
+		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 
 	/* Train 2 */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
-		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+		temp = intel_de_read(dev_priv, reg);
+		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 
 		if (temp & FDI_RX_SYMBOL_LOCK) {
-			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			intel_de_write(dev_priv, reg,
+				       temp | FDI_RX_SYMBOL_LOCK);
+			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
 			break;
 		}
 	}
 	if (tries == 5)
-		DRM_ERROR("FDI train 2 fail!\n");
+		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 
-	DRM_DEBUG_KMS("FDI train done\n");
+	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
 
 }
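
Every training step in this function follows the same read-modify-write shape: read the control register, clear the training-pattern field, set the new pattern, write it back. A hypothetical helper (fdi_rmw is not in the driver) capturing that shape:

	/* Hypothetical helper illustrating the RMW shape repeated above. */
	static void fdi_rmw(struct drm_i915_private *dev_priv, i915_reg_t reg,
			    u32 clear, u32 set)
	{
		u32 tmp = intel_de_read(dev_priv, reg);

		tmp &= ~clear;
		tmp |= set;
		intel_de_write(dev_priv, reg, tmp);
	}

With it, switching the TX side to pattern 2 would read fdi_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2).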
 
@@ -5118,17 +5186,17 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
 	reg = FDI_RX_IMR(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_RX_SYMBOL_LOCK;
 	temp &= ~FDI_RX_BIT_LOCK;
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 	temp &= ~FDI_LINK_TRAIN_NONE;
@@ -5136,13 +5204,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 	/* SNB-B */
 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 
-	I915_WRITE(FDI_RX_MISC(pipe),
-		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5150,28 +5218,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_PATTERN_1;
 	}
-	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		temp |= snb_b_fdi_train_param[i];
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
-		POSTING_READ(reg);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(500);
 
 		for (retry = 0; retry < 5; retry++) {
 			reg = FDI_RX_IIR(pipe);
-			temp = I915_READ(reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 			if (temp & FDI_RX_BIT_LOCK) {
-				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-				DRM_DEBUG_KMS("FDI train 1 done.\n");
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_BIT_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 1 done.\n");
 				break;
 			}
 			udelay(50);
@@ -5180,11 +5250,11 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 			break;
 	}
 	if (i == 4)
-		DRM_ERROR("FDI train 1 fail!\n");
+		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
 
 	/* Train 2 */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_2;
 	if (IS_GEN(dev_priv, 6)) {
@@ -5192,10 +5262,10 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 		/* SNB-B */
 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 	}
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -5203,28 +5273,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 		temp &= ~FDI_LINK_TRAIN_NONE;
 		temp |= FDI_LINK_TRAIN_PATTERN_2;
 	}
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		temp |= snb_b_fdi_train_param[i];
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
-		POSTING_READ(reg);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(500);
 
 		for (retry = 0; retry < 5; retry++) {
 			reg = FDI_RX_IIR(pipe);
-			temp = I915_READ(reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 			if (temp & FDI_RX_SYMBOL_LOCK) {
-				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-				DRM_DEBUG_KMS("FDI train 2 done.\n");
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_SYMBOL_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 2 done.\n");
 				break;
 			}
 			udelay(50);
@@ -5233,9 +5305,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
 			break;
 	}
 	if (i == 4)
-		DRM_ERROR("FDI train 2 fail!\n");
+		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
 
-	DRM_DEBUG_KMS("FDI train done.\n");
+	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 }
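
The retry loops above poll FDI_RX_IIR for the bit-lock/symbol-lock status and acknowledge a hit by writing the bit back, which reads like the usual write-one-to-clear convention for interrupt status registers (an assumption here, not stated in the hunk). A condensed sketch of one polling step (example_wait_bit_lock is hypothetical):

	static bool example_wait_bit_lock(struct drm_i915_private *dev_priv,
					  enum pipe pipe)
	{
		int retry;

		for (retry = 0; retry < 5; retry++) {
			u32 temp = intel_de_read(dev_priv, FDI_RX_IIR(pipe));

			if (temp & FDI_RX_BIT_LOCK) {
				/* acknowledge the status bit */
				intel_de_write(dev_priv, FDI_RX_IIR(pipe),
					       temp | FDI_RX_BIT_LOCK);
				return true;
			}
			udelay(50);
		}
		return false;
	}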
 
 /* Manual link training for Ivy Bridge A0 parts */
@@ -5251,111 +5323,117 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
 	reg = FDI_RX_IMR(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_RX_SYMBOL_LOCK;
 	temp &= ~FDI_RX_BIT_LOCK;
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(150);
 
-	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
-		      I915_READ(FDI_RX_IIR(pipe)));
+	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
 
 	/* Try each vswing and preemphasis setting twice before moving on */
 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
 		/* disable first in case we need to retry */
 		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
 		temp &= ~FDI_TX_ENABLE;
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
 		reg = FDI_RX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_LINK_TRAIN_AUTO;
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp &= ~FDI_RX_ENABLE;
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
 		/* enable CPU FDI TX and PCH FDI RX */
 		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
 		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
 		temp |= snb_b_fdi_train_param[j/2];
 		temp |= FDI_COMPOSITE_SYNC;
-		I915_WRITE(reg, temp | FDI_TX_ENABLE);
+		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
 
-		I915_WRITE(FDI_RX_MISC(pipe),
-			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
 		reg = FDI_RX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
 		temp |= FDI_COMPOSITE_SYNC;
-		I915_WRITE(reg, temp | FDI_RX_ENABLE);
+		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
 
-		POSTING_READ(reg);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(1); /* should be 0.5us */
 
 		for (i = 0; i < 4; i++) {
 			reg = FDI_RX_IIR(pipe);
-			temp = I915_READ(reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 
 			if (temp & FDI_RX_BIT_LOCK ||
-			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
-				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
-					      i);
+			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_BIT_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 1 done, level %i.\n",
+					    i);
 				break;
 			}
 			udelay(1); /* should be 0.5us */
 		}
 		if (i == 4) {
-			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+			drm_dbg_kms(&dev_priv->drm,
+				    "FDI train 1 fail on vswing %d\n", j / 2);
 			continue;
 		}
 
 		/* Train 2 */
 		reg = FDI_TX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
 		reg = FDI_RX_CTL(pipe);
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 
-		POSTING_READ(reg);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(2); /* should be 1.5us */
 
 		for (i = 0; i < 4; i++) {
 			reg = FDI_RX_IIR(pipe);
-			temp = I915_READ(reg);
-			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			temp = intel_de_read(dev_priv, reg);
+			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
 
 			if (temp & FDI_RX_SYMBOL_LOCK ||
-			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
-				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
-					      i);
+			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+				intel_de_write(dev_priv, reg,
+					       temp | FDI_RX_SYMBOL_LOCK);
+				drm_dbg_kms(&dev_priv->drm,
+					    "FDI train 2 done, level %i.\n",
+					    i);
 				goto train_done;
 			}
 			udelay(2); /* should be 1.5us */
 		}
 		if (i == 4)
-			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+			drm_dbg_kms(&dev_priv->drm,
+				    "FDI train 2 fail on vswing %d\n", j / 2);
 	}
 
 train_done:
-	DRM_DEBUG_KMS("FDI train done.\n");
+	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
 }
 
 static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -5368,29 +5446,29 @@ static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
 
 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(200);
 
 	/* Switch from Rawclk to PCDclk */
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp | FDI_PCDCLK);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(200);
 
 	/* Enable CPU FDI TX PLL, always on for Ironlake */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
-		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
 
-		POSTING_READ(reg);
+		intel_de_posting_read(dev_priv, reg);
 		udelay(100);
 	}
 }
@@ -5405,23 +5483,23 @@ static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
 
 	/* Switch from PCDclk to Rawclk */
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
 
 	/* Disable CPU FDI TX PLL */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(100);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
 
 	/* Wait for the clocks to turn off. */
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(100);
 }
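
intel_de_posting_read() here replaces POSTING_READ(): a dummy read of the register just written, which forces posted MMIO writes out to the device so that the udelay() that follows actually measures time after the write has landed. The idiom in isolation:

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);	/* flush the posted write */
	udelay(100);				/* now the delay is meaningful */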
 
@@ -5434,32 +5512,33 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
 
 	/* disable CPU FDI tx and PCH FDI rx */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
-	POSTING_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
+	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+	intel_de_posting_read(dev_priv, reg);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~(0x7 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
 	if (HAS_PCH_IBX(dev_priv))
-		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+			       FDI_RX_PHASE_SYNC_POINTER_OVR);
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
-	I915_WRITE(reg, temp);
+	intel_de_write(dev_priv, reg, temp);
 
 	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
+	temp = intel_de_read(dev_priv, reg);
 	if (HAS_PCH_CPT(dev_priv)) {
 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5469,10 +5548,10 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
 	}
 	/* BPC in FDI rx is consistent with that in PIPECONF */
 	temp &= ~(0x07 << 16);
-	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
-	I915_WRITE(reg, temp);
+	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	intel_de_write(dev_priv, reg, temp);
 
-	POSTING_READ(reg);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(100);
 }
 
@@ -5505,7 +5584,7 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
 {
 	u32 temp;
 
-	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
 
 	mutex_lock(&dev_priv->sb_lock);
 
@@ -5552,17 +5631,14 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
 	}
 
 	/* This should not happen with any sane values */
-	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
-		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
-	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
-		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
-	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
-			clock,
-			auxdiv,
-			divsel,
-			phasedir,
-			phaseinc);
+	drm_dbg_kms(&dev_priv->drm,
+		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+		    clock, auxdiv, divsel, phasedir, phaseinc);
 
 	mutex_lock(&dev_priv->sb_lock);
 
@@ -5592,7 +5668,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
 	/* Wait for initialization time */
 	udelay(24);
 
-	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 }
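
The auxdiv/divsel/phasedir/phaseinc values logged above are computed earlier in lpt_program_iclkip(), outside this hunk. As best recalled from the driver (treat the constants as assumptions, they are not in this diff), the derivation divides a fixed virtual root clock by the pixel clock, with clock in kHz as elsewhere in this function:

	u32 iclk_virtual_root_freq = 172800 * 1000;	/* fixed root clock */
	u32 iclk_pi_range = 64;
	u32 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
	u32 divsel = (desired_divisor / iclk_pi_range) - 2;
	u32 phaseinc = desired_divisor % iclk_pi_range;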
 
 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
@@ -5603,7 +5679,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
 	u32 desired_divisor;
 	u32 temp;
 
-	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
 		return 0;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -5639,41 +5715,46 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
-	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
-		   I915_READ(HTOTAL(cpu_transcoder)));
-	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
-		   I915_READ(HBLANK(cpu_transcoder)));
-	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
-		   I915_READ(HSYNC(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
 
-	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
-		   I915_READ(VTOTAL(cpu_transcoder)));
-	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
-		   I915_READ(VBLANK(cpu_transcoder)));
-	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
-		   I915_READ(VSYNC(cpu_transcoder)));
-	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
-		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
 }
 
 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
 {
 	u32 temp;
 
-	temp = I915_READ(SOUTH_CHICKEN1);
+	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
 		return;
 
-	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
-	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+		    FDI_RX_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+		    FDI_RX_ENABLE);
 
 	temp &= ~FDI_BC_BIFURCATION_SELECT;
 	if (enable)
 		temp |= FDI_BC_BIFURCATION_SELECT;
 
-	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
-	I915_WRITE(SOUTH_CHICKEN1, temp);
-	POSTING_READ(SOUTH_CHICKEN1);
+	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+		    enable ? "en" : "dis");
+	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
 }
 
 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
@@ -5723,8 +5804,9 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
 		num_encoders++;
 	}
 
-	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
-	     num_encoders, pipe_name(crtc->pipe));
+	drm_WARN(encoder->base.dev, num_encoders != 1,
+		 "%d encoders for pipe %c\n",
+		 num_encoders, pipe_name(crtc->pipe));
 
 	return encoder;
 }
@@ -5753,8 +5835,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
 
 	/* Write the TU size bits before fdi link training, so that error
 	 * detection works. */
-	I915_WRITE(FDI_RX_TUSIZE1(pipe),
-		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc, crtc_state);
@@ -5764,7 +5846,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
 	if (HAS_PCH_CPT(dev_priv)) {
 		u32 sel;
 
-		temp = I915_READ(PCH_DPLL_SEL);
+		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
 		temp |= TRANS_DPLL_ENABLE(pipe);
 		sel = TRANS_DPLLB_SEL(pipe);
 		if (crtc_state->shared_dpll ==
@@ -5772,7 +5854,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
 			temp |= sel;
 		else
 			temp &= ~sel;
-		I915_WRITE(PCH_DPLL_SEL, temp);
+		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
 	}
 
 	/* XXX: pch pll's can be enabled any time before we enable the PCH
@@ -5795,11 +5877,11 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
 	    intel_crtc_has_dp_encoder(crtc_state)) {
 		const struct drm_display_mode *adjusted_mode =
 			&crtc_state->hw.adjusted_mode;
-		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
 		i915_reg_t reg = TRANS_DP_CTL(pipe);
 		enum port port;
 
-		temp = I915_READ(reg);
+		temp = intel_de_read(dev_priv, reg);
 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
 			  TRANS_DP_SYNC_MASK |
 			  TRANS_DP_BPC_MASK);
@@ -5812,17 +5894,16 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
-		WARN_ON(port < PORT_B || port > PORT_D);
+		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
 		temp |= TRANS_DP_PORT_SEL(port);
 
-		I915_WRITE(reg, temp);
+		intel_de_write(dev_priv, reg, temp);
 	}
 
 	ilk_enable_pch_transcoder(crtc_state);
 }
 
-static void lpt_pch_enable(const struct intel_atomic_state *state,
-			   const struct intel_crtc_state *crtc_state)
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5844,11 +5925,13 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
 	i915_reg_t dslreg = PIPEDSL(pipe);
 	u32 temp;
 
-	temp = I915_READ(dslreg);
+	temp = intel_de_read(dev_priv, dslreg);
 	udelay(500);
-	if (wait_for(I915_READ(dslreg) != temp, 5)) {
-		if (wait_for(I915_READ(dslreg) != temp, 5))
-			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
+		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
+			drm_err(&dev_priv->drm,
+				"mode set failed: pipe %c stuck\n",
+				pipe_name(pipe));
 	}
 }
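
wait_for(COND, timeout_ms) polls until COND becomes true or the millisecond timeout expires, evaluating to 0 on success and an error code on timeout, so the nested calls above amount to one retry before declaring the pipe stuck. The generic shape (SOME_STATUS_BIT is a placeholder, not a real register bit):

	if (wait_for(intel_de_read(dev_priv, reg) & SOME_STATUS_BIT, 5))
		drm_err(&dev_priv->drm, "status bit never set\n");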
 
@@ -5963,7 +6046,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	 */
 	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
 	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Pipe/Plane scaling not supported with IF-ID mode\n");
 		return -EINVAL;
 	}
 
@@ -5982,10 +6066,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 			scaler_state->scaler_users &= ~(1 << scaler_user);
 			scaler_state->scalers[*scaler_id].in_use = 0;
 
-			DRM_DEBUG_KMS("scaler_user index %u.%u: "
-				"Staged freeing scaler id %d scaler_users = 0x%x\n",
-				intel_crtc->pipe, scaler_user, *scaler_id,
-				scaler_state->scaler_users);
+			drm_dbg_kms(&dev_priv->drm,
+				    "scaler_user index %u.%u: "
+				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
+				    intel_crtc->pipe, scaler_user, *scaler_id,
+				    scaler_state->scaler_users);
 			*scaler_id = -1;
 		}
 		return 0;
@@ -5993,7 +6078,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 
 	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
 	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
-		DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Planar YUV: src dimensions not met\n");
 		return -EINVAL;
 	}
 
@@ -6006,18 +6092,20 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	    (INTEL_GEN(dev_priv) < 11 &&
 	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
 	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
-		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
-			"size is out of scaler range\n",
-			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+		drm_dbg_kms(&dev_priv->drm,
+			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
+			    "size is out of scaler range\n",
+			    intel_crtc->pipe, scaler_user, src_w, src_h,
+			    dst_w, dst_h);
 		return -EINVAL;
 	}
 
 	/* mark this plane as a scaler user in crtc_state */
 	scaler_state->scaler_users |= (1 << scaler_user);
-	DRM_DEBUG_KMS("scaler_user index %u.%u: "
-		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
-		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
-		scaler_state->scaler_users);
+	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+		    scaler_state->scaler_users);
 
 	return 0;
 }
@@ -6036,7 +6124,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
 	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
 	bool need_scaler = false;
 
-	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+	    state->pch_pfit.enabled)
 		need_scaler = true;
 
 	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
@@ -6088,9 +6177,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 
 	/* check colorkey */
 	if (plane_state->ckey.flags) {
-		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
-			      intel_plane->base.base.id,
-			      intel_plane->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[PLANE:%d:%s] scaling with color key not allowed",
+			    intel_plane->base.base.id,
+			    intel_plane->base.name);
 		return -EINVAL;
 	}
 
@@ -6128,9 +6218,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 			break;
 		/* fall through */
 	default:
-		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
-			      intel_plane->base.base.id, intel_plane->base.name,
-			      fb->base.id, fb->format->format);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+			    intel_plane->base.base.id, intel_plane->base.name,
+			    fb->base.id, fb->format->format);
 		return -EINVAL;
 	}
 
@@ -6157,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
 	if (crtc_state->pch_pfit.enabled) {
 		u16 uv_rgb_hphase, uv_rgb_vphase;
 		int pfit_w, pfit_h, hscale, vscale;
+		unsigned long irqflags;
 		int id;
 
-		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+		if (drm_WARN_ON(&dev_priv->drm,
+				crtc_state->scaler_state.scaler_id < 0))
 			return;
 
 		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
@@ -6172,14 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
 		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
 		id = scaler_state->scaler_id;
-		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
-			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
-		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
-			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
-			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
-		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
+
+		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+		intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+				  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+		intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+		intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+		intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+				  crtc_state->pch_pfit.pos);
+		intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+				  crtc_state->pch_pfit.size);
+
+		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 	}
 }
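
The pch_pfit.pos and pch_pfit.size fields are packed u32s, x/width in the high 16 bits and y/height in the low 16, matching the SKL_PS_WIN_POS/SKL_PS_WIN_SZ register layout; the decode is visible in the hunk above. In isolation:

	u32 pos  = (x << 16) | y;		/* window position */
	u32 size = (w << 16) | h;		/* window size */
	int pfit_w = (size >> 16) & 0xFFFF;	/* decode, as above */
	int pfit_h = size & 0xFFFF;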
 
@@ -6195,12 +6295,15 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
 		 * e.g. x201.
 		 */
 		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
-			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
-						 PF_PIPE_SEL_IVB(pipe));
+			intel_de_write(dev_priv, PF_CTL(pipe),
+				       PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
 		else
-			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
-		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
-		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
+			intel_de_write(dev_priv, PF_CTL(pipe),
+				       PF_ENABLE | PF_FILTER_MED_3x3);
+		intel_de_write(dev_priv, PF_WIN_POS(pipe),
+			       crtc_state->pch_pfit.pos);
+		intel_de_write(dev_priv, PF_WIN_SZ(pipe),
+			       crtc_state->pch_pfit.size);
 	}
 }
 
@@ -6218,25 +6321,26 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
 	 * This function is called from post_plane_update, which is run after
 	 * a vblank wait.
 	 */
-	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
 
 	if (IS_BROADWELL(dev_priv)) {
-		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
-						IPS_ENABLE | IPS_PCODE_CONTROL));
+		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+							 IPS_ENABLE | IPS_PCODE_CONTROL));
 		/* Quoting Art Runyan: "it's not safe to expect any particular
 		 * value in IPS_CTL bit 31 after enabling IPS through the
 		 * mailbox." Moreover, the mailbox may return a bogus state,
 		 * so we need to just enable it and continue on.
 		 */
 	} else {
-		I915_WRITE(IPS_CTL, IPS_ENABLE);
+		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
 		/* The bit only becomes 1 in the next vblank, so this wait here
 		 * is essentially intel_wait_for_vblank. If we don't have this
 		 * and don't wait for vblanks until the end of crtc_enable, then
 		 * the HW state readout code will complain that the expected
 		 * IPS_CTL value is not the one we read. */
 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
-			DRM_ERROR("Timed out waiting for IPS enable\n");
+			drm_err(&dev_priv->drm,
+				"Timed out waiting for IPS enable\n");
 	}
 }
 
@@ -6250,17 +6354,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 		return;
 
 	if (IS_BROADWELL(dev_priv)) {
-		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+		drm_WARN_ON(dev,
+			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		/*
 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
 		 * 42ms timeout value leads to occasional timeouts so use 100ms
 		 * instead.
 		 */
 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
-			DRM_ERROR("Timed out waiting for IPS disable\n");
+			drm_err(&dev_priv->drm,
+				"Timed out waiting for IPS disable\n");
 	} else {
-		I915_WRITE(IPS_CTL, 0);
-		POSTING_READ(IPS_CTL);
+		intel_de_write(dev_priv, IPS_CTL, 0);
+		intel_de_posting_read(dev_priv, IPS_CTL);
 	}
 
 	/* We need to wait for a vblank before we can disable the plane. */
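
Two different IPS toggle paths show up in these two functions: Broadwell goes through the pcode mailbox (sandybridge_pcode_write()), while Haswell uses a plain MMIO write to IPS_CTL followed by intel_de_wait_for_set()/intel_de_wait_for_clear(), which poll a register against a mask with a millisecond timeout and return 0 on success. The Haswell enable path in isolation:

	intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
		drm_err(&dev_priv->drm, "Timed out waiting for IPS enable\n");
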
@@ -6382,13 +6488,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
 				    struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 	const struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_plane_state *new_primary_state =
-		intel_atomic_get_new_plane_state(state, primary);
 	enum pipe pipe = crtc->pipe;
 
 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
@@ -6399,8 +6502,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
 		hsw_enable_ips(new_crtc_state);
 
-	if (new_primary_state)
-		intel_fbc_post_update(crtc);
+	intel_fbc_post_update(state, crtc);
 
 	if (needs_nv12_wa(old_crtc_state) &&
 	    !needs_nv12_wa(new_crtc_state))
@@ -6415,20 +6517,16 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
 				   struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_plane *primary = to_intel_plane(crtc->base.primary);
 	const struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
 	const struct intel_crtc_state *new_crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct intel_plane_state *new_primary_state =
-		intel_atomic_get_new_plane_state(state, primary);
 	enum pipe pipe = crtc->pipe;
 
 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
 		hsw_disable_ips(old_crtc_state);
 
-	if (new_primary_state &&
-	    intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+	if (intel_fbc_pre_update(state, crtc))
 		intel_wait_for_vblank(dev_priv, pipe);
 
 	/* Display WA 827 */
@@ -6770,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(crtc->active))
+	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
 		return;
 
 	/*
@@ -6863,7 +6961,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
 					    enum pipe pipe, bool apply)
 {
-	u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
 
 	if (apply)
@@ -6871,7 +6969,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
 	else
 		val &= ~mask;
 
-	I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
 }
 
 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
@@ -6890,7 +6988,17 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
 		val |= MBUS_DBOX_B_CREDIT(8);
 	}
 
-	I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
+static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+		       HSW_LINETIME(crtc_state->linetime) |
+		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
 }
 
 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
@@ -6900,10 +7008,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
 	u32 val;
 
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	val &= ~HSW_FRAME_START_DELAY_MASK;
 	val |= HSW_FRAME_START_DELAY(0);
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 }
 
 static void hsw_crtc_enable(struct intel_atomic_state *state,
@@ -6916,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
 	bool psl_clkgate_wa;
 
-	if (WARN_ON(crtc->active))
+	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
 		return;
 
 	intel_encoders_pre_pll_enable(state, crtc);
@@ -6926,9 +7034,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 
 	intel_encoders_pre_enable(state, crtc);
 
-	if (intel_crtc_has_dp_encoder(new_crtc_state))
-		intel_dp_set_m_n(new_crtc_state, M1_N1);
-
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_set_pipe_timings(new_crtc_state);
 
@@ -6939,8 +7044,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 
 	if (cpu_transcoder != TRANSCODER_EDP &&
 	    !transcoder_is_dsi(cpu_transcoder))
-		I915_WRITE(PIPE_MULT(cpu_transcoder),
-			   new_crtc_state->pixel_multiplier - 1);
+		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+			       new_crtc_state->pixel_multiplier - 1);
 
 	if (new_crtc_state->has_pch_encoder)
 		intel_cpu_transcoder_set_m_n(new_crtc_state,
@@ -6977,6 +7082,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 	if (INTEL_GEN(dev_priv) < 9)
 		intel_disable_primary_plane(new_crtc_state);
 
+	hsw_set_linetime_wm(new_crtc_state);
+
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_set_pipe_chicken(crtc);
 
@@ -6989,15 +7096,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_pipe_mbus_enable(crtc);
 
-	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
-	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_enable_pipe(new_crtc_state);
-
-	if (new_crtc_state->has_pch_encoder)
-		lpt_pch_enable(state, new_crtc_state);
-
-	intel_crtc_vblank_on(new_crtc_state);
-
 	intel_encoders_enable(state, crtc);
 
 	if (psl_clkgate_wa) {
@@ -7023,9 +7121,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 	/* To avoid upsetting the power well on haswell, only disable the pfit if
 	 * it's in use. The hw state code will make sure we get this right. */
 	if (old_crtc_state->pch_pfit.enabled) {
-		I915_WRITE(PF_CTL(pipe), 0);
-		I915_WRITE(PF_WIN_POS(pipe), 0);
-		I915_WRITE(PF_WIN_SZ(pipe), 0);
+		intel_de_write(dev_priv, PF_CTL(pipe), 0);
+		intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+		intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
 	}
 }
 
@@ -7067,16 +7165,16 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
 
 			/* disable TRANS_DP_CTL */
 			reg = TRANS_DP_CTL(pipe);
-			temp = I915_READ(reg);
+			temp = intel_de_read(dev_priv, reg);
 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
 				  TRANS_DP_PORT_SEL_MASK);
 			temp |= TRANS_DP_PORT_SEL_NONE;
-			I915_WRITE(reg, temp);
+			intel_de_write(dev_priv, reg, temp);
 
 			/* disable DPLL_SEL */
-			temp = I915_READ(PCH_DPLL_SEL);
+			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
-			I915_WRITE(PCH_DPLL_SEL, temp);
+			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
 		}
 
 		ilk_fdi_pll_disable(crtc);
@@ -7109,15 +7207,17 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
 	 * according to register description and PRM.
 	 */
-	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
 
-	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
+	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
+		       crtc_state->gmch_pfit.pgm_ratios);
+	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
 
 	/* Border color in case we don't scale up to the full screen. Black by
 	 * default, change to something else for debugging. */
-	I915_WRITE(BCLRPAT(crtc->pipe), 0);
+	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
 }
 
 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
@@ -7304,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(crtc->active))
+	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
 		return;
 
 	if (intel_crtc_has_dp_encoder(new_crtc_state))
@@ -7314,8 +7414,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
 	intel_set_pipe_src_size(new_crtc_state);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
-		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
-		I915_WRITE(CHV_CANVAS(pipe), 0);
+		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
 	}
 
 	i9xx_set_pipeconf(new_crtc_state);
@@ -7356,8 +7456,10 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
-	I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
+	intel_de_write(dev_priv, FP0(crtc->pipe),
+		       crtc_state->dpll_hw_state.fp0);
+	intel_de_write(dev_priv, FP1(crtc->pipe),
+		       crtc_state->dpll_hw_state.fp1);
 }
 
 static void i9xx_crtc_enable(struct intel_atomic_state *state,
@@ -7368,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	if (WARN_ON(crtc->active))
+	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
 		return;
 
 	i9xx_set_pll_dividers(new_crtc_state);
@@ -7418,9 +7520,9 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 
 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
 
-	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
-		      I915_READ(PFIT_CONTROL));
-	I915_WRITE(PFIT_CONTROL, 0);
+	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
+		    intel_de_read(dev_priv, PFIT_CONTROL));
+	intel_de_write(dev_priv, PFIT_CONTROL, 0);
 }
 
 static void i9xx_crtc_disable(struct intel_atomic_state *state,
@@ -7477,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_bw_state *bw_state =
 		to_intel_bw_state(dev_priv->bw_obj.state);
+	struct intel_cdclk_state *cdclk_state =
+		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
 	struct intel_crtc_state *crtc_state =
 		to_intel_crtc_state(crtc->base.state);
 	enum intel_display_power_domain domain;
@@ -7500,8 +7604,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 
 	state = drm_atomic_state_alloc(&dev_priv->drm);
 	if (!state) {
-		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
-			      crtc->base.base.id, crtc->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to disable [CRTC:%d:%s], out of memory",
+			    crtc->base.base.id, crtc->base.name);
 		return;
 	}
 
@@ -7511,19 +7616,21 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
 
-	WARN_ON(IS_ERR(temp_crtc_state) || ret);
+	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
 
 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
 
 	drm_atomic_state_put(state);
 
-	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
-		      crtc->base.base.id, crtc->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+		    crtc->base.base.id, crtc->base.name);
 
 	crtc->active = false;
 	crtc->base.enabled = false;
 
-	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+	drm_WARN_ON(&dev_priv->drm,
+		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
 	crtc_state->uapi.active = false;
 	crtc_state->uapi.connector_mask = 0;
 	crtc_state->uapi.encoder_mask = 0;
@@ -7543,8 +7650,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
 	crtc->enabled_power_domains = 0;
 
 	dev_priv->active_pipes &= ~BIT(pipe);
-	dev_priv->min_cdclk[pipe] = 0;
-	dev_priv->min_voltage_level[pipe] = 0;
+	cdclk_state->min_cdclk[pipe] = 0;
+	cdclk_state->min_voltage_level[pipe] = 0;
+	cdclk_state->active_pipes &= ~BIT(pipe);
 
 	bw_state->data_rate[pipe] = 0;
 	bw_state->num_active_planes[pipe] = 0;
@@ -7563,7 +7671,8 @@ int intel_display_suspend(struct drm_device *dev)
 	state = drm_atomic_helper_suspend(dev);
 	ret = PTR_ERR_OR_ZERO(state);
 	if (ret)
-		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+			ret);
 	else
 		dev_priv->modeset_restore_state = state;
 	return ret;
@@ -7583,13 +7692,13 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
 					 struct drm_connector_state *conn_state)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.base.id,
-		      connector->base.name);
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+		    connector->base.base.id, connector->base.name);
 
 	if (connector->get_hw_state(connector)) {
-		struct intel_encoder *encoder = connector->encoder;
+		struct intel_encoder *encoder = intel_attached_encoder(connector);
 
 		I915_STATE_WARN(!crtc_state,
 			 "connector enabled without attached crtc\n");
@@ -7632,18 +7741,21 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 	struct intel_crtc *other_crtc;
 	struct intel_crtc_state *other_crtc_state;
 
-	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
-		      pipe_name(pipe), pipe_config->fdi_lanes);
+	drm_dbg_kms(&dev_priv->drm,
+		    "checking fdi config on pipe %c, lanes %i\n",
+		    pipe_name(pipe), pipe_config->fdi_lanes);
 	if (pipe_config->fdi_lanes > 4) {
-		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
-			      pipe_name(pipe), pipe_config->fdi_lanes);
+		drm_dbg_kms(&dev_priv->drm,
+			    "invalid fdi lane config on pipe %c: %i lanes\n",
+			    pipe_name(pipe), pipe_config->fdi_lanes);
 		return -EINVAL;
 	}
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		if (pipe_config->fdi_lanes > 2) {
-			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
-				      pipe_config->fdi_lanes);
+			drm_dbg_kms(&dev_priv->drm,
+				    "only 2 lanes on haswell, required: %i lanes\n",
+				    pipe_config->fdi_lanes);
 			return -EINVAL;
 		} else {
 			return 0;
@@ -7668,15 +7780,17 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 			return PTR_ERR(other_crtc_state);
 
 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
-			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
-				      pipe_name(pipe), pipe_config->fdi_lanes);
+			drm_dbg_kms(&dev_priv->drm,
+				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
+				    pipe_name(pipe), pipe_config->fdi_lanes);
 			return -EINVAL;
 		}
 		return 0;
 	case PIPE_C:
 		if (pipe_config->fdi_lanes > 2) {
-			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
-				      pipe_name(pipe), pipe_config->fdi_lanes);
+			drm_dbg_kms(&dev_priv->drm,
+				    "only 2 lanes on pipe %c: required %i lanes\n",
+				    pipe_name(pipe), pipe_config->fdi_lanes);
 			return -EINVAL;
 		}
 
@@ -7687,7 +7801,8 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
 			return PTR_ERR(other_crtc_state);
 
 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
-			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "fdi link B uses too many lanes to enable link C\n");
 			return -EINVAL;
 		}
 		return 0;
@@ -7701,6 +7816,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *i915 = to_i915(dev);
 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int lane, link_bw, fdi_dotclock, ret;
 	bool needs_recompute = false;
@@ -7713,7 +7829,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
 	 * Hence the bw of each lane in terms of the mode signal
 	 * is:
 	 */
-	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
+	link_bw = intel_fdi_link_freq(i915, pipe_config);
 
 	fdi_dotclock = adjusted_mode->crtc_clock;
 
@@ -7731,8 +7847,9 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
 
 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
 		pipe_config->pipe_bpp -= 2*3;
-		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
-			      pipe_config->pipe_bpp);
+		drm_dbg_kms(&i915->drm,
+			    "fdi link bw constraint, reducing pipe bpp to %i\n",
+			    pipe_config->pipe_bpp);
 		needs_recompute = true;
 		pipe_config->bw_constrained = true;
 
@@ -7774,15 +7891,17 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
+static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv =
 		to_i915(crtc_state->uapi.crtc->dev);
-	struct intel_atomic_state *intel_state =
+	struct intel_atomic_state *state =
 		to_intel_atomic_state(crtc_state->uapi.state);
 
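+	/* IPS stays disabled unless every check below passes */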
+	crtc_state->ips_enabled = false;
+
 	if (!hsw_crtc_state_ips_capable(crtc_state))
-		return false;
+		return 0;
 
 	/*
 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -7791,18 +7910,27 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
 	 * completely disable it.
 	 */
 	if (crtc_state->crc_enabled)
-		return false;
+		return 0;
 
 	/* IPS should be fine as long as at least one plane is enabled. */
 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
-		return false;
+		return 0;
 
-	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-	if (IS_BROADWELL(dev_priv) &&
-	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
-		return false;
+	if (IS_BROADWELL(dev_priv)) {
+		const struct intel_cdclk_state *cdclk_state;
 
-	return true;
+		cdclk_state = intel_atomic_get_cdclk_state(state);
+		if (IS_ERR(cdclk_state))
+			return PTR_ERR(cdclk_state);
+
+		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+			return 0;
+	}
+
+	crtc_state->ips_enabled = true;
+
+	return 0;
 }
 
 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
@@ -7884,9 +8012,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	}
 
 	if (adjusted_mode->crtc_clock > clock_limit) {
-		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
-			      adjusted_mode->crtc_clock, clock_limit,
-			      yesno(pipe_config->double_wide));
+		drm_dbg_kms(&dev_priv->drm,
+			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+			    adjusted_mode->crtc_clock, clock_limit,
+			    yesno(pipe_config->double_wide));
 		return -EINVAL;
 	}
 
@@ -7898,7 +8027,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 		 * for output conversion from RGB->YCBCR. So if CTM is already
 		 * applied we can't support YCBCR420 output.
 		 */
-		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "YCBCR420 and CTM together are not possible\n");
 		return -EINVAL;
 	}
 
@@ -7910,13 +8040,15 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	 */
 	if (pipe_config->pipe_src_w & 1) {
 		if (pipe_config->double_wide) {
-			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Odd pipe source width not supported with double wide pipe\n");
 			return -EINVAL;
 		}
 
 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
 		    intel_is_dual_link_lvds(dev_priv)) {
-			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Odd pipe source width not supported with dual link LVDS\n");
 			return -EINVAL;
 		}
 	}
@@ -7997,13 +8129,15 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
 	 * indicates as much.
 	 */
 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
-		bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+						       PCH_DREF_CONTROL) &
 			DREF_SSC1_ENABLE;
 
 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
-			DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
-				      enableddisabled(bios_lvds_use_ssc),
-				      enableddisabled(dev_priv->vbt.lvds_use_ssc));
+			drm_dbg_kms(&dev_priv->drm,
+				    "SSC %s by BIOS, overriding VBT which says %s\n",
+				    enableddisabled(bios_lvds_use_ssc),
+				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
 		}
 	}
@@ -8090,10 +8224,11 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
-	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
-	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
+		       TU_SIZE(m_n->tu) | m_n->gmch_m);
+	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
 }
 
 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
@@ -8119,33 +8254,42 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
 	enum transcoder transcoder = crtc_state->cpu_transcoder;
 
 	if (INTEL_GEN(dev_priv) >= 5) {
-		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
-		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
-		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
+			       TU_SIZE(m_n->tu) | m_n->gmch_m);
+		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
+			       m_n->gmch_n);
+		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
+			       m_n->link_m);
+		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
+			       m_n->link_n);
 		/*
 		 *  M2_N2 registers are set only if DRRS is supported
 		 * (to make sure the registers are not unnecessarily accessed).
 		 */
 		if (m2_n2 && crtc_state->has_drrs &&
 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
-			I915_WRITE(PIPE_DATA_M2(transcoder),
-					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
-			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
-			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
-			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
+				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
+				       m2_n2->gmch_n);
+			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
+				       m2_n2->link_m);
+			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
+				       m2_n2->link_n);
 		}
 	} else {
-		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
-		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
-		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
+			       TU_SIZE(m_n->tu) | m_n->gmch_m);
+		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
+		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
 	}
 }
 
 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
 {
 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (m_n == M1_N1) {
 		dp_m_n = &crtc_state->dp_m_n;
@@ -8158,7 +8302,7 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
 		 */
 		dp_m_n = &crtc_state->dp_m2_n2;
 	} else {
-		DRM_ERROR("Unsupported divider value\n");
+		drm_err(&i915->drm, "Unsupported divider value\n");
 		return;
 	}
 
@@ -8212,9 +8356,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 	u32 coreclk, reg_val;
 
 	/* Enable Refclk */
-	I915_WRITE(DPLL(pipe),
-		   pipe_config->dpll_hw_state.dpll &
-		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+	intel_de_write(dev_priv, DPLL(pipe),
+		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
 
 	/* No need to actually set up the DPLL with DSI */
 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8314,8 +8457,8 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
 	int vco;
 
 	/* Enable Refclk and SSC */
-	I915_WRITE(DPLL(pipe),
-		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+	intel_de_write(dev_priv, DPLL(pipe),
+		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
 
 	/* No need to actually set up the DPLL with DSI */
 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8614,27 +8757,22 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 	}
 
 	if (INTEL_GEN(dev_priv) > 3)
-		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
+			       vsyncshift);
 
-	I915_WRITE(HTOTAL(cpu_transcoder),
-		   (adjusted_mode->crtc_hdisplay - 1) |
-		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(HBLANK(cpu_transcoder),
-		   (adjusted_mode->crtc_hblank_start - 1) |
-		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(HSYNC(cpu_transcoder),
-		   (adjusted_mode->crtc_hsync_start - 1) |
-		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
+		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
+		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
+		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
 
-	I915_WRITE(VTOTAL(cpu_transcoder),
-		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((crtc_vtotal - 1) << 16));
-	I915_WRITE(VBLANK(cpu_transcoder),
-		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((crtc_vblank_end - 1) << 16));
-	I915_WRITE(VSYNC(cpu_transcoder),
-		   (adjusted_mode->crtc_vsync_start - 1) |
-		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
+		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
+	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
+		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
+	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
+		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
 
 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -8642,7 +8780,8 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
 	 * bits. */
 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
 	    (pipe == PIPE_B || pipe == PIPE_C))
-		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+		intel_de_write(dev_priv, VTOTAL(pipe),
+			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
 
 }
 
@@ -8655,9 +8794,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 	/* pipesrc controls the size that is scaled from, which should
 	 * always be the user's requested size.
 	 */
-	I915_WRITE(PIPESRC(pipe),
-		   ((crtc_state->pipe_src_w - 1) << 16) |
-		   (crtc_state->pipe_src_h - 1));
+	intel_de_write(dev_priv, PIPESRC(pipe),
+		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
 }
 
 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -8670,9 +8808,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
 
 	if (INTEL_GEN(dev_priv) >= 9 ||
 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
-		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
 	else
-		return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -8683,33 +8821,33 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	u32 tmp;
 
-	tmp = I915_READ(HTOTAL(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
 
 	if (!transcoder_is_dsi(cpu_transcoder)) {
-		tmp = I915_READ(HBLANK(cpu_transcoder));
+		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
 							(tmp & 0xffff) + 1;
 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
 						((tmp >> 16) & 0xffff) + 1;
 	}
-	tmp = I915_READ(HSYNC(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
 
-	tmp = I915_READ(VTOTAL(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
 
 	if (!transcoder_is_dsi(cpu_transcoder)) {
-		tmp = I915_READ(VBLANK(cpu_transcoder));
+		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
 							(tmp & 0xffff) + 1;
 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
 						((tmp >> 16) & 0xffff) + 1;
 	}
-	tmp = I915_READ(VSYNC(cpu_transcoder));
+	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
 
@@ -8727,7 +8865,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
-	tmp = I915_READ(PIPESRC(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
 
@@ -8768,7 +8906,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	/* we keep both pipes enabled on 830 */
 	if (IS_I830(dev_priv))
-		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
 
 	if (crtc_state->double_wide)
 		pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -8815,8 +8953,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	pipeconf |= PIPECONF_FRAME_START_DELAY(0);
 
-	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
-	POSTING_READ(PIPECONF(crtc->pipe));
+	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
+	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
 }
 
 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -8833,7 +8971,9 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
-			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
 		}
 
 		limit = &intel_limits_i8xx_lvds;
@@ -8846,7 +8986,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				 refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -8868,7 +9009,9 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
-			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
 		}
 
 		if (intel_is_dual_link_lvds(dev_priv))
@@ -8888,7 +9031,8 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -8911,7 +9055,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
-			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
 		}
 
 		limit = &pnv_limits_lvds;
@@ -8922,7 +9068,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -8945,7 +9092,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
-			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    refclk);
 		}
 
 		limit = &intel_limits_i9xx_lvds;
@@ -8956,7 +9105,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				 refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -8970,6 +9120,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
 {
 	int refclk = 100000;
 	const struct intel_limit *limit = &intel_limits_chv;
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
@@ -8977,7 +9128,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -8991,6 +9142,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
 {
 	int refclk = 100000;
 	const struct intel_limit *limit = &intel_limits_vlv;
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
@@ -8998,7 +9150,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
@@ -9025,7 +9177,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 	if (!i9xx_has_pfit(dev_priv))
 		return;
 
-	tmp = I915_READ(PFIT_CONTROL);
+	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
 	if (!(tmp & PFIT_ENABLE))
 		return;
 
@@ -9039,7 +9191,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 	}
 
 	pipe_config->gmch_pfit.control = tmp;
-	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+	pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
+							  PFIT_PGM_RATIOS);
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9087,11 +9240,11 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	if (!plane->get_hw_state(plane, &pipe))
 		return;
 
-	WARN_ON(pipe != crtc->pipe);
+	drm_WARN_ON(dev, pipe != crtc->pipe);
 
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
-		DRM_DEBUG_KMS("failed to alloc fb\n");
+		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
 		return;
 	}
 
@@ -9099,7 +9252,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb->dev = dev;
 
-	val = I915_READ(DSPCNTR(i9xx_plane));
+	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
 
 	if (INTEL_GEN(dev_priv) >= 4) {
 		if (val & DISPPLANE_TILED) {
@@ -9120,34 +9273,37 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 	fb->format = drm_format_info(fourcc);
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		offset = I915_READ(DSPOFFSET(i9xx_plane));
-		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+		offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
 	} else if (INTEL_GEN(dev_priv) >= 4) {
 		if (plane_config->tiling)
-			offset = I915_READ(DSPTILEOFF(i9xx_plane));
+			offset = intel_de_read(dev_priv,
+					       DSPTILEOFF(i9xx_plane));
 		else
-			offset = I915_READ(DSPLINOFF(i9xx_plane));
-		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+			offset = intel_de_read(dev_priv,
+					       DSPLINOFF(i9xx_plane));
+		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
 	} else {
-		base = I915_READ(DSPADDR(i9xx_plane));
+		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
 	}
 	plane_config->base = base;
 
-	val = I915_READ(PIPESRC(pipe));
+	val = intel_de_read(dev_priv, PIPESRC(pipe));
 	fb->width = ((val >> 16) & 0xfff) + 1;
 	fb->height = ((val >> 0) & 0xfff) + 1;
 
-	val = I915_READ(DSPSTRIDE(i9xx_plane));
+	val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
 	fb->pitches[0] = val & 0xffffffc0;
 
 	aligned_height = intel_fb_align_height(fb, 0, fb->height);
 
 	plane_config->size = fb->pitches[0] * aligned_height;
 
-	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
-		      crtc->base.name, plane->base.name, fb->width, fb->height,
-		      fb->format->cpp[0] * 8, base, fb->pitches[0],
-		      plane_config->size);
+	drm_dbg_kms(&dev_priv->drm,
+		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+		    crtc->base.name, plane->base.name, fb->width, fb->height,
+		    fb->format->cpp[0] * 8, base, fb->pitches[0],
+		    plane_config->size);
 
 	plane_config->fb = intel_fb;
 }
@@ -9192,11 +9348,12 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 tmp;
 
-	tmp = I915_READ(PIPEMISC(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
 	if (tmp & PIPEMISC_YUV420_ENABLE) {
 		/* We support 4:2:0 in full blend mode only */
-		WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+		drm_WARN_ON(&dev_priv->drm,
+			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
 
 		return INTEL_OUTPUT_FORMAT_YCBCR420;
 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -9214,7 +9371,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
 	u32 tmp;
 
-	tmp = I915_READ(DSPCNTR(i9xx_plane));
+	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
 
 	if (tmp & DISPPLANE_GAMMA_ENABLE)
 		crtc_state->gamma_enable = true;
@@ -9245,7 +9402,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 
 	ret = false;
 
-	tmp = I915_READ(PIPECONF(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		goto out;
 
@@ -9274,7 +9431,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 		PIPECONF_GAMMA_MODE_SHIFT;
 
 	if (IS_CHERRYVIEW(dev_priv))
-		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+		pipe_config->cgm_mode = intel_de_read(dev_priv,
+						      CGM_PIPE_MODE(crtc->pipe));
 
 	i9xx_get_pipe_color_config(pipe_config);
 	intel_color_get_config(pipe_config);
@@ -9292,14 +9450,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
 		else
-			tmp = I915_READ(DPLL_MD(crtc->pipe));
+			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
 		pipe_config->pixel_multiplier =
 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
 		pipe_config->dpll_hw_state.dpll_md = tmp;
 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
-		tmp = I915_READ(DPLL(crtc->pipe));
+		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
 		pipe_config->pixel_multiplier =
 			((tmp & SDVO_MULTIPLIER_MASK)
 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -9309,10 +9467,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 		 * function. */
 		pipe_config->pixel_multiplier = 1;
 	}
-	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
+							DPLL(crtc->pipe));
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
-		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
-		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
+							       FP0(crtc->pipe));
+		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
+							       FP1(crtc->pipe));
 	} else {
 		/* Mask out read-only status bits. */
 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
@@ -9381,8 +9542,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 	}
 
 	/* Check if any DPLLs are using the SSC source */
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		u32 temp = I915_READ(PCH_DPLL(i));
+	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
 
 		if (!(temp & DPLL_VCO_ENABLE))
 			continue;
@@ -9394,15 +9555,16 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
-		      has_panel, has_lvds, has_ck505, using_ssc_source);
+	drm_dbg_kms(&dev_priv->drm,
+		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+		    has_panel, has_lvds, has_ck505, using_ssc_source);
 
 	/* Ironlake: try to setup display ref clock before DPLL
 	 * enabling. This is only under driver's control after
 	 * PCH B stepping, previous chipset stepping should be
 	 * ignoring this setting.
 	 */
-	val = I915_READ(PCH_DREF_CONTROL);
+	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
 
 	/* As we must carefully and slowly disable/enable each source in turn,
 	 * compute the final state we want first and check if we need to
@@ -9454,14 +9616,14 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 
 		/* SSC must be turned on before enabling the CPU output  */
 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
-			DRM_DEBUG_KMS("Using SSC on panel\n");
+			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
 			val |= DREF_SSC1_ENABLE;
 		} else
 			val &= ~DREF_SSC1_ENABLE;
 
 		/* Get SSC going before enabling the outputs */
-		I915_WRITE(PCH_DREF_CONTROL, val);
-		POSTING_READ(PCH_DREF_CONTROL);
+		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
 		udelay(200);
 
 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -9469,30 +9631,31 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 		/* Enable CPU source on CPU attached eDP */
 		if (has_cpu_edp) {
 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
-				DRM_DEBUG_KMS("Using SSC on eDP\n");
+				drm_dbg_kms(&dev_priv->drm,
+					    "Using SSC on eDP\n");
 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 			} else
 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 		} else
 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
-		I915_WRITE(PCH_DREF_CONTROL, val);
-		POSTING_READ(PCH_DREF_CONTROL);
+		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
 		udelay(200);
 	} else {
-		DRM_DEBUG_KMS("Disabling CPU source output\n");
+		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
 
 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
 		/* Turn off CPU output */
 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
-		I915_WRITE(PCH_DREF_CONTROL, val);
-		POSTING_READ(PCH_DREF_CONTROL);
+		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
 		udelay(200);
 
 		if (!using_ssc_source) {
-			DRM_DEBUG_KMS("Disabling SSC source\n");
+			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
 
 			/* Turn off the SSC source */
 			val &= ~DREF_SSC_SOURCE_MASK;
@@ -9501,8 +9664,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
 			/* Turn off SSC1 */
 			val &= ~DREF_SSC1_ENABLE;
 
-			I915_WRITE(PCH_DREF_CONTROL, val);
-			POSTING_READ(PCH_DREF_CONTROL);
+			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
 			udelay(200);
 		}
 	}
@@ -9514,21 +9677,21 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 {
 	u32 tmp;
 
-	tmp = I915_READ(SOUTH_CHICKEN2);
+	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
-	I915_WRITE(SOUTH_CHICKEN2, tmp);
+	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
 
-	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
-		DRM_ERROR("FDI mPHY reset assert timeout\n");
+		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
 
-	tmp = I915_READ(SOUTH_CHICKEN2);
+	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
-	I915_WRITE(SOUTH_CHICKEN2, tmp);
+	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
 
-	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
-		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
 }
 
 /* WaMPhyProgramming:hsw */
@@ -9617,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
 {
 	u32 reg, tmp;
 
-	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+		     "FDI requires downspread\n"))
 		with_spread = true;
-	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
-	    with_fdi, "LP PCH doesn't have FDI\n"))
+	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+		     with_fdi, "LP PCH doesn't have FDI\n"))
 		with_fdi = false;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -9714,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
 	u32 tmp;
 	int idx = BEND_IDX(steps);
 
-	if (WARN_ON(steps % 5 != 0))
+	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
 		return;
 
-	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
 		return;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -9740,8 +9904,8 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
 
 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
 {
-	u32 fuse_strap = I915_READ(FUSE_STRAP);
-	u32 ctl = I915_READ(SPLL_CTL);
+	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
 
 	if ((ctl & SPLL_PLL_ENABLE) == 0)
 		return false;
@@ -9760,8 +9924,8 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
 			       enum intel_dpll_id id)
 {
-	u32 fuse_strap = I915_READ(FUSE_STRAP);
-	u32 ctl = I915_READ(WRPLL_CTL(id));
+	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
 
 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
 		return false;
@@ -9810,17 +9974,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 	dev_priv->pch_ssc_use = 0;
 
 	if (spll_uses_pch_ssc(dev_priv)) {
-		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
 	}
 
 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
-		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
 	}
 
 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
-		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
 	}
 
@@ -9885,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
 	 * This would end up with an odd purple hue over
 	 * the entire display. Make sure we don't do it.
 	 */
-	WARN_ON(crtc_state->limited_color_range &&
-		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 
 	if (crtc_state->limited_color_range)
 		val |= PIPECONF_COLOR_RANGE_SELECT;
@@ -9898,8 +10062,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
 
 	val |= PIPECONF_FRAME_START_DELAY(0);
 
-	I915_WRITE(PIPECONF(pipe), val);
-	POSTING_READ(PIPECONF(pipe));
+	intel_de_write(dev_priv, PIPECONF(pipe), val);
+	intel_de_posting_read(dev_priv, PIPECONF(pipe));
 }
 
 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
@@ -9921,8 +10085,8 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
 
-	I915_WRITE(PIPECONF(cpu_transcoder), val);
-	POSTING_READ(PIPECONF(cpu_transcoder));
+	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
 }
 
 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
@@ -9965,7 +10129,10 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
 					   BIT(PLANE_CURSOR))) == 0)
 		val |= PIPEMISC_HDR_MODE_PRECISION;
 
-	I915_WRITE(PIPEMISC(crtc->pipe), val);
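+	/* gen12+: select truncation for pixel rounding */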
+	if (INTEL_GEN(dev_priv) >= 12)
+		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
+	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
 }
 
 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
@@ -9973,7 +10140,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 tmp;
 
-	tmp = I915_READ(PIPEMISC(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
 
 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
 	case PIPEMISC_DITHER_6_BPC:
@@ -10126,8 +10293,9 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
-			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
-				      dev_priv->vbt.lvds_ssc_freq);
+			drm_dbg_kms(&dev_priv->drm,
+				    "using SSC reference clock of %d kHz\n",
+				    dev_priv->vbt.lvds_ssc_freq);
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 		}
 
@@ -10149,15 +10317,17 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->clock_set &&
 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
 				refclk, NULL, &crtc_state->dpll)) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		drm_err(&dev_priv->drm,
+			"Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
 	}
 
 	ilk_compute_dpll(crtc, crtc_state, NULL);
 
 	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
-		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
-			      pipe_name(crtc->pipe));
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to find PLL for pipe %c\n",
+			    pipe_name(crtc->pipe));
 		return -EINVAL;
 	}
 
@@ -10171,12 +10341,12 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 
-	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
-	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
-	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
+	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
+	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
 		& ~TU_SIZE_MASK;
-	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
-	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
+	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 }
 
@@ -10189,30 +10359,38 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 	enum pipe pipe = crtc->pipe;
 
 	if (INTEL_GEN(dev_priv) >= 5) {
-		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
-		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
-		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+		m_n->link_m = intel_de_read(dev_priv,
+					    PIPE_LINK_M1(transcoder));
+		m_n->link_n = intel_de_read(dev_priv,
+					    PIPE_LINK_N1(transcoder));
+		m_n->gmch_m = intel_de_read(dev_priv,
+					    PIPE_DATA_M1(transcoder))
 			& ~TU_SIZE_MASK;
-		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+		m_n->gmch_n = intel_de_read(dev_priv,
+					    PIPE_DATA_N1(transcoder));
+		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 
 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
-			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
-			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
-			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
+			m2_n2->link_m = intel_de_read(dev_priv,
+						      PIPE_LINK_M2(transcoder));
+			m2_n2->link_n = intel_de_read(dev_priv,
+						      PIPE_LINK_N2(transcoder));
+			m2_n2->gmch_m = intel_de_read(dev_priv,
+						      PIPE_DATA_M2(transcoder))
 					& ~TU_SIZE_MASK;
-			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
-			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+			m2_n2->gmch_n =	intel_de_read(dev_priv,
+							     PIPE_DATA_N2(transcoder));
+			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 		}
 	} else {
-		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
-		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
-		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
+		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
+		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
 			& ~TU_SIZE_MASK;
-		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
-		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
+		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 	}
 }
@@ -10247,12 +10425,14 @@ static void skl_get_pfit_config(struct intel_crtc *crtc,
 
 	/* find scaler attached to this pipe */
 	for (i = 0; i < crtc->num_scalers; i++) {
-		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+		ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
 			id = i;
 			pipe_config->pch_pfit.enabled = true;
-			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
-			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+			pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+								  SKL_PS_WIN_POS(crtc->pipe, i));
+			pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+								   SKL_PS_WIN_SZ(crtc->pipe, i));
 			scaler_state->scalers[i].in_use = true;
 			break;
 		}
@@ -10284,11 +10464,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
 	if (!plane->get_hw_state(plane, &pipe))
 		return;
 
-	WARN_ON(pipe != crtc->pipe);
+	drm_WARN_ON(dev, pipe != crtc->pipe);
 
 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
 	if (!intel_fb) {
-		DRM_DEBUG_KMS("failed to alloc fb\n");
+		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
 		return;
 	}
 
@@ -10296,7 +10476,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
 
 	fb->dev = dev;
 
-	val = I915_READ(PLANE_CTL(pipe, plane_id));
+	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
@@ -10304,7 +10484,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
 		pixel_format = val & PLANE_CTL_FORMAT_MASK;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
-		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+		alpha = intel_de_read(dev_priv,
+				      PLANE_COLOR_CTL(pipe, plane_id));
 		alpha &= PLANE_COLOR_ALPHA_MASK;
 	} else {
 		alpha = val & PLANE_CTL_ALPHA_MASK;
@@ -10368,16 +10549,16 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
 	    val & PLANE_CTL_FLIP_HORIZONTAL)
 		plane_config->rotation |= DRM_MODE_REFLECT_X;
 
-	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
 	plane_config->base = base;
 
-	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
+	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
 
-	val = I915_READ(PLANE_SIZE(pipe, plane_id));
+	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
 	fb->height = ((val >> 16) & 0xffff) + 1;
 	fb->width = ((val >> 0) & 0xffff) + 1;
 
-	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
+	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
 	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
@@ -10385,10 +10566,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
 
 	plane_config->size = fb->pitches[0] * aligned_height;
 
-	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
-		      crtc->base.name, plane->base.name, fb->width, fb->height,
-		      fb->format->cpp[0] * 8, base, fb->pitches[0],
-		      plane_config->size);
+	drm_dbg_kms(&dev_priv->drm,
+		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+		    crtc->base.name, plane->base.name, fb->width, fb->height,
+		    fb->format->cpp[0] * 8, base, fb->pitches[0],
+		    plane_config->size);
 
 	plane_config->fb = intel_fb;
 	return;
@@ -10404,19 +10586,21 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
-	tmp = I915_READ(PF_CTL(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
 
 	if (tmp & PF_ENABLE) {
 		pipe_config->pch_pfit.enabled = true;
-		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
-		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+		pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+							  PF_WIN_POS(crtc->pipe));
+		pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+							   PF_WIN_SZ(crtc->pipe));
 
 		/* We currently do not free assignments of panel fitters on
 		 * ivb/hsw (since we don't use the higher upscaling modes which
 		 * differentiates them) so just WARN about this case for now. */
 		if (IS_GEN(dev_priv, 7)) {
-			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
-				PF_PIPE_SEL_IVB(crtc->pipe));
+			drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
+				    PF_PIPE_SEL_IVB(crtc->pipe));
 		}
 	}
 }
@@ -10441,7 +10625,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->master_transcoder = INVALID_TRANSCODER;
 
 	ret = false;
-	tmp = I915_READ(PIPECONF(crtc->pipe));
+	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		goto out;
 
@@ -10478,18 +10662,19 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
 		PIPECONF_GAMMA_MODE_SHIFT;
 
-	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+	pipe_config->csc_mode = intel_de_read(dev_priv,
+					      PIPE_CSC_MODE(crtc->pipe));
 
 	i9xx_get_pipe_color_config(pipe_config);
 	intel_color_get_config(pipe_config);
 
-	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 		struct intel_shared_dpll *pll;
 		enum intel_dpll_id pll_id;
 
 		pipe_config->has_pch_encoder = true;
 
-		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
@@ -10502,7 +10687,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 			 */
 			pll_id = (enum intel_dpll_id) crtc->pipe;
 		} else {
-			tmp = I915_READ(PCH_DPLL_SEL);
+			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
 				pll_id = DPLL_ID_PCH_PLL_B;
 			else
@@ -10513,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
 		pll = pipe_config->shared_dpll;
 
-		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
-						&pipe_config->dpll_hw_state));
+		drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
+						 &pipe_config->dpll_hw_state));
 
 		tmp = pipe_config->dpll_hw_state.dpll;
 		pipe_config->pixel_multiplier =
@@ -10552,8 +10737,9 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
 			intel_get_crtc_new_encoder(state, crtc_state);
 
 		if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
-			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
-				      pipe_name(crtc->pipe));
+			drm_dbg_kms(&dev_priv->drm,
+				    "failed to find PLL for pipe %c\n",
+				    pipe_name(crtc->pipe));
 			return -EINVAL;
 		}
 	}
@@ -10567,10 +10753,10 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 	enum intel_dpll_id id;
 	u32 temp;
 
-	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+	temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
 
-	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
 		return;
 
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10585,24 +10771,25 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 	u32 temp;
 
 	if (intel_phy_is_combo(dev_priv, phy)) {
-		temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
 			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
 		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
 	} else if (intel_phy_is_tc(dev_priv, phy)) {
-		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
 
 		if (clk_sel == DDI_CLK_SEL_MG) {
 			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
 								    port));
 			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
 		} else {
-			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+			drm_WARN_ON(&dev_priv->drm,
+				    clk_sel < DDI_CLK_SEL_TBT_162);
 			id = DPLL_ID_ICL_TBTPLL;
 			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
 		}
 	} else {
-		WARN(1, "Invalid port %x\n", port);
+		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
 		return;
 	}
 
@@ -10629,7 +10816,7 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 		id = DPLL_ID_SKL_DPLL2;
 		break;
 	default:
-		DRM_ERROR("Incorrect port type\n");
+		drm_err(&dev_priv->drm, "Incorrect port type\n");
 		return;
 	}
 
@@ -10642,10 +10829,10 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 	enum intel_dpll_id id;
 	u32 temp;
 
-	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+	temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
 	id = temp >> (port * 3 + 1);
 
-	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+	if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
 		return;
 
 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10655,7 +10842,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
 			    struct intel_crtc_state *pipe_config)
 {
 	enum intel_dpll_id id;
-	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
 
 	switch (ddi_pll_sel) {
 	case PORT_CLK_SEL_WRPLL1:
@@ -10723,7 +10910,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 		bool force_thru = false;
 		enum pipe trans_pipe;
 
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+		tmp = intel_de_read(dev_priv,
+				    TRANS_DDI_FUNC_CTL(panel_transcoder));
 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
 			continue;
 
@@ -10738,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 
 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 		default:
-			WARN(1, "unknown pipe linked to transcoder %s\n",
-			     transcoder_name(panel_transcoder));
+			drm_WARN(dev, 1,
+				 "unknown pipe linked to transcoder %s\n",
+				 transcoder_name(panel_transcoder));
 			/* fall through */
 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
 			force_thru = true;
@@ -10767,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	/*
 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
 	 */
-	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
-		enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
 
 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
-	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
 
 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
 	if (!wf)
@@ -10780,7 +10969,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 	wakerefs[power_domain] = wf;
 	*power_domain_mask |= BIT_ULL(power_domain);
 
-	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
 
 	return tmp & PIPECONF_ENABLE;
 }
@@ -10805,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 			cpu_transcoder = TRANSCODER_DSI_C;
 
 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
 
 		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
 		if (!wf)
@@ -10825,11 +11014,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
 			break;
 
 		/* XXX: this works for video mode only */
-		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
 		if (!(tmp & DPI_ENABLE))
 			continue;
 
-		tmp = I915_READ(MIPI_CTRL(port));
+		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
 			continue;
 
@@ -10853,7 +11042,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
 						PORT_A : PORT_B;
 	} else {
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		tmp = intel_de_read(dev_priv,
+				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
 		if (INTEL_GEN(dev_priv) >= 12)
 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
 		else
@@ -10873,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
 
 	pll = pipe_config->shared_dpll;
 	if (pll) {
-		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+		drm_WARN_ON(&dev_priv->drm,
+			    !pll->info->funcs->get_hw_state(dev_priv, pll,
 						&pipe_config->dpll_hw_state));
 	}
 
@@ -10883,10 +11074,10 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
 	 * the PCH transcoder is on.
 	 */
 	if (INTEL_GEN(dev_priv) < 9 &&
-	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
 		pipe_config->has_pch_encoder = true;
 
-		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
 
@@ -10899,7 +11090,8 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
 {
 	u32 trans_port_sync, master_select;
 
-	trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+	trans_port_sync = intel_de_read(dev_priv,
+					TRANS_DDI_FUNC_CTL2(cpu_transcoder));
 
 	if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
 		return INVALID_TRANSCODER;
@@ -10943,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
 		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
 	}
 
-	WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
-		crtc_state->sync_mode_slaves_mask);
+	drm_WARN_ON(&dev_priv->drm,
+		    crtc_state->master_transcoder != INVALID_TRANSCODER &&
+		    crtc_state->sync_mode_slaves_mask);
 }
 
 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
@@ -10955,6 +11148,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	enum intel_display_power_domain power_domain;
 	u64 power_domain_mask;
 	bool active;
+	u32 tmp;
 
 	pipe_config->master_transcoder = INVALID_TRANSCODER;
 
@@ -10974,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	if (IS_GEN9_LP(dev_priv) &&
 	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
 					 &power_domain_mask, wakerefs)) {
-		WARN_ON(active);
+		drm_WARN_ON(&dev_priv->drm, active);
 		active = true;
 	}
 
@@ -10990,7 +11184,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_src_size(crtc, pipe_config);
 
 	if (IS_HASWELL(dev_priv)) {
-		u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+		u32 tmp = intel_de_read(dev_priv,
+					PIPECONF(pipe_config->cpu_transcoder));
 
 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -11013,12 +11208,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
 	}
 
-	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+	pipe_config->gamma_mode = intel_de_read(dev_priv,
+						GAMMA_MODE(crtc->pipe));
 
-	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+	pipe_config->csc_mode = intel_de_read(dev_priv,
+					      PIPE_CSC_MODE(crtc->pipe));
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
 
 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
 			pipe_config->gamma_enable = true;
@@ -11031,8 +11228,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 
 	intel_color_get_config(pipe_config);
 
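+	/* read out the pipe and (hsw/bdw only) IPS line time watermarks */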
+	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
+	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		pipe_config->ips_linetime =
+			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
+
 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-	WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
 
 	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
 	if (wf) {
@@ -11047,7 +11250,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 
 	if (hsw_crtc_supports_ips(crtc)) {
 		if (IS_HASWELL(dev_priv))
-			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+			pipe_config->ips_enabled = intel_de_read(dev_priv,
+								 IPS_CTL) & IPS_ENABLE;
 		else {
 			/*
 			 * We cannot readout IPS state on broadwell, set to
@@ -11061,7 +11265,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
 		pipe_config->pixel_multiplier =
-			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+			intel_de_read(dev_priv,
+				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
 	} else {
 		pipe_config->pixel_multiplier = 1;
 	}
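
The linetime readout added above decodes the packed register with
REG_FIELD_GET(), a typed wrapper around the kernel's FIELD_GET() that derives
the shift from the mask at compile time, so there is no separate *_SHIFT
constant to keep in sync. Generic sketch with a hypothetical field layout:

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_MASK	GENMASK(24, 16)	/* hypothetical 9-bit field */

static inline u16 example_field_get(u32 reg_val)
{
	/* FIELD_GET() shifts right by the mask's lowest set bit */
	return FIELD_GET(EXAMPLE_FIELD_MASK, reg_val);
}
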
@@ -11150,7 +11355,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
 						    plane_state, 0);
 
 	if (src_x != 0 || src_y != 0) {
-		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Arbitrary cursor panning not supported\n");
 		return -EINVAL;
 	}
 
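
The DRM_DEBUG_KMS() -> drm_dbg_kms() conversions are the same story on the
logging side: the message is attributed to a specific drm device instead of
being anonymous. Roughly, in this era's drm_print.h:

#define drm_dbg_kms(drm, fmt, ...)					\
	drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
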
@@ -11181,10 +11387,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
 			      struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
 	int ret;
 
 	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
-		DRM_DEBUG_KMS("cursor cannot be tiled\n");
+		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
 		return -EINVAL;
 	}
 
@@ -11255,6 +11462,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 			     struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
 	int ret;
 
 	ret = intel_check_cursor(crtc_state, plane_state);
@@ -11267,14 +11475,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 
 	/* Check for which cursor types we support */
 	if (!i845_cursor_size_ok(plane_state)) {
-		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-			  drm_rect_width(&plane_state->uapi.dst),
-			  drm_rect_height(&plane_state->uapi.dst));
+		drm_dbg_kms(&i915->drm,
+			    "Cursor dimension %dx%d not supported\n",
+			    drm_rect_width(&plane_state->uapi.dst),
+			    drm_rect_height(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
-	WARN_ON(plane_state->uapi.visible &&
-		plane_state->color_plane[0].stride != fb->pitches[0]);
+	drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+		    plane_state->color_plane[0].stride != fb->pitches[0]);
 
 	switch (fb->pitches[0]) {
 	case 256:
@@ -11283,8 +11492,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
 	case 2048:
 		break;
 	default:
-		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
-			      fb->pitches[0]);
+		drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+			    fb->pitches[0]);
 		return -EINVAL;
 	}
 
@@ -11322,17 +11531,17 @@ static void i845_update_cursor(struct intel_plane *plane,
 	if (plane->cursor.base != base ||
 	    plane->cursor.size != size ||
 	    plane->cursor.cntl != cntl) {
-		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
-		I915_WRITE_FW(CURBASE(PIPE_A), base);
-		I915_WRITE_FW(CURSIZE, size);
-		I915_WRITE_FW(CURPOS(PIPE_A), pos);
-		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+		intel_de_write_fw(dev_priv, CURSIZE, size);
+		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
 
 		plane->cursor.base = base;
 		plane->cursor.size = size;
 		plane->cursor.cntl = cntl;
 	} else {
-		I915_WRITE_FW(CURPOS(PIPE_A), pos);
+		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
 	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
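
Note the cursor writes stay on the *_fw accessors: intel_de_write_fw() is the
raw MMIO write that skips the locking and forcewake bookkeeping of the plain
accessor, which is only safe because the whole sequence runs inside the
uncore.lock critical section, letting the cursor registers update as one
atomic group. The bracketing pattern, as used above:

unsigned long irqflags;

spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* raw writes, no per-access lock/forcewake checks */
intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
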
@@ -11357,7 +11566,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 
 	*pipe = PIPE_A;
 
@@ -11483,20 +11692,22 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 
 	/* Check for which cursor types we support */
 	if (!i9xx_cursor_size_ok(plane_state)) {
-		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-			  drm_rect_width(&plane_state->uapi.dst),
-			  drm_rect_height(&plane_state->uapi.dst));
+		drm_dbg(&dev_priv->drm,
+			"Cursor dimension %dx%d not supported\n",
+			drm_rect_width(&plane_state->uapi.dst),
+			drm_rect_height(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
-	WARN_ON(plane_state->uapi.visible &&
-		plane_state->color_plane[0].stride != fb->pitches[0]);
+	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+		    plane_state->color_plane[0].stride != fb->pitches[0]);
 
 	if (fb->pitches[0] !=
 	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
-		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
-			      fb->pitches[0],
-			      drm_rect_width(&plane_state->uapi.dst));
+		drm_dbg_kms(&dev_priv->drm,
+			    "Invalid cursor stride (%u) (cursor width %d)\n",
+			    fb->pitches[0],
+			    drm_rect_width(&plane_state->uapi.dst));
 		return -EINVAL;
 	}
 
@@ -11512,7 +11723,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
 	 */
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
 	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
-		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "CHV cursor C not allowed to straddle the left screen edge\n");
 		return -EINVAL;
 	}
 
@@ -11573,17 +11785,18 @@ static void i9xx_update_cursor(struct intel_plane *plane,
 	    plane->cursor.size != fbc_ctl ||
 	    plane->cursor.cntl != cntl) {
 		if (HAS_CUR_FBC(dev_priv))
-			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
-		I915_WRITE_FW(CURCNTR(pipe), cntl);
-		I915_WRITE_FW(CURPOS(pipe), pos);
-		I915_WRITE_FW(CURBASE(pipe), base);
+			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+					  fbc_ctl);
+		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
 
 		plane->cursor.base = base;
 		plane->cursor.size = fbc_ctl;
 		plane->cursor.cntl = cntl;
 	} else {
-		I915_WRITE_FW(CURPOS(pipe), pos);
-		I915_WRITE_FW(CURBASE(pipe), base);
+		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
 	}
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11614,7 +11827,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(CURCNTR(plane->pipe));
+	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
 
 	ret = val & MCURSOR_MODE;
 
@@ -11700,13 +11913,13 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	struct intel_crtc_state *crtc_state;
 	int ret, i = -1;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-		      connector->base.id, connector->name,
-		      encoder->base.id, encoder->name);
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		    connector->base.id, connector->name,
+		    encoder->base.id, encoder->name);
 
 	old->restore_state = NULL;
 
-	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
 
 	/*
 	 * Algorithm gets a little messy:
@@ -11753,7 +11966,8 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	 * If we didn't find an unused CRTC, don't use any.
 	 */
 	if (!crtc) {
-		DRM_DEBUG_KMS("no pipe available for load-detect\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "no pipe available for load-detect\n");
 		ret = -ENODEV;
 		goto fail;
 	}
@@ -11804,13 +12018,16 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	if (!ret)
 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to create a copy of old state to restore: %i\n",
+			    ret);
 		goto fail;
 	}
 
 	ret = drm_atomic_commit(state);
 	if (ret) {
-		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to set mode on load-detect pipe\n");
 		goto fail;
 	}
 
@@ -11843,20 +12060,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 {
 	struct intel_encoder *intel_encoder =
 		intel_attached_encoder(to_intel_connector(connector));
+	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_atomic_state *state = old->restore_state;
 	int ret;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
-		      connector->base.id, connector->name,
-		      encoder->base.id, encoder->name);
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		    connector->base.id, connector->name,
+		    encoder->base.id, encoder->name);
 
 	if (!state)
 		return;
 
 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
 	if (ret)
-		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+		drm_dbg_kms(&i915->drm,
+			    "Couldn't release load detect pipe: %i\n", ret);
 	drm_atomic_state_put(state);
 }
 
@@ -11921,8 +12140,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				7 : 14;
 			break;
 		default:
-			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
-				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unknown DPLL mode %08x in programmed mode\n",
+				    (int)(dpll & DPLL_MODE_MASK));
 			return;
 		}
 
@@ -11931,7 +12151,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 		else
 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	} else {
-		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
+								 LVDS);
 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
 
 		if (is_lvds) {
@@ -12142,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 	was_visible = old_plane_state->uapi.visible;
 	visible = plane_state->uapi.visible;
 
-	if (!was_crtc_enabled && WARN_ON(was_visible))
+	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
 		was_visible = false;
 
 	/*
@@ -12168,11 +12389,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
 	turn_off = was_visible && (!visible || mode_changed);
 	turn_on = visible && (!was_visible || mode_changed);
 
-	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
-			 crtc->base.base.id, crtc->base.name,
-			 plane->base.base.id, plane->base.name,
-			 was_visible, visible,
-			 turn_off, turn_on, mode_changed);
+	drm_dbg_atomic(&dev_priv->drm,
+		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+		       crtc->base.base.id, crtc->base.name,
+		       plane->base.base.id, plane->base.name,
+		       was_visible, visible,
+		       turn_off, turn_on, mode_changed);
 
 	if (turn_on) {
 		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -12349,8 +12571,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 		}
 
 		if (!linked_state) {
-			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
-				      hweight8(crtc_state->nv12_planes));
+			drm_dbg_kms(&dev_priv->drm,
+				    "Need %d free Y planes for planar YUV\n",
+				    hweight8(crtc_state->nv12_planes));
 
 			return -EINVAL;
 		}
@@ -12361,7 +12584,8 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
 		linked_state->planar_linked_plane = plane;
 		crtc_state->active_planes |= BIT(linked->id);
 		crtc_state->update_planes |= BIT(linked->id);
-		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+			    linked->base.name, plane->base.name);
 
 		/* Copy parameters to slave plane */
 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
@@ -12398,120 +12622,74 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
 }
 
-static bool
-intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->uapi.crtc;
-	struct drm_atomic_state *state = crtc_state->uapi.state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->hw.adjusted_mode;
 
-	for_each_new_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != crtc)
-			continue;
-		if (connector->has_tile &&
-		    connector->tile_h_loc == connector->num_h_tile - 1 &&
-		    connector->tile_v_loc == connector->num_v_tile - 1)
-			return true;
-	}
+	if (!crtc_state->hw.enable)
+		return 0;
 
-	return false;
+	return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+				 adjusted_mode->crtc_clock);
 }
 
-static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+			       const struct intel_cdclk_state *cdclk_state)
 {
-	crtc_state->master_transcoder = INVALID_TRANSCODER;
-	crtc_state->sync_mode_slaves_mask = 0;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->hw.adjusted_mode;
+
+	if (!crtc_state->hw.enable)
+		return 0;
+
+	return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+				 cdclk_state->logical.cdclk);
 }
 
-static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
-					    struct intel_crtc_state *crtc_state,
-					    int num_tiled_conns)
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = crtc_state->uapi.crtc;
-	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	struct drm_connector *master_connector;
-	struct drm_connector_list_iter conn_iter;
-	struct drm_crtc *master_crtc = NULL;
-	struct drm_crtc_state *master_crtc_state;
-	struct intel_crtc_state *master_pipe_config;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->hw.adjusted_mode;
+	u16 linetime_wm;
 
-	if (INTEL_GEN(dev_priv) < 11)
+	if (!crtc_state->hw.enable)
 		return 0;
 
-	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+	linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
+				   crtc_state->pixel_rate);
+
+	/* Display WA #1135: BXT:ALL GLK:ALL */
+	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+		linetime_wm /= 2;
+
+	return linetime_wm;
+}
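
All three helpers compute the same quantity, the duration of one scanline in
0.125 us units (hence the factor of 8 against a kHz clock):

	linetime = crtc_htotal * 1000 * 8 / clock_khz

Worked example: a 1080p60 mode with crtc_htotal = 2200 and crtc_clock =
148500 kHz gives 2200 * 8000 / 148500 ~= 118.5, rounded to 119, i.e. about
14.9 us per line. The IPS variant divides by the logical cdclk instead of the
pixel clock, and the skl variant rounds up and then halves the result when
the BXT/GLK IPC workaround (Display WA #1135) applies.
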
+
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_cdclk_state *cdclk_state;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		crtc_state->linetime = skl_linetime_wm(crtc_state);
+	else
+		crtc_state->linetime = hsw_linetime_wm(crtc_state);
+
+	if (!hsw_crtc_supports_ips(crtc))
 		return 0;
 
-	/*
-	 * In case of tiled displays there could be one or more slaves but there is
-	 * only one master. Lets make the CRTC used by the connector corresponding
-	 * to the last horizonal and last vertical tile a master/genlock CRTC.
-	 * All the other CRTCs corresponding to other tiles of the same Tile group
-	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
-	 * If all tiles not present do not make master slave assignments.
-	 */
-	if (!connector->has_tile ||
-	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
-	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
-	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
-		reset_port_sync_mode_state(crtc_state);
-		return 0;
-	}
-	/* Last Horizontal and last vertical tile connector is a master
-	 * Master's crtc state is already populated in slave for port sync
-	 */
-	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
-	    connector->tile_v_loc == connector->num_v_tile - 1)
-		return 0;
+	cdclk_state = intel_atomic_get_cdclk_state(state);
+	if (IS_ERR(cdclk_state))
+		return PTR_ERR(cdclk_state);
 
-	/* Loop through all connectors and configure the Slave crtc_state
-	 * to point to the correct master.
-	 */
-	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-	drm_for_each_connector_iter(master_connector, &conn_iter) {
-		struct drm_connector_state *master_conn_state = NULL;
-
-		if (!(master_connector->has_tile &&
-		      master_connector->tile_group->id == connector->tile_group->id))
-			continue;
-		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
-		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
-			continue;
-
-		master_conn_state = drm_atomic_get_connector_state(&state->base,
-								   master_connector);
-		if (IS_ERR(master_conn_state)) {
-			drm_connector_list_iter_end(&conn_iter);
-			return PTR_ERR(master_conn_state);
-		}
-		if (master_conn_state->crtc) {
-			master_crtc = master_conn_state->crtc;
-			break;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	if (!master_crtc) {
-		DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
-			      crtc->base.id);
-		return -EINVAL;
-	}
-
-	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
-						      master_crtc);
-	if (IS_ERR(master_crtc_state))
-		return PTR_ERR(master_crtc_state);
-
-	master_pipe_config = to_intel_crtc_state(master_crtc_state);
-	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
-	master_pipe_config->sync_mode_slaves_mask |=
-		BIT(crtc_state->cpu_transcoder);
-	DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
-		      transcoder_name(crtc_state->master_transcoder),
-		      crtc->base.id,
-		      master_pipe_config->sync_mode_slaves_mask);
+	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+						       cdclk_state);
 
 	return 0;
 }
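
intel_atomic_get_cdclk_state() pulls the global cdclk object into the
transaction and can fail, typically with -EDEADLK when the global-state lock
must be retried, hence the ERR_PTR handling; crtcs that cannot do IPS skip it
and avoid serializing against cdclk changes. For reference, the ERR_PTR
encoding is just an errno stuffed into the top page of the pointer range,
sketched here after linux/err.h:

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
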
@@ -12531,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
 
 	if (mode_changed && crtc_state->hw.enable &&
 	    dev_priv->display.crtc_compute_clock &&
-	    !WARN_ON(crtc_state->shared_dpll)) {
+	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
 		if (ret)
 			return ret;
@@ -12551,17 +12729,18 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
 			return ret;
 	}
 
-	ret = 0;
 	if (dev_priv->display.compute_pipe_wm) {
 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
 		if (ret) {
-			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Target pipe watermarks are invalid\n");
 			return ret;
 		}
 	}
 
 	if (dev_priv->display.compute_intermediate_wm) {
-		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+		if (drm_WARN_ON(&dev_priv->drm,
+				!dev_priv->display.compute_pipe_wm))
 			return 0;
 
 		/*
@@ -12571,23 +12750,39 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
 		 */
 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
 		if (ret) {
-			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "No valid intermediate pipe watermarks are possible\n");
 			return ret;
 		}
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		if (mode_changed || crtc_state->update_pipe)
+		if (mode_changed || crtc_state->update_pipe) {
 			ret = skl_update_scaler_crtc(crtc_state);
-		if (!ret)
-			ret = intel_atomic_setup_scalers(dev_priv, crtc,
-							 crtc_state);
+			if (ret)
+				return ret;
+		}
+
+		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+		if (ret)
+			return ret;
 	}
 
-	if (HAS_IPS(dev_priv))
-		crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
+	if (HAS_IPS(dev_priv)) {
+		ret = hsw_compute_ips_config(crtc_state);
+		if (ret)
+			return ret;
+	}
 
-	return ret;
+	if (INTEL_GEN(dev_priv) >= 9 ||
+	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+		ret = hsw_compute_linetime_wm(state, crtc);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
@@ -12620,6 +12815,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
 		      struct intel_crtc_state *pipe_config)
 {
 	struct drm_connector *connector = conn_state->connector;
+	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
 	const struct drm_display_info *info = &connector->display_info;
 	int bpp;
 
@@ -12641,11 +12837,13 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
 	}
 
 	if (bpp < pipe_config->pipe_bpp) {
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
-			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
-			      connector->base.id, connector->name,
-			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
-			      pipe_config->pipe_bpp);
+		drm_dbg_kms(&i915->drm,
+			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+			    connector->base.id, connector->name,
+			    bpp, 3 * info->bpc,
+			    3 * conn_state->max_requested_bpc,
+			    pipe_config->pipe_bpp);
 
 		pipe_config->pipe_bpp = bpp;
 	}
@@ -12705,10 +12903,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
 		      const char *id, unsigned int lane_count,
 		      const struct intel_link_m_n *m_n)
 {
-	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-		      id, lane_count,
-		      m_n->gmch_m, m_n->gmch_n,
-		      m_n->link_m, m_n->link_n, m_n->tu);
+	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+	drm_dbg_kms(&i915->drm,
+		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		    id, lane_count,
+		    m_n->gmch_m, m_n->gmch_n,
+		    m_n->link_m, m_n->link_n, m_n->tu);
 }
 
 static void
@@ -12784,27 +12985,31 @@ static const char *output_formats(enum intel_output_format format)
 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *i915 = to_i915(plane->base.dev);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
 	struct drm_format_name_buf format_name;
 
 	if (!fb) {
-		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
-			      plane->base.base.id, plane->base.name,
-			      yesno(plane_state->uapi.visible));
+		drm_dbg_kms(&i915->drm,
+			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+			    plane->base.base.id, plane->base.name,
+			    yesno(plane_state->uapi.visible));
 		return;
 	}
 
-	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
-		      plane->base.base.id, plane->base.name,
-		      fb->base.id, fb->width, fb->height,
-		      drm_get_format_name(fb->format->format, &format_name),
-		      yesno(plane_state->uapi.visible));
-	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
-		      plane_state->hw.rotation, plane_state->scaler_id);
+	drm_dbg_kms(&i915->drm,
+		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+		    plane->base.base.id, plane->base.name,
+		    fb->base.id, fb->width, fb->height,
+		    drm_get_format_name(fb->format->format, &format_name),
+		    yesno(plane_state->uapi.visible));
+	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+		    plane_state->hw.rotation, plane_state->scaler_id);
 	if (plane_state->uapi.visible)
-		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
-			      DRM_RECT_FP_ARG(&plane_state->uapi.src),
-			      DRM_RECT_ARG(&plane_state->uapi.dst));
+		drm_dbg_kms(&i915->drm,
+			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
+			    DRM_RECT_ARG(&plane_state->uapi.dst));
 }
 
 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
@@ -12818,22 +13023,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 	char buf[64];
 	int i;
 
-	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
-		      crtc->base.base.id, crtc->base.name,
-		      yesno(pipe_config->hw.enable), context);
+	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
+		    crtc->base.base.id, crtc->base.name,
+		    yesno(pipe_config->hw.enable), context);
 
 	if (!pipe_config->hw.enable)
 		goto dump_planes;
 
 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
-	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
-		      yesno(pipe_config->hw.active),
-		      buf, pipe_config->output_types,
-		      output_formats(pipe_config->output_format));
+	drm_dbg_kms(&dev_priv->drm,
+		    "active: %s, output_types: %s (0x%x), output format: %s\n",
+		    yesno(pipe_config->hw.active),
+		    buf, pipe_config->output_types,
+		    output_formats(pipe_config->output_format));
 
-	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
-		      transcoder_name(pipe_config->cpu_transcoder),
-		      pipe_config->pipe_bpp, pipe_config->dither);
+	drm_dbg_kms(&dev_priv->drm,
+		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+		    transcoder_name(pipe_config->cpu_transcoder),
+		    pipe_config->pipe_bpp, pipe_config->dither);
 
 	if (pipe_config->has_pch_encoder)
 		intel_dump_m_n_config(pipe_config, "fdi",
@@ -12849,13 +13056,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 					      &pipe_config->dp_m2_n2);
 	}
 
-	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
-		      pipe_config->has_audio, pipe_config->has_infoframe,
-		      pipe_config->infoframes.enable);
+	drm_dbg_kms(&dev_priv->drm,
+		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+		    pipe_config->has_audio, pipe_config->has_infoframe,
+		    pipe_config->infoframes.enable);
 
 	if (pipe_config->infoframes.enable &
 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
-		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
+			    pipe_config->infoframes.gcp);
 	if (pipe_config->infoframes.enable &
 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
@@ -12866,50 +13075,59 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
 
-	DRM_DEBUG_KMS("requested mode:\n");
+	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
-	DRM_DEBUG_KMS("adjusted mode:\n");
+	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
 	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
-	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
-		      pipe_config->port_clock,
-		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
-		      pipe_config->pixel_rate);
+	drm_dbg_kms(&dev_priv->drm,
+		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
+		    pipe_config->port_clock,
+		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+		    pipe_config->pixel_rate);
+
+	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
+		    pipe_config->linetime, pipe_config->ips_linetime);
 
 	if (INTEL_GEN(dev_priv) >= 9)
-		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
-			      crtc->num_scalers,
-			      pipe_config->scaler_state.scaler_users,
-		              pipe_config->scaler_state.scaler_id);
+		drm_dbg_kms(&dev_priv->drm,
+			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+			    crtc->num_scalers,
+			    pipe_config->scaler_state.scaler_users,
+			    pipe_config->scaler_state.scaler_id);
 
 	if (HAS_GMCH(dev_priv))
-		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
-			      pipe_config->gmch_pfit.control,
-			      pipe_config->gmch_pfit.pgm_ratios,
-			      pipe_config->gmch_pfit.lvds_border_bits);
+		drm_dbg_kms(&dev_priv->drm,
+			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+			    pipe_config->gmch_pfit.control,
+			    pipe_config->gmch_pfit.pgm_ratios,
+			    pipe_config->gmch_pfit.lvds_border_bits);
 	else
-		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
-			      pipe_config->pch_pfit.pos,
-			      pipe_config->pch_pfit.size,
-			      enableddisabled(pipe_config->pch_pfit.enabled),
-			      yesno(pipe_config->pch_pfit.force_thru));
+		drm_dbg_kms(&dev_priv->drm,
+			    "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
+			    pipe_config->pch_pfit.pos,
+			    pipe_config->pch_pfit.size,
+			    enableddisabled(pipe_config->pch_pfit.enabled),
+			    yesno(pipe_config->pch_pfit.force_thru));
 
-	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
-		      pipe_config->ips_enabled, pipe_config->double_wide);
+	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
+		    pipe_config->ips_enabled, pipe_config->double_wide);
 
 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
 
 	if (IS_CHERRYVIEW(dev_priv))
-		DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
-			      pipe_config->cgm_mode, pipe_config->gamma_mode,
-			      pipe_config->gamma_enable, pipe_config->csc_enable);
+		drm_dbg_kms(&dev_priv->drm,
+			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+			    pipe_config->cgm_mode, pipe_config->gamma_mode,
+			    pipe_config->gamma_enable, pipe_config->csc_enable);
 	else
-		DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
-			      pipe_config->csc_mode, pipe_config->gamma_mode,
-			      pipe_config->gamma_enable, pipe_config->csc_enable);
+		drm_dbg_kms(&dev_priv->drm,
+			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+			    pipe_config->csc_mode, pipe_config->gamma_mode,
+			    pipe_config->gamma_enable, pipe_config->csc_enable);
 
-	DRM_DEBUG_KMS("MST master transcoder: %s\n",
-		      transcoder_name(pipe_config->mst_master_transcoder));
+	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
+		    transcoder_name(pipe_config->mst_master_transcoder));
 
 dump_planes:
 	if (!state)
@@ -12957,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
 
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
-		WARN_ON(!connector_state->crtc);
+		drm_WARN_ON(dev, !connector_state->crtc);
 
 		switch (encoder->type) {
-			unsigned int port_mask;
 		case INTEL_OUTPUT_DDI:
-			if (WARN_ON(!HAS_DDI(to_i915(dev))))
+			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
 				break;
 			/* else, fall through */
 		case INTEL_OUTPUT_DP:
 		case INTEL_OUTPUT_HDMI:
 		case INTEL_OUTPUT_EDP:
-			port_mask = 1 << encoder->port;
-
 			/* the same port mustn't appear more than once */
-			if (used_ports & port_mask)
+			if (used_ports & BIT(encoder->port))
 				ret = false;
 
-			used_ports |= port_mask;
+			used_ports |= BIT(encoder->port);
 			break;
 		case INTEL_OUTPUT_DP_MST:
 			used_mst_ports |=
@@ -13055,15 +13270,6 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
 	if (IS_G4X(dev_priv) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		saved_state->wm = crtc_state->wm;
-	/*
-	 * Save the slave bitmask which gets filled for master crtc state during
-	 * slave atomic check call. For all other CRTCs reset the port sync variables
-	 * crtc_state->master_transcoder needs to be set to INVALID
-	 */
-	reset_port_sync_mode_state(saved_state);
-	if (intel_atomic_is_master_connector(crtc_state))
-		saved_state->sync_mode_slaves_mask =
-			crtc_state->sync_mode_slaves_mask;
 
 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
 	kfree(saved_state);
@@ -13078,11 +13284,10 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 {
 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
 	struct drm_atomic_state *state = pipe_config->uapi.state;
-	struct intel_encoder *encoder;
+	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
-	int base_bpp, ret;
-	int i, tile_group_id = -1, num_tiled_conns = 0;
+	int base_bpp, ret, i;
 	bool retry = true;
 
 	pipe_config->cpu_transcoder =
@@ -13121,13 +13326,15 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 			       &pipe_config->pipe_src_h);
 
 	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(connector_state->best_encoder);
+
 		if (connector_state->crtc != crtc)
 			continue;
 
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
-			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+			drm_dbg_kms(&i915->drm,
+				    "rejecting invalid cloning configuration\n");
 			return -EINVAL;
 		}
 
@@ -13152,47 +13359,24 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
 			      CRTC_STEREO_DOUBLE);
 
-	/* Get tile_group_id of tiled connector */
-	for_each_new_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc == crtc &&
-		    connector->has_tile) {
-			tile_group_id = connector->tile_group->id;
-			break;
-		}
-	}
-
-	/* Get total number of tiled connectors in state that belong to
-	 * this tile group.
-	 */
-	for_each_new_connector_in_state(state, connector, connector_state, i) {
-		if (connector->has_tile &&
-		    connector->tile_group->id == tile_group_id)
-			num_tiled_conns++;
-	}
-
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
 	for_each_new_connector_in_state(state, connector, connector_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(connector_state->best_encoder);
+
 		if (connector_state->crtc != crtc)
 			continue;
 
-		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
-						       num_tiled_conns);
-		if (ret) {
-			DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
-				      ret);
-			return ret;
-		}
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
 		ret = encoder->compute_config(encoder, pipe_config,
 					      connector_state);
 		if (ret < 0) {
 			if (ret != -EDEADLK)
-				DRM_DEBUG_KMS("Encoder config failure: %d\n",
-					      ret);
+				drm_dbg_kms(&i915->drm,
+					    "Encoder config failure: %d\n",
+					    ret);
 			return ret;
 		}
 	}
@@ -13207,15 +13391,16 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	if (ret == -EDEADLK)
 		return ret;
 	if (ret < 0) {
-		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
 		return ret;
 	}
 
 	if (ret == RETRY) {
-		if (WARN(!retry, "loop in pipe configuration computation\n"))
+		if (drm_WARN(&i915->drm, !retry,
+			     "loop in pipe configuration computation\n"))
 			return -EINVAL;
 
-		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
 		retry = false;
 		goto encoder_retry;
 	}
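
The RETRY handling permits exactly one bandwidth-constrained recompute: retry
starts true, is cleared before jumping back, and a second RETRY trips the
drm_WARN() and fails the check instead of looping forever. Skeleton of the
idiom, where recompute_step() is a hypothetical stand-in for the encoder and
crtc passes above:

	bool retry = true;
	int ret;

encoder_retry:
	ret = recompute_step();		/* may return RETRY */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		retry = false;
		goto encoder_retry;
	}
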
@@ -13226,8 +13411,9 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	 */
 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
 		!pipe_config->dither_force_disable;
-	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
-		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+	drm_dbg_kms(&i915->drm,
+		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
 	/*
 	 * Make drm_calc_timestamping_constants in
@@ -13238,6 +13424,35 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 	return 0;
 }
 
+static int
+intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+{
+	struct intel_atomic_state *state =
+		to_intel_atomic_state(crtc_state->uapi.state);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_new_connector_in_state(&state->base, connector,
+					conn_state, i) {
+		struct intel_encoder *encoder =
+			to_intel_encoder(conn_state->best_encoder);
+		int ret;
+
+		if (conn_state->crtc != &crtc->base ||
+		    !encoder->compute_config_late)
+			continue;
+
+		ret = encoder->compute_config_late(encoder, crtc_state,
+						   conn_state);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
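
This adds a second encoder hook that runs only after every encoder's
first-pass ->compute_config() has settled the crtc state, which is useful
when one encoder's final configuration depends on its siblings (port-synced
or MST streams, for instance). A hypothetical encoder opting in:

static int foo_compute_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	/* first pass: pick clocks, lane count, output format */
	return 0;
}

static int foo_compute_config_late(struct intel_encoder *encoder,
				   struct intel_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	/* second pass: may inspect other crtcs' settled first-pass state */
	return 0;
}
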
+
 bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
 	int diff;
@@ -13316,16 +13531,17 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
 		if (!drm_debug_enabled(DRM_UT_KMS))
 			return;
 
-		DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
-		DRM_DEBUG_KMS("expected:\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "fastset mismatch in %s infoframe\n", name);
+		drm_dbg_kms(&dev_priv->drm, "expected:\n");
 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
-		DRM_DEBUG_KMS("found:\n");
+		drm_dbg_kms(&dev_priv->drm, "found:\n");
 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
 	} else {
-		DRM_ERROR("mismatch in %s infoframe\n", name);
-		DRM_ERROR("expected:\n");
+		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
+		drm_err(&dev_priv->drm, "expected:\n");
 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
-		DRM_ERROR("found:\n");
+		drm_err(&dev_priv->drm, "found:\n");
 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
 	}
 }
@@ -13334,6 +13550,7 @@ static void __printf(4, 5)
 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
 		     const char *name, const char *format, ...)
 {
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	struct va_format vaf;
 	va_list args;
 
@@ -13342,11 +13559,12 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
 	vaf.va = &args;
 
 	if (fastset)
-		DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
-			      crtc->base.base.id, crtc->base.name, name, &vaf);
+		drm_dbg_kms(&i915->drm,
+			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+			    crtc->base.base.id, crtc->base.name, name, &vaf);
 	else
-		DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
-			  crtc->base.base.id, crtc->base.name, name, &vaf);
+		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
+			crtc->base.base.id, crtc->base.name, name, &vaf);
 
 	va_end(args);
 }
@@ -13382,7 +13600,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
 
 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
-		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "initial modeset and fastboot not set\n");
 		ret = false;
 	}
 
@@ -13584,7 +13803,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
 	PIPE_CONF_CHECK_I(pixel_multiplier);
 	PIPE_CONF_CHECK_I(output_format);
-	PIPE_CONF_CHECK_I(dc3co_exitline);
 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
 	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -13644,10 +13862,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 		PIPE_CONF_CHECK_BOOL(gamma_enable);
 		PIPE_CONF_CHECK_BOOL(csc_enable);
 
+		PIPE_CONF_CHECK_I(linetime);
+		PIPE_CONF_CHECK_I(ips_linetime);
+
 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
 		if (bp_gamma)
 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
-
 	}
 
 	PIPE_CONF_CHECK_BOOL(double_wide);
@@ -13703,7 +13923,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
 	PIPE_CONF_CHECK_INFOFRAME(drm);
 
-	PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
 	PIPE_CONF_CHECK_I(master_transcoder);
 
 	PIPE_CONF_CHECK_I(dsc.compression_enable);
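
sync_mode_slaves_mask moves from PIPE_CONF_CHECK_I to PIPE_CONF_CHECK_X
because it is a transcoder bitmask: the _X variant dumps mismatches in hex,
where a stray bit is legible, rather than decimal. The checker macros are
defined earlier in this function (outside these hunks); the _X one is
presumably of this shape:

#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)
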
@@ -13737,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
 		 * FDI already provided one idea for the dotclock.
 		 * Yell if the encoder disagrees.
 		 */
-		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
-		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
-		     fdi_dotclock, dotclock);
+		drm_WARN(&dev_priv->drm,
+			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+			 fdi_dotclock, dotclock);
 	}
 }
 
@@ -13750,12 +13971,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
 	struct skl_hw_state {
 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
-		struct skl_ddb_allocation ddb;
 		struct skl_pipe_wm wm;
 	} *hw;
-	struct skl_ddb_allocation *sw_ddb;
 	struct skl_pipe_wm *sw_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+	u8 hw_enabled_slices;
 	const enum pipe pipe = crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -13771,14 +13991,14 @@ static void verify_wm_state(struct intel_crtc *crtc,
 
 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
 
-	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
-	sw_ddb = &dev_priv->wm.skl_hw.ddb;
+	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 &&
-	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
-		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-			  sw_ddb->enabled_slices,
-			  hw->ddb.enabled_slices);
+	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+		drm_err(&dev_priv->drm,
+			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+			dev_priv->enabled_dbuf_slices_mask,
+			hw_enabled_slices);
 
 	/* planes */
 	for_each_universal_plane(dev_priv, pipe, plane) {
@@ -13793,26 +14013,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
 						&sw_plane_wm->wm[level]))
 				continue;
 
-			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-				  pipe_name(pipe), plane + 1, level,
-				  sw_plane_wm->wm[level].plane_en,
-				  sw_plane_wm->wm[level].plane_res_b,
-				  sw_plane_wm->wm[level].plane_res_l,
-				  hw_plane_wm->wm[level].plane_en,
-				  hw_plane_wm->wm[level].plane_res_b,
-				  hw_plane_wm->wm[level].plane_res_l);
+			drm_err(&dev_priv->drm,
+				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				pipe_name(pipe), plane + 1, level,
+				sw_plane_wm->wm[level].plane_en,
+				sw_plane_wm->wm[level].plane_res_b,
+				sw_plane_wm->wm[level].plane_res_l,
+				hw_plane_wm->wm[level].plane_en,
+				hw_plane_wm->wm[level].plane_res_b,
+				hw_plane_wm->wm[level].plane_res_l);
 		}
 
 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
 					 &sw_plane_wm->trans_wm)) {
-			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-				  pipe_name(pipe), plane + 1,
-				  sw_plane_wm->trans_wm.plane_en,
-				  sw_plane_wm->trans_wm.plane_res_b,
-				  sw_plane_wm->trans_wm.plane_res_l,
-				  hw_plane_wm->trans_wm.plane_en,
-				  hw_plane_wm->trans_wm.plane_res_b,
-				  hw_plane_wm->trans_wm.plane_res_l);
+			drm_err(&dev_priv->drm,
+				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				pipe_name(pipe), plane + 1,
+				sw_plane_wm->trans_wm.plane_en,
+				sw_plane_wm->trans_wm.plane_res_b,
+				sw_plane_wm->trans_wm.plane_res_l,
+				hw_plane_wm->trans_wm.plane_en,
+				hw_plane_wm->trans_wm.plane_res_b,
+				hw_plane_wm->trans_wm.plane_res_l);
 		}
 
 		/* DDB */
@@ -13820,10 +14042,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
-			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
-				  pipe_name(pipe), plane + 1,
-				  sw_ddb_entry->start, sw_ddb_entry->end,
-				  hw_ddb_entry->start, hw_ddb_entry->end);
+			drm_err(&dev_priv->drm,
+				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+				pipe_name(pipe), plane + 1,
+				sw_ddb_entry->start, sw_ddb_entry->end,
+				hw_ddb_entry->start, hw_ddb_entry->end);
 		}
 	}
 
@@ -13845,26 +14068,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
 						&sw_plane_wm->wm[level]))
 				continue;
 
-			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-				  pipe_name(pipe), level,
-				  sw_plane_wm->wm[level].plane_en,
-				  sw_plane_wm->wm[level].plane_res_b,
-				  sw_plane_wm->wm[level].plane_res_l,
-				  hw_plane_wm->wm[level].plane_en,
-				  hw_plane_wm->wm[level].plane_res_b,
-				  hw_plane_wm->wm[level].plane_res_l);
+			drm_err(&dev_priv->drm,
+				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				pipe_name(pipe), level,
+				sw_plane_wm->wm[level].plane_en,
+				sw_plane_wm->wm[level].plane_res_b,
+				sw_plane_wm->wm[level].plane_res_l,
+				hw_plane_wm->wm[level].plane_en,
+				hw_plane_wm->wm[level].plane_res_b,
+				hw_plane_wm->wm[level].plane_res_l);
 		}
 
 		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
 					 &sw_plane_wm->trans_wm)) {
-			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
-				  pipe_name(pipe),
-				  sw_plane_wm->trans_wm.plane_en,
-				  sw_plane_wm->trans_wm.plane_res_b,
-				  sw_plane_wm->trans_wm.plane_res_l,
-				  hw_plane_wm->trans_wm.plane_en,
-				  hw_plane_wm->trans_wm.plane_res_b,
-				  hw_plane_wm->trans_wm.plane_res_l);
+			drm_err(&dev_priv->drm,
+				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+				pipe_name(pipe),
+				sw_plane_wm->trans_wm.plane_en,
+				sw_plane_wm->trans_wm.plane_res_b,
+				sw_plane_wm->trans_wm.plane_res_l,
+				hw_plane_wm->trans_wm.plane_en,
+				hw_plane_wm->trans_wm.plane_res_b,
+				hw_plane_wm->trans_wm.plane_res_l);
 		}
 
 		/* DDB */
@@ -13872,10 +14097,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
 
 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
-			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
-				  pipe_name(pipe),
-				  sw_ddb_entry->start, sw_ddb_entry->end,
-				  hw_ddb_entry->start, hw_ddb_entry->end);
+			drm_err(&dev_priv->drm,
+				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+				pipe_name(pipe),
+				sw_ddb_entry->start, sw_ddb_entry->end,
+				hw_ddb_entry->start, hw_ddb_entry->end);
 		}
 	}
 
@@ -13919,9 +14145,9 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat
 		bool enabled = false, found = false;
 		enum pipe pipe;
 
-		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
-			      encoder->base.base.id,
-			      encoder->base.name);
+		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+			    encoder->base.base.id,
+			    encoder->base.name);
 
 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
 						   new_conn_state, i) {
@@ -13973,7 +14199,8 @@ verify_crtc_state(struct intel_crtc *crtc,
 	intel_crtc_state_reset(old_crtc_state, crtc);
 	old_crtc_state->uapi.state = state;
 
-	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
+	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+		    crtc->base.name);
 
 	active = dev_priv->display.get_pipe_config(crtc, pipe_config);
 
@@ -14048,7 +14275,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 
 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
 
-	DRM_DEBUG_KMS("%s\n", pll->info->name);
+	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
 
 	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
@@ -14075,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
 	if (new_crtc_state->hw.active)
 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
-				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+				pipe_name(crtc->pipe), pll->active_mask);
 	else
 		I915_STATE_WARN(pll->active_mask & crtc_mask,
 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
-				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+				pipe_name(crtc->pipe), pll->active_mask);
 
 	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14108,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
 
 		I915_STATE_WARN(pll->active_mask & crtc_mask,
 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
-				pipe_name(drm_crtc_index(&crtc->base)));
+				pipe_name(crtc->pipe));
 		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
-				pipe_name(drm_crtc_index(&crtc->base)));
+				pipe_name(crtc->pipe));
 	}
 }
 
@@ -14135,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++)
-		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+		verify_single_dpll_state(dev_priv,
+					 &dev_priv->dpll.shared_dplls[i],
+					 NULL, NULL);
 }
 
 static void
@@ -14280,35 +14509,35 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
 	return 0;
 }
 
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+			   u8 active_pipes)
+{
+	const struct intel_crtc_state *crtc_state;
+	struct intel_crtc *crtc;
+	int i;
+
+	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+		if (crtc_state->hw.active)
+			active_pipes |= BIT(crtc->pipe);
+		else
+			active_pipes &= ~BIT(crtc->pipe);
+	}
+
+	return active_pipes;
+}
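
The helper folds this state's crtcs into a caller-supplied starting mask, so
pipes untouched by the transaction keep their current bit. With pipes A/B/C
as bits 0..2: seeded with 0x5 (A and C active), a state that enables B and
disables C returns 0x3 (A and B active).
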
+
 static int intel_modeset_checks(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	struct intel_crtc *crtc;
-	int ret, i;
-
-	/* keep the current setting */
-	if (!state->cdclk.force_min_cdclk_changed)
-		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+	int ret;
 
 	state->modeset = true;
-	state->active_pipes = dev_priv->active_pipes;
-	state->cdclk.logical = dev_priv->cdclk.logical;
-	state->cdclk.actual = dev_priv->cdclk.actual;
+	state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
 
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    new_crtc_state, i) {
-		if (new_crtc_state->hw.active)
-			state->active_pipes |= BIT(crtc->pipe);
-		else
-			state->active_pipes &= ~BIT(crtc->pipe);
-
-		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
-			state->active_pipe_changes |= BIT(crtc->pipe);
-	}
+	state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
 
 	if (state->active_pipe_changes) {
-		ret = intel_atomic_lock_global_state(state);
+		ret = _intel_atomic_lock_global_state(state);
 		if (ret)
 			return ret;
 	}
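
active_pipe_changes is now the plain XOR of the old and new masks: a set bit
means that pipe toggled. For example, old 0x5 (A and C) against new 0x6
(B and C) gives 0x5 ^ 0x6 = 0x3, i.e. A turned off and B turned on while C is
untouched; any nonzero result takes the global-state lock just above.
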
@@ -14399,7 +14628,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
 }
 
 static int intel_atomic_check_planes(struct intel_atomic_state *state,
-				     bool *need_modeset)
+				     bool *need_cdclk_calc)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14415,8 +14644,9 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
 		ret = intel_plane_atomic_check(state, plane);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
-					 plane->base.base.id, plane->base.name);
+			drm_dbg_atomic(&dev_priv->drm,
+				       "[PLANE:%d:%s] atomic driver check failed\n",
+				       plane->base.base.id, plane->base.name);
 			return ret;
 		}
 	}
@@ -14453,8 +14683,11 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
 	 * affected planes are part of the state. We can now
 	 * compute the minimum cdclk for each plane.
 	 */
-	for_each_new_intel_plane_in_state(state, plane, plane_state, i)
-		*need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
@@ -14467,9 +14700,11 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
 
 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
 		int ret = intel_crtc_atomic_check(state, crtc);
+		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 		if (ret) {
-			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
-					 crtc->base.base.id, crtc->base.name);
+			drm_dbg_atomic(&i915->drm,
+				       "[CRTC:%d:%s] atomic driver check failed\n",
+				       crtc->base.base.id, crtc->base.name);
 			return ret;
 		}
 	}
@@ -14494,76 +14729,6 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
 	return false;
 }
 
-static int
-intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	int ret = 0;
-
-	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct drm_connector_state *conn_state;
-		struct drm_crtc_state *crtc_state;
-
-		if (!connector->has_tile ||
-		    connector->tile_group->id != tile_grp_id)
-			continue;
-		conn_state = drm_atomic_get_connector_state(&state->base,
-							    connector);
-		if (IS_ERR(conn_state)) {
-			ret =  PTR_ERR(conn_state);
-			break;
-		}
-
-		if (!conn_state->crtc)
-			continue;
-
-		crtc_state = drm_atomic_get_crtc_state(&state->base,
-						       conn_state->crtc);
-		if (IS_ERR(crtc_state)) {
-			ret = PTR_ERR(crtc_state);
-			break;
-		}
-		crtc_state->mode_changed = true;
-		ret = drm_atomic_add_affected_connectors(&state->base,
-							 conn_state->crtc);
-		if (ret)
-			break;
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return ret;
-}
-
-static int
-intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct drm_connector *connector;
-	struct drm_connector_state *old_conn_state, *new_conn_state;
-	int i, ret;
-
-	if (INTEL_GEN(dev_priv) < 11)
-		return 0;
-
-	/* Is tiled, mark all other tiled CRTCs as needing a modeset */
-	for_each_oldnew_connector_in_state(&state->base, connector,
-					   old_conn_state, new_conn_state, i) {
-		if (!connector->has_tile)
-			continue;
-		if (!intel_connector_needs_modeset(state, connector))
-			continue;
-
-		ret = intel_modeset_all_tiles(state, connector->tile_group->id);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 /**
  * intel_atomic_check - validate state object
  * @dev: drm device
@@ -14575,6 +14740,7 @@ static int intel_atomic_check(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+	struct intel_cdclk_state *new_cdclk_state;
 	struct intel_crtc *crtc;
 	int ret, i;
 	bool any_ms = false;
@@ -14582,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev,
 	/* Catch I915_MODE_FLAG_INHERITED */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		if (new_crtc_state->hw.mode.private_flags !=
-		    old_crtc_state->hw.mode.private_flags)
+		if (new_crtc_state->uapi.mode.private_flags !=
+		    old_crtc_state->uapi.mode.private_flags)
 			new_crtc_state->uapi.mode_changed = true;
 	}
 
@@ -14591,21 +14757,6 @@ static int intel_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	/**
-	 * This check adds all the connectors in current state that belong to
-	 * the same tile group to a full modeset.
-	 * This function directly sets the mode_changed to true and we also call
-	 * drm_atomic_add_affected_connectors(). Hence we are not explicitly
-	 * calling drm_atomic_helper_check_modeset() after this.
-	 *
-	 * Fixme: Handle some corner cases where one of the
-	 * tiled connectors gets disconnected and tile info is lost but since it
-	 * was previously synced to other conn, we need to add that to the modeset.
-	 */
-	ret = intel_atomic_check_tiled_conns(state);
-	if (ret)
-		goto fail;
-
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
 		if (!needs_modeset(new_crtc_state)) {
@@ -14615,18 +14766,26 @@ static int intel_atomic_check(struct drm_device *dev,
 			continue;
 		}
 
-		if (!new_crtc_state->uapi.enable) {
-			intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
-			continue;
-		}
-
 		ret = intel_crtc_prepare_cleared_state(new_crtc_state);
 		if (ret)
 			goto fail;
 
+		if (!new_crtc_state->hw.enable)
+			continue;
+
 		ret = intel_modeset_pipe_config(new_crtc_state);
 		if (ret)
 			goto fail;
+	}
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		if (!needs_modeset(new_crtc_state))
+			continue;
+
+		ret = intel_modeset_pipe_config_late(new_crtc_state);
+		if (ret)
+			goto fail;
 
 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
 	}
@@ -14656,8 +14815,10 @@ static int intel_atomic_check(struct drm_device *dev,
 		}
 
 		if (is_trans_port_sync_mode(new_crtc_state)) {
-			u8 trans = new_crtc_state->sync_mode_slaves_mask |
-				   BIT(new_crtc_state->master_transcoder);
+			u8 trans = new_crtc_state->sync_mode_slaves_mask;
+
+			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
+				trans |= BIT(new_crtc_state->master_transcoder);
 
 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
 				new_crtc_state->uapi.mode_changed = true;
@@ -14680,7 +14841,8 @@ static int intel_atomic_check(struct drm_device *dev,
 	}
 
 	if (any_ms && !check_digital_port_conflicts(state)) {
-		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "rejecting conflicting digital port configuration\n");
 		ret = -EINVAL;
 		goto fail;
 	}
@@ -14689,18 +14851,32 @@ static int intel_atomic_check(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	any_ms |= state->cdclk.force_min_cdclk_changed;
-
 	ret = intel_atomic_check_planes(state, &any_ms);
 	if (ret)
 		goto fail;
 
+	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+	if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+		any_ms = true;
+
+	/*
+	 * distrust_bios_wm will force a full dbuf recomputation
+	 * but the hardware state will only get updated accordingly
+	 * if state->modeset==true. Hence distrust_bios_wm==true &&
+	 * state->modeset==false is an invalid combination which
+	 * would cause the hardware and software dbuf state to get
+	 * out of sync. We must prevent that.
+	 *
+	 * FIXME clean up this mess and introduce better
+	 * state tracking for dbuf.
+	 */
+	if (dev_priv->wm.distrust_bios_wm)
+		any_ms = true;
+
 	if (any_ms) {
 		ret = intel_modeset_checks(state);
 		if (ret)
 			goto fail;
-	} else {
-		state->cdclk.logical = dev_priv->cdclk.logical;
 	}
 
 	ret = intel_atomic_check_crtcs(state);
@@ -14806,6 +14982,18 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 			ilk_pfit_disable(old_crtc_state);
 	}
 
+	/*
+	 * The register is supposedly single buffered so perhaps
+	 * not 100% correct to do this here. But SKL+ calculate
+	 * this based on the adjusted pixel rate so pfit changes do
+	 * affect it and so it must be updated for fastsets.
+	 * HSW/BDW only really need this here for fastboot, after
+	 * that the value should not change without a full modeset.
+	 */
+	if (INTEL_GEN(dev_priv) >= 9 ||
+	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+		hsw_set_linetime_wm(new_crtc_state);
+
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_set_pipe_chicken(crtc);
 }
@@ -14848,9 +15036,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	bool modeset = needs_modeset(new_crtc_state);
-	struct intel_plane_state *new_plane_state =
-		intel_atomic_get_new_plane_state(state,
-						 to_intel_plane(crtc->base.primary));
 
 	if (modeset) {
 		intel_crtc_update_active_timings(new_crtc_state);
@@ -14873,8 +15058,8 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 
 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
 		intel_fbc_disable(crtc);
-	else if (new_plane_state)
-		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+	else
+		intel_fbc_enable(state, crtc);
 
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(new_crtc_state);
@@ -14904,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne
 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
 	enum transcoder slave_transcoder;
 
-	WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+	drm_WARN_ON(&dev_priv->drm,
+		    !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
 
 	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
 	return intel_get_crtc_for_pipe(dev_priv,
@@ -15021,7 +15207,7 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
 		if (conn_state->crtc == &crtc->base)
 			break;
 	}
-	intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
+	intel_dp = intel_attached_dp(to_intel_connector(conn));
 	intel_dp_stop_link_train(intel_dp);
 }
 
@@ -15036,15 +15222,12 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
 		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_crtc_state *old_crtc_state =
 		intel_atomic_get_old_crtc_state(state, crtc);
-	struct intel_plane_state *new_plane_state =
-		intel_atomic_get_new_plane_state(state,
-						 to_intel_plane(crtc->base.primary));
 	bool modeset = needs_modeset(new_crtc_state);
 
 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
 		intel_fbc_disable(crtc);
-	else if (new_plane_state)
-		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+	else
+		intel_fbc_enable(state, crtc);
 
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(new_crtc_state);
@@ -15068,18 +15251,20 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
 					       struct intel_crtc_state *old_crtc_state,
 					       struct intel_crtc_state *new_crtc_state)
 {
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
 	struct intel_crtc_state *new_slave_crtc_state =
 		intel_atomic_get_new_crtc_state(state, slave_crtc);
 	struct intel_crtc_state *old_slave_crtc_state =
 		intel_atomic_get_old_crtc_state(state, slave_crtc);
 
-	WARN_ON(!slave_crtc || !new_slave_crtc_state ||
-		!old_slave_crtc_state);
+	drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
+		    !old_slave_crtc_state);
 
-	DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
-		      crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
-		      slave_crtc->base.name);
+	drm_dbg_kms(&i915->drm,
+		    "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+		    crtc->base.base.id, crtc->base.name,
+		    slave_crtc->base.base.id, slave_crtc->base.name);
 
 	/* Enable seq for slave with DP_TP_CTL left Idle until the
 	 * master is ready
@@ -15109,35 +15294,53 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
 				       state);
 }
 
+static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+	u8 required_slices = state->enabled_dbuf_slices_mask;
+	u8 slices_union = hw_enabled_slices | required_slices;
+
+	/* If the 2nd DBuf slice is required, enable it here */
+	if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, slices_union);
+}
+
+static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+	u8 required_slices = state->enabled_dbuf_slices_mask;
+
+	/* If the 2nd DBuf slice is no longer required, disable it */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
+}
+
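These two helpers split the DBuf slice reprogramming that used to sit inline
in skl_commit_modeset_enables(): before the commit the union of the old and
new slice masks is enabled, so every pipe keeps a backed allocation while it
is reprogrammed, and only afterwards is the mask trimmed to what the new
state needs. A standalone sketch of the mask arithmetic (illustrative
values, not i915 code):

#include <stdio.h>
#include <stdint.h>

/* Phase 1 (pre-update): enable, never disable, so both the old and
 * the new allocations stay backed while pipes are reprogrammed. */
static uint8_t dbuf_pre_update(uint8_t hw_enabled, uint8_t required)
{
	return hw_enabled | required;
}

/* Phase 2 (post-update): every pipe uses its new allocation now, so
 * slices outside the required mask can be powered back down. */
static uint8_t dbuf_post_update(uint8_t required)
{
	return required;
}

int main(void)
{
	uint8_t hw = 0x1;	/* slice 1 currently enabled */
	uint8_t want = 0x2;	/* the new state needs only slice 2 */
	uint8_t mid = dbuf_pre_update(hw, want);

	/* pre: 0x3 (both slices live), post: 0x2 (slice 1 dropped) */
	printf("pre: %#x post: %#x\n", mid, dbuf_post_update(want));
	return 0;
}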
 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-	u8 required_slices = state->wm_results.ddb.enabled_slices;
 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
-	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
 	u8 update_pipes = 0, modeset_pipes = 0;
 	int i;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
 		if (!new_crtc_state->hw.active)
 			continue;
 
 		/* ignore allocations for CRTCs that have been turned off. */
 		if (!needs_modeset(new_crtc_state)) {
-			entries[i] = old_crtc_state->wm.skl.ddb;
-			update_pipes |= BIT(crtc->pipe);
+			entries[pipe] = old_crtc_state->wm.skl.ddb;
+			update_pipes |= BIT(pipe);
 		} else {
-			modeset_pipes |= BIT(crtc->pipe);
+			modeset_pipes |= BIT(pipe);
 		}
 	}
 
-	/* If 2nd DBuf slice required, enable it here */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
-
 	/*
 	 * Whenever the number of active pipes changes, we need to make sure we
 	 * update the pipes in the right order so that their ddb allocations
@@ -15156,10 +15359,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 				continue;
 
 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
-							entries, num_pipes, i))
+							entries, I915_MAX_PIPES, pipe))
 				continue;
 
-			entries[i] = new_crtc_state->wm.skl.ddb;
+			entries[pipe] = new_crtc_state->wm.skl.ddb;
 			update_pipes &= ~BIT(pipe);
 
 			intel_update_crtc(crtc, state, old_crtc_state,
@@ -15193,10 +15396,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 		    is_trans_port_sync_slave(new_crtc_state))
 			continue;
 
-		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
-						    entries, num_pipes, i));
+		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+									entries, I915_MAX_PIPES, pipe));
 
-		entries[i] = new_crtc_state->wm.skl.ddb;
+		entries[pipe] = new_crtc_state->wm.skl.ddb;
 		modeset_pipes &= ~BIT(pipe);
 
 		if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -15228,20 +15431,17 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 		if ((modeset_pipes & BIT(pipe)) == 0)
 			continue;
 
-		WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
-						    entries, num_pipes, i));
+		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+									entries, I915_MAX_PIPES, pipe));
 
-		entries[i] = new_crtc_state->wm.skl.ddb;
+		entries[pipe] = new_crtc_state->wm.skl.ddb;
 		modeset_pipes &= ~BIT(pipe);
 
 		intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
 	}
 
-	WARN_ON(modeset_pipes);
+	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
 
-	/* If 2nd DBuf slice is no more required disable it */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
 }
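Note the indexing change running through this function: entries[] is now
filled by pipe rather than by the atomic-state slot i. With a sparse state
the two diverge, and the overlap check would otherwise compare a candidate
allocation against the wrong rows. A standalone sketch of the overlap test
itself (the struct mirrors skl_ddb_entry; values are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define I915_MAX_PIPES 4

struct ddb_entry { unsigned int start, end; };	/* half-open [start, end) */

static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

static bool allocation_overlaps(const struct ddb_entry *candidate,
				const struct ddb_entry *entries,
				int num_entries, int ignore_idx)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (i != ignore_idx && ddb_overlaps(candidate, &entries[i]))
			return true;

	return false;
}

int main(void)
{
	/* Indexed by pipe: only pipe A (index 0) keeps its old range. */
	struct ddb_entry entries[I915_MAX_PIPES] = { [0] = { 0, 256 } };
	struct ddb_entry candidate = { 128, 512 };	/* pipe B's new ddb */

	/* Overlaps pipe A's slot, so pipe B must wait until A has moved. */
	printf("%d\n", allocation_overlaps(&candidate, entries,
					   I915_MAX_PIPES, 1));
	return 0;
}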
 
 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15338,10 +15538,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	if (state->modeset) {
 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
 
-		intel_set_cdclk_pre_plane_update(dev_priv,
-						 &state->cdclk.actual,
-						 &dev_priv->cdclk.actual,
-						 state->cdclk.pipe);
+		intel_set_cdclk_pre_plane_update(state);
 
 		/*
 		 * SKL workaround: bspec recommends we disable the SAGV when we
@@ -15371,16 +15568,17 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	if (state->modeset)
 		intel_encoders_update_prepare(state);
 
+	/* Enable any new DBuf slices we might need */
+	if (state->modeset)
+		icl_dbuf_slice_pre_update(state);
+
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	dev_priv->display.commit_modeset_enables(state);
 
 	if (state->modeset) {
 		intel_encoders_update_complete(state);
 
-		intel_set_cdclk_post_plane_update(dev_priv,
-						  &state->cdclk.actual,
-						  &dev_priv->cdclk.actual,
-						  state->cdclk.pipe);
+		intel_set_cdclk_post_plane_update(state);
 	}
 
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -15427,6 +15625,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 			dev_priv->display.optimize_watermarks(state, crtc);
 	}
 
+	/* Disable any DBuf slices we no longer need */
+	if (state->modeset)
+		icl_dbuf_slice_post_update(state);
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		intel_post_plane_update(state, crtc);
 
@@ -15570,7 +15772,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 
 	ret = intel_atomic_prepare_commit(state);
 	if (ret) {
-		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+		drm_dbg_atomic(&dev_priv->drm,
+			       "Preparing state failed with %i\n", ret);
 		i915_sw_fence_commit(&state->commit_ready);
 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
 		return ret;
@@ -15579,6 +15782,8 @@ static int intel_atomic_commit(struct drm_device *dev,
 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
 	if (!ret)
 		ret = drm_atomic_helper_swap_state(&state->base, true);
+	if (!ret)
+		intel_atomic_swap_global_state(state);
 
 	if (ret) {
 		i915_sw_fence_commit(&state->commit_ready);
@@ -15594,14 +15799,7 @@ static int intel_atomic_commit(struct drm_device *dev,
 	if (state->global_state_changed) {
 		assert_global_state_locked(dev_priv);
 
-		memcpy(dev_priv->min_cdclk, state->min_cdclk,
-		       sizeof(state->min_cdclk));
-		memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
-		       sizeof(state->min_voltage_level));
 		dev_priv->active_pipes = state->active_pipes;
-		dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
-
-		intel_cdclk_swap_state(state);
 	}
 
 	drm_atomic_state_get(&state->base);
@@ -15729,7 +15927,7 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
 
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @plane: drm plane to prepare for
+ * @_plane: drm plane to prepare for
  * @_new_plane_state: the plane state being prepared
  *
  * Prepares a framebuffer for usage on a display plane.  Generally this
@@ -15740,23 +15938,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
  * Returns 0 on success, negative error code on failure.
  */
 int
-intel_prepare_plane_fb(struct drm_plane *plane,
+intel_prepare_plane_fb(struct drm_plane *_plane,
 		       struct drm_plane_state *_new_plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(_plane);
 	struct intel_plane_state *new_plane_state =
 		to_intel_plane_state(_new_plane_state);
-	struct intel_atomic_state *intel_state =
+	struct intel_atomic_state *state =
 		to_intel_atomic_state(new_plane_state->uapi.state);
-	struct drm_i915_private *dev_priv = to_i915(plane->dev);
-	struct drm_framebuffer *fb = new_plane_state->hw.fb;
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct intel_plane_state *old_plane_state =
+		intel_atomic_get_old_plane_state(state, plane);
+	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
 	int ret;
 
 	if (old_obj) {
-		struct intel_crtc_state *crtc_state =
-			intel_atomic_get_new_crtc_state(intel_state,
-							to_intel_crtc(plane->state->crtc));
+		const struct intel_crtc_state *crtc_state =
+			intel_atomic_get_new_crtc_state(state,
+							to_intel_crtc(old_plane_state->hw.crtc));
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -15770,7 +15970,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		 * can safely continue.
 		 */
 		if (needs_modeset(crtc_state)) {
-			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+			ret = i915_sw_fence_await_reservation(&state->commit_ready,
 							      old_obj->base.resv, NULL,
 							      false, 0,
 							      GFP_KERNEL);
@@ -15780,7 +15980,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	}
 
 	if (new_plane_state->uapi.fence) { /* explicit fencing */
-		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
 						    new_plane_state->uapi.fence,
 						    I915_FENCE_TIMEOUT,
 						    GFP_KERNEL);
@@ -15807,12 +16007,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
 		struct dma_fence *fence;
 
-		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+		ret = i915_sw_fence_await_reservation(&state->commit_ready,
 						      obj->base.resv, NULL,
 						      false, I915_FENCE_TIMEOUT,
 						      GFP_KERNEL);
 		if (ret < 0)
-			return ret;
+			goto unpin_fb;
 
 		fence = dma_resv_get_excl_rcu(obj->base.resv);
 		if (fence) {
@@ -15833,12 +16033,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	 * that are not quite steady state without resorting to forcing
 	 * maximum clocks following a vblank miss (see do_rps_boost()).
 	 */
-	if (!intel_state->rps_interactive) {
+	if (!state->rps_interactive) {
 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
-		intel_state->rps_interactive = true;
+		state->rps_interactive = true;
 	}
 
 	return 0;
+
+unpin_fb:
+	intel_plane_unpin_fb(new_plane_state);
+
+	return ret;
 }
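The new unpin_fb label fixes an error-path leak: on an implicit-fencing
failure the function used to return with the framebuffer still pinned by the
earlier intel_plane_pin_fb() call. A minimal sketch of the unwind idiom,
with hypothetical acquire/release helpers standing in for the pin and unpin
steps:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for intel_plane_pin_fb()/intel_plane_unpin_fb(). */
static int acquire_fb(void) { return 0; }
static void release_fb(void) { puts("fb released"); }
static int await_fences(void) { return -EINVAL; }	/* simulated failure */

static int prepare(void)
{
	int ret;

	ret = acquire_fb();
	if (ret)
		return ret;	/* nothing acquired yet, plain return is fine */

	ret = await_fences();
	if (ret)
		goto unpin_fb;	/* not "return ret": the fb is still held */

	return 0;

unpin_fb:
	release_fb();
	return ret;
}

int main(void)
{
	printf("prepare() = %d\n", prepare());
	return 0;
}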
 
 /**
@@ -15854,13 +16059,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 {
 	struct intel_plane_state *old_plane_state =
 		to_intel_plane_state(_old_plane_state);
-	struct intel_atomic_state *intel_state =
+	struct intel_atomic_state *state =
 		to_intel_atomic_state(old_plane_state->uapi.state);
 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
+	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
 
-	if (intel_state->rps_interactive) {
+	if (!obj)
+		return;
+
+	if (state->rps_interactive) {
 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
-		intel_state->rps_interactive = false;
+		state->rps_interactive = false;
 	}
 
 	/* Should only be called after a successful intel_prepare_plane_fb()! */
@@ -16029,6 +16238,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 	new_plane_state->uapi.crtc_w = crtc_w;
 	new_plane_state->uapi.crtc_h = crtc_h;
 
+	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+
 	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
 						  old_plane_state, new_plane_state);
 	if (ret)
@@ -16113,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	struct intel_plane *plane;
 	const struct drm_plane_funcs *plane_funcs;
 	unsigned int supported_rotations;
-	unsigned int possible_crtcs;
 	const u32 *formats;
 	int num_formats;
 	int ret, zpos;
@@ -16194,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	plane->get_hw_state = i9xx_plane_get_hw_state;
 	plane->check_plane = i9xx_plane_check;
 
-	possible_crtcs = BIT(pipe);
-
 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-					       possible_crtcs, plane_funcs,
+					       0, plane_funcs,
 					       formats, num_formats,
 					       i9xx_format_modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
 					       "primary %c", pipe_name(pipe));
 	else
 		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-					       possible_crtcs, plane_funcs,
+					       0, plane_funcs,
 					       formats, num_formats,
 					       i9xx_format_modifiers,
 					       DRM_PLANE_TYPE_PRIMARY,
@@ -16247,7 +16455,6 @@ static struct intel_plane *
 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 			  enum pipe pipe)
 {
-	unsigned int possible_crtcs;
 	struct intel_plane *cursor;
 	int ret, zpos;
 
@@ -16280,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
 	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
 		cursor->cursor.size = ~0;
 
-	possible_crtcs = BIT(pipe);
-
 	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
-				       possible_crtcs, &intel_cursor_plane_funcs,
+				       0, &intel_cursor_plane_funcs,
 				       intel_cursor_formats,
 				       ARRAY_SIZE(intel_cursor_formats),
 				       cursor_format_modifiers,
@@ -16328,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = {
 	.get_vblank_counter = g4x_get_vblank_counter,
 	.enable_vblank = bdw_enable_vblank,
 	.disable_vblank = bdw_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs ilk_crtc_funcs = {
@@ -16336,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = {
 	.get_vblank_counter = g4x_get_vblank_counter,
 	.enable_vblank = ilk_enable_vblank,
 	.disable_vblank = ilk_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs g4x_crtc_funcs = {
@@ -16344,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = {
 	.get_vblank_counter = g4x_get_vblank_counter,
 	.enable_vblank = i965_enable_vblank,
 	.disable_vblank = i965_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs i965_crtc_funcs = {
@@ -16352,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
 	.get_vblank_counter = i915_get_vblank_counter,
 	.enable_vblank = i965_enable_vblank,
 	.disable_vblank = i965_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs i915gm_crtc_funcs = {
@@ -16360,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = {
 	.get_vblank_counter = i915_get_vblank_counter,
 	.enable_vblank = i915gm_enable_vblank,
 	.disable_vblank = i915gm_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -16368,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = {
 	.get_vblank_counter = i915_get_vblank_counter,
 	.enable_vblank = i8xx_enable_vblank,
 	.disable_vblank = i8xx_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_funcs i8xx_crtc_funcs = {
@@ -16376,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
 	/* no hw vblank counter */
 	.enable_vblank = i8xx_enable_vblank,
 	.disable_vblank = i8xx_disable_vblank,
+	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 };
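These repeated one-line additions reflect the 5.7 core change that moved the
vblank timestamp hook from struct drm_driver into struct drm_crtc_funcs. A
hedged sketch of the wiring for a generic driver (mydrv_crtc_funcs is
illustrative; the field and the core helper are real 5.7 interfaces, and
i915 supplies its own implementation instead):

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static const struct drm_crtc_funcs mydrv_crtc_funcs = {
	/* ... set_config, destroy, vblank on/off hooks as usual ... */

	/* Drivers with a scanout-position hook can use the core helper. */
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};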
 
 static struct intel_crtc *intel_crtc_alloc(void)
@@ -16405,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
 	kfree(crtc);
 }
 
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+	struct intel_plane *plane;
+
+	for_each_intel_plane(&dev_priv->drm, plane) {
+		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+								  plane->pipe);
+
+		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+	}
+}
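This helper runs only after every pipe's CRTC has been registered, because
each plane is created before its CRTC exists, so possible_crtcs can no
longer be derived from the CRTC at drm_universal_plane_init() time (the
plane-create paths above now pass 0 instead). A standalone sketch of the
mask drm_crtc_mask() produces (not i915 code):

#include <stdio.h>

/* Mirrors drm_crtc_mask(): the bit is the CRTC's registration index,
 * which i915 still asserts equals the pipe but need not in general. */
static unsigned int crtc_mask(unsigned int crtc_index)
{
	return 1u << crtc_index;
}

int main(void)
{
	printf("crtc 0 -> %#x, crtc 2 -> %#x\n", crtc_mask(0), crtc_mask(2));
	return 0;
}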
+
 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
 	struct intel_plane *primary, *cursor;
@@ -16483,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 	intel_color_init(crtc);
 
-	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
+	intel_crtc_crc_init(crtc);
+
+	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
 
 	return 0;
 
@@ -16543,10 +16769,10 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
 	if (!IS_MOBILE(dev_priv))
 		return false;
 
-	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
 		return false;
 
-	if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+	if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
 		return false;
 
 	return true;
@@ -16561,11 +16787,11 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
 		return false;
 
 	if (HAS_PCH_LPT_H(dev_priv) &&
-	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
 		return false;
 
 	/* DDI E can't be used if DDI A requires 4 lanes */
-	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
 		return false;
 
 	if (!dev_priv->vbt.int_crt_support)
@@ -16591,10 +16817,10 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
 		pps_num = 1;
 
 	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
-		u32 val = I915_READ(PP_CONTROL(pps_idx));
+		u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
 
 		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
-		I915_WRITE(PP_CONTROL(pps_idx), val);
+		intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
 	}
 }
 
@@ -16674,14 +16900,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		 * On SKL pre-D0 the strap isn't connected, so we assume
 		 * it's there.
 		 */
-		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
 		if (found || IS_GEN9_BC(dev_priv))
 			intel_ddi_init(dev_priv, PORT_A);
 
 		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
 		 * register */
-		found = I915_READ(SFUSE_STRAP);
+		found = intel_de_read(dev_priv, SFUSE_STRAP);
 
 		if (found & SFUSE_STRAP_DDIB_DETECTED)
 			intel_ddi_init(dev_priv, PORT_B);
@@ -16714,25 +16940,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		if (ilk_has_edp_a(dev_priv))
 			intel_dp_init(dev_priv, DP_A, PORT_A);
 
-		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
 			/* PCH SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
 			if (!found)
 				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
-			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
 				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
 		}
 
-		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
 			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
 
-		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
 			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
 
-		if (I915_READ(PCH_DP_C) & DP_DETECTED)
+		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
 			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
 
-		if (I915_READ(PCH_DP_D) & DP_DETECTED)
+		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		bool has_edp, has_port;
@@ -16757,16 +16983,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		 */
 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
-		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
 			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
-		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
 
 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
-		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
 			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
-		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
 			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
 
 		if (IS_CHERRYVIEW(dev_priv)) {
@@ -16775,9 +17001,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 			 * so no need to worry about it
 			 */
 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
-			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
 				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
-			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
 				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
 		}
 
@@ -16793,11 +17019,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
 		intel_crt_init(dev_priv);
 
-		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
-			DRM_DEBUG_KMS("probing SDVOB\n");
+		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
 			if (!found && IS_G4X(dev_priv)) {
-				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+				drm_dbg_kms(&dev_priv->drm,
+					    "probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
 			}
 
@@ -16807,22 +17034,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 
 		/* Before G4X, SDVOC doesn't have its own detect register */
 
-		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
-			DRM_DEBUG_KMS("probing SDVOC\n");
+		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
 		}
 
-		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
 
 			if (IS_G4X(dev_priv)) {
-				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+				drm_dbg_kms(&dev_priv->drm,
+					    "probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
 			}
 			if (IS_G4X(dev_priv))
 				intel_dp_init(dev_priv, DP_C, PORT_C);
 		}
 
-		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
+		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
 			intel_dp_init(dev_priv, DP_D, PORT_D);
 
 		if (SUPPORTS_TV(dev_priv))
@@ -16864,9 +17092,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 						unsigned int *handle)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
 	if (obj->userptr.mm) {
-		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+		drm_dbg(&i915->drm,
+			"attempting to use a userptr for a framebuffer, denied\n");
 		return -EINVAL;
 	}
 
@@ -16920,14 +17150,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		 */
 		if (tiling != I915_TILING_NONE &&
 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "tiling_mode doesn't match fb modifier\n");
 			goto err;
 		}
 	} else {
 		if (tiling == I915_TILING_X) {
 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
 		} else if (tiling == I915_TILING_Y) {
-			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "No Y tiling for legacy addfb\n");
 			goto err;
 		}
 	}
@@ -16937,10 +17169,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 				      mode_cmd->modifier[0])) {
 		struct drm_format_name_buf format_name;
 
-		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
-			      drm_get_format_name(mode_cmd->pixel_format,
-						  &format_name),
-			      mode_cmd->modifier[0]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "unsupported pixel format %s / modifier 0x%llx\n",
+			    drm_get_format_name(mode_cmd->pixel_format,
+						&format_name),
+			    mode_cmd->modifier[0]);
 		goto err;
 	}
 
@@ -16950,17 +17183,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	 */
 	if (INTEL_GEN(dev_priv) < 4 &&
 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "tiling_mode must match fb modifier exactly on gen2/3\n");
 		goto err;
 	}
 
 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
 					 mode_cmd->modifier[0]);
 	if (mode_cmd->pitches[0] > max_stride) {
-		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
-			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
-			      "tiled" : "linear",
-			      mode_cmd->pitches[0], max_stride);
+		drm_dbg_kms(&dev_priv->drm,
+			    "%s pitch (%u) must be at most %d\n",
+			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+			    "tiled" : "linear",
+			    mode_cmd->pitches[0], max_stride);
 		goto err;
 	}
 
@@ -16969,15 +17204,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	 * the fb pitch and fence stride match.
 	 */
 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
-		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
-			      mode_cmd->pitches[0], stride);
+		drm_dbg_kms(&dev_priv->drm,
+			    "pitch (%d) must match tiling stride (%d)\n",
+			    mode_cmd->pitches[0], stride);
 		goto err;
 	}
 
 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
 	if (mode_cmd->offsets[0] != 0) {
-		DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
-			      mode_cmd->offsets[0]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "plane 0 offset (0x%08x) must be 0\n",
+			    mode_cmd->offsets[0]);
 		goto err;
 	}
 
@@ -16987,14 +17224,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		u32 stride_alignment;
 
 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
-			DRM_DEBUG_KMS("bad plane %d handle\n", i);
+			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+				    i);
 			goto err;
 		}
 
 		stride_alignment = intel_fb_stride_alignment(fb, i);
 		if (fb->pitches[i] & (stride_alignment - 1)) {
-			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
-				      i, fb->pitches[i], stride_alignment);
+			drm_dbg_kms(&dev_priv->drm,
+				    "plane %d pitch (%d) must be at least %u byte aligned\n",
+				    i, fb->pitches[i], stride_alignment);
 			goto err;
 		}
 
@@ -17002,9 +17241,10 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
 
 			if (fb->pitches[i] != ccs_aux_stride) {
-				DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
-					      i,
-					      fb->pitches[i], ccs_aux_stride);
+				drm_dbg_kms(&dev_priv->drm,
+					    "ccs aux plane %d pitch (%d) must be %d\n",
+					    i,
+					    fb->pitches[i], ccs_aux_stride);
 				goto err;
 			}
 		}
@@ -17018,7 +17258,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 
 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
 	if (ret) {
-		DRM_ERROR("framebuffer init failed %d\n", ret);
+		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
 		goto err;
 	}
 
@@ -17048,17 +17288,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	return fb;
 }
 
-static void intel_atomic_state_free(struct drm_atomic_state *state)
-{
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
-	drm_atomic_state_default_release(state);
-
-	i915_sw_fence_fini(&intel_state->commit_ready);
-
-	kfree(state);
-}
-
 static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
 		 const struct drm_display_mode *mode)
@@ -17290,9 +17519,36 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 
 void intel_modeset_init_hw(struct drm_i915_private *i915)
 {
+	struct intel_cdclk_state *cdclk_state =
+		to_intel_cdclk_state(i915->cdclk.obj.state);
+
 	intel_update_cdclk(i915);
-	intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
-	i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
+	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
+	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+}
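Here the logical/actual cdclk configuration moves out of dev_priv and behind
a global state object: outside of a commit it is reached through the
->obj.state pointer and a cast, and during a commit the candidate copy is
installed by intel_atomic_swap_global_state(), seen earlier in this patch.
A standalone sketch of that duplicate/modify/swap life cycle (not the i915
implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct global_obj { void *state; };

/* The check phase works on a private copy of the committed state. */
static void *duplicate_state(const struct global_obj *obj, size_t size)
{
	void *copy = malloc(size);

	if (copy)
		memcpy(copy, obj->state, size);
	return copy;
}

/* Commit: the candidate becomes current and the superseded copy is
 * handed back to the caller to free. */
static void swap_state(struct global_obj *obj, void **state)
{
	void *old = obj->state;

	obj->state = *state;
	*state = old;
}

int main(void)
{
	struct global_obj cdclk;
	int *candidate;

	cdclk.state = malloc(sizeof(int));
	if (!cdclk.state)
		return 1;
	*(int *)cdclk.state = 300000;		/* committed cdclk, in kHz */

	candidate = duplicate_state(&cdclk, sizeof(int));
	if (!candidate)
		return 1;
	*candidate = 450000;			/* computed during check */

	swap_state(&cdclk, (void **)&candidate);
	printf("cdclk now %d kHz\n", *(int *)cdclk.state);

	free(candidate);			/* the superseded copy */
	free(cdclk.state);
	return 0;
}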
+
+static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+
+	drm_for_each_crtc(crtc, state->dev) {
+		struct drm_crtc_state *crtc_state;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+	}
+
+	drm_for_each_plane(plane, state->dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+	}
+
+	return 0;
 }
 
 /*
@@ -17305,9 +17561,8 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
  * through the atomic check code to calculate new watermark values in the
  * state object.
  */
-static void sanitize_watermarks(struct drm_device *dev)
+static void sanitize_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_atomic_state *state;
 	struct intel_atomic_state *intel_state;
 	struct intel_crtc *crtc;
@@ -17320,26 +17575,17 @@ static void sanitize_watermarks(struct drm_device *dev)
 	if (!dev_priv->display.optimize_watermarks)
 		return;
 
-	/*
-	 * We need to hold connection_mutex before calling duplicate_state so
-	 * that the connector loop is protected.
-	 */
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (WARN_ON(ret)) {
-		goto fail;
-	}
-
-	state = drm_atomic_helper_duplicate_state(dev, &ctx);
-	if (WARN_ON(IS_ERR(state)))
-		goto fail;
+	state = drm_atomic_state_alloc(&dev_priv->drm);
+	if (drm_WARN_ON(&dev_priv->drm, !state))
+		return;
 
 	intel_state = to_intel_atomic_state(state);
 
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	state->acquire_ctx = &ctx;
+
 	/*
 	 * Hardware readout is the only time we don't want to calculate
 	 * intermediate watermarks (since we don't trust the current
@@ -17348,22 +17594,13 @@ static void sanitize_watermarks(struct drm_device *dev)
 	if (!HAS_GMCH(dev_priv))
 		intel_state->skip_intermediate_wm = true;
 
-	ret = intel_atomic_check(dev, state);
-	if (ret) {
-		/*
-		 * If we fail here, it means that the hardware appears to be
-		 * programmed in a way that shouldn't be possible, given our
-		 * understanding of watermark requirements.  This might mean a
-		 * mistake in the hardware readout code or a mistake in the
-		 * watermark calculations for a given platform.  Raise a WARN
-		 * so that this is noticeable.
-		 *
-		 * If this actually happens, we'll have to just leave the
-		 * BIOS-programmed watermarks untouched and hope for the best.
-		 */
-		WARN(true, "Could not determine valid watermarks for inherited state\n");
-		goto put_state;
-	}
+	ret = sanitize_watermarks_add_affected(state);
+	if (ret)
+		goto fail;
+
+	ret = intel_atomic_check(&dev_priv->drm, state);
+	if (ret)
+		goto fail;
 
 	/* Write calculated watermark values back */
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
@@ -17373,9 +17610,29 @@ static void sanitize_watermarks(struct drm_device *dev)
 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
 	}
 
-put_state:
-	drm_atomic_state_put(state);
 fail:
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	/*
+	 * If we fail here, it means that the hardware appears to be
+	 * programmed in a way that shouldn't be possible, given our
+	 * understanding of watermark requirements.  This might mean a
+	 * mistake in the hardware readout code or a mistake in the
+	 * watermark calculations for a given platform.  Raise a WARN
+	 * so that this is noticeable.
+	 *
+	 * If this actually happens, we'll have to just leave the
+	 * BIOS-programmed watermarks untouched and hope for the best.
+	 */
+	drm_WARN(&dev_priv->drm, ret,
+		 "Could not determine valid watermarks for inherited state\n");
+
+	drm_atomic_state_put(state);
+
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 }
@@ -17384,7 +17641,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
 {
 	if (IS_GEN(dev_priv, 5)) {
 		u32 fdi_pll_clk =
-			I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
 
 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
 	} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
@@ -17393,7 +17650,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
 }
 
 static int intel_initial_commit(struct drm_device *dev)
@@ -17476,6 +17733,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
 
 	drm_mode_config_init(&i915->drm);
+	INIT_LIST_HEAD(&i915->global_obj_list);
 
 	mode_config->min_width = 0;
 	mode_config->min_height = 0;
@@ -17517,11 +17775,31 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
 	}
 }
 
-int intel_modeset_init(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
 {
-	struct drm_device *dev = &i915->drm;
-	enum pipe pipe;
-	struct intel_crtc *crtc;
+	intel_atomic_global_obj_cleanup(i915);
+	drm_mode_config_cleanup(&i915->drm);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+	if (plane_config->fb) {
+		struct drm_framebuffer *fb = &plane_config->fb->base;
+
+		/* We may only have the stub and not a full framebuffer */
+		if (drm_framebuffer_read_refcount(fb))
+			drm_framebuffer_put(fb);
+		else
+			kfree(fb);
+	}
+
+	if (plane_config->vma)
+		i915_vma_put(plane_config->vma);
+}
+
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
+{
 	int ret;
 
 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17530,6 +17808,10 @@ int intel_modeset_init(struct drm_i915_private *i915)
 
 	intel_mode_config_init(i915);
 
+	ret = intel_cdclk_init(i915);
+	if (ret)
+		return ret;
+
 	ret = intel_bw_init(i915);
 	if (ret)
 		return ret;
@@ -17542,26 +17824,38 @@ int intel_modeset_init(struct drm_i915_private *i915)
 
 	intel_fbc_init(i915);
 
+	return 0;
+}
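Splitting modeset init around interrupt setup untangles a probe-time
ordering problem: the workqueue, cdclk/bw state and FBC can be set up before
IRQs exist, while the CRTC and output probing in part #2 is meant to run
with them available. A hedged sketch of the intended call order
(mydrv_display_probe is illustrative; intel_irq_install() is the real i915
helper, error unwinding elided):

static int mydrv_display_probe(struct drm_i915_private *i915)
{
	int ret;

	ret = intel_modeset_init_noirq(i915);	/* part #1: wq, cdclk, bw, fbc */
	if (ret)
		return ret;

	ret = intel_irq_install(i915);		/* IRQs come up in between */
	if (ret)
		return ret;

	return intel_modeset_init(i915);	/* part #2: crtcs, outputs */
}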
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+	struct drm_device *dev = &i915->drm;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+	int ret;
+
 	intel_init_pm(i915);
 
 	intel_panel_sanitize_ssc(i915);
 
 	intel_gmbus_setup(i915);
 
-	DRM_DEBUG_KMS("%d display pipe%s available.\n",
-		      INTEL_NUM_PIPES(i915),
-		      INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+		    INTEL_NUM_PIPES(i915),
+		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
 
 	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
 		for_each_pipe(i915, pipe) {
 			ret = intel_crtc_init(i915, pipe);
 			if (ret) {
-				drm_mode_config_cleanup(dev);
+				intel_mode_config_cleanup(i915);
 				return ret;
 			}
 		}
 	}
 
+	intel_plane_possible_crtcs_init(i915);
 	intel_shared_dpll_init(dev);
 	intel_update_fdi_pll_freq(i915);
 
@@ -17601,6 +17895,8 @@ int intel_modeset_init(struct drm_i915_private *i915)
 		 * just get the first one.
 		 */
 		intel_find_initial_plane_obj(crtc, &plane_config);
+
+		plane_config_fini(&plane_config);
 	}
 
 	/*
@@ -17609,7 +17905,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
 	 * since the watermark calculation done here will use pstate->fb.
 	 */
 	if (!HAS_GMCH(i915))
-		sanitize_watermarks(dev);
+		sanitize_watermarks(i915);
 
 	/*
 	 * Force all active planes to recompute their states. So that on
@@ -17619,7 +17915,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
 	 */
 	ret = intel_initial_commit(dev);
 	if (ret)
-		DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
 
 	return 0;
 }
@@ -17638,10 +17934,12 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 	u32 dpll, fp;
 	int i;
 
-	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+	drm_WARN_ON(&dev_priv->drm,
+		    i9xx_calc_dpll_params(48000, &clock) != 25154);
 
-	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
-		      pipe_name(pipe), clock.vco, clock.dot);
+	drm_dbg_kms(&dev_priv->drm,
+		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+		    pipe_name(pipe), clock.vco, clock.dot);
 
 	fp = i9xx_dpll_compute_fp(&clock);
 	dpll = DPLL_DVO_2X_MODE |
@@ -17651,27 +17949,27 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 		PLL_REF_INPUT_DREFCLK |
 		DPLL_VCO_ENABLE;
 
-	I915_WRITE(FP0(pipe), fp);
-	I915_WRITE(FP1(pipe), fp);
+	intel_de_write(dev_priv, FP0(pipe), fp);
+	intel_de_write(dev_priv, FP1(pipe), fp);
 
-	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
-	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
-	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
-	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
-	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
-	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
-	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
 
 	/*
 	 * Apparently we need to have VGA mode enabled prior to changing
 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
 	 * dividers, even though the register value does change.
 	 */
-	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
-	I915_WRITE(DPLL(pipe), dpll);
+	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+	intel_de_write(dev_priv, DPLL(pipe), dpll);
 
 	/* Wait for the clocks to stabilize. */
-	POSTING_READ(DPLL(pipe));
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 	udelay(150);
 
 	/* The pixel multiplier can only be updated once the
@@ -17679,17 +17977,18 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 	 *
 	 * So write it again.
 	 */
-	I915_WRITE(DPLL(pipe), dpll);
+	intel_de_write(dev_priv, DPLL(pipe), dpll);
 
 	/* We do this three times for luck */
 	for (i = 0; i < 3 ; i++) {
-		I915_WRITE(DPLL(pipe), dpll);
-		POSTING_READ(DPLL(pipe));
+		intel_de_write(dev_priv, DPLL(pipe), dpll);
+		intel_de_posting_read(dev_priv, DPLL(pipe));
 		udelay(150); /* wait for warmup */
 	}
 
-	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
-	POSTING_READ(PIPECONF(pipe));
+	intel_de_write(dev_priv, PIPECONF(pipe),
+		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+	intel_de_posting_read(dev_priv, PIPECONF(pipe));
 
 	intel_wait_for_pipe_scanline_moving(crtc);
 }
@@ -17698,22 +17997,30 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 
-	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
-		      pipe_name(pipe));
+	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+		    pipe_name(pipe));
 
-	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
-	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
-	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
-	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
-	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
+		    DISPLAY_PLANE_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
+		    DISPLAY_PLANE_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
+		    DISPLAY_PLANE_ENABLE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
 
-	I915_WRITE(PIPECONF(pipe), 0);
-	POSTING_READ(PIPECONF(pipe));
+	intel_de_write(dev_priv, PIPECONF(pipe), 0);
+	intel_de_posting_read(dev_priv, PIPECONF(pipe));
 
 	intel_wait_for_pipe_scanline_stopped(crtc);
 
-	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
-	POSTING_READ(DPLL(pipe));
+	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+	intel_de_posting_read(dev_priv, DPLL(pipe));
 }
 
 static void
@@ -17736,8 +18043,9 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
 		if (pipe == crtc->pipe)
 			continue;
 
-		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
-			      plane->base.base.id, plane->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+			    plane->base.base.id, plane->base.name);
 
 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 		intel_plane_disable_noatomic(plane_crtc, plane);
@@ -17787,18 +18095,18 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 		if (transcoder_is_dsi(cpu_transcoder))
 			return;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		val &= ~HSW_FRAME_START_DELAY_MASK;
 		val |= HSW_FRAME_START_DELAY(0);
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	} else {
 		i915_reg_t reg = PIPECONF(cpu_transcoder);
 		u32 val;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
 		val |= PIPECONF_FRAME_START_DELAY(0);
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 
 	if (!crtc_state->has_pch_encoder)
@@ -17808,19 +18116,19 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
 		u32 val;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		val &= ~TRANS_FRAME_START_DELAY_MASK;
 		val |= TRANS_FRAME_START_DELAY(0);
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	} else {
 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
 		u32 val;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 }
 
@@ -17852,9 +18160,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 		 * gamma and CSC to match how we program our planes.
 		 */
 		if (INTEL_GEN(dev_priv) >= 9)
-			I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
-				   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
-				   SKL_BOTTOM_COLOR_CSC_ENABLE);
+			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
+				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
 	}
 
 	/* Adjust the state of the output pipe according to whether we
@@ -17926,16 +18233,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		crtc_state->hw.active;
 
 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
-		DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
-			      pipe_name(crtc->pipe));
+		drm_dbg_kms(&dev_priv->drm,
+			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+			    pipe_name(crtc->pipe));
 		has_active_crtc = false;
 	}
 
 	connector = intel_encoder_find_connector(encoder);
 	if (connector && !has_active_crtc) {
-		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
-			      encoder->base.base.id,
-			      encoder->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+			    encoder->base.base.id,
+			    encoder->base.name);
 
 		/* Connector is active, but has no active pipe. This is
 		 * fallout from our resume register restoring. Disable
@@ -17943,9 +18252,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 		if (crtc_state) {
 			struct drm_encoder *best_encoder;
 
-			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
-				      encoder->base.base.id,
-				      encoder->base.name);
+			drm_dbg_kms(&dev_priv->drm,
+				    "[ENCODER:%d:%s] manually disabled\n",
+				    encoder->base.base.id,
+				    encoder->base.name);
 
 			/* avoid oopsing in case the hooks consult best_encoder */
 			best_encoder = connector->base.state->best_encoder;
@@ -17998,9 +18308,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
 
 		intel_set_plane_visible(crtc_state, plane_state, visible);
 
-		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
-			      plane->base.base.id, plane->base.name,
-			      enableddisabled(visible), pipe_name(pipe));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+			    plane->base.base.id, plane->base.name,
+			    enableddisabled(visible), pipe_name(pipe));
 	}
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -18014,14 +18325,14 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_cdclk_state *cdclk_state =
+		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
 	enum pipe pipe;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 	struct drm_connector_list_iter conn_iter;
-	int i;
-
-	dev_priv->active_pipes = 0;
+	u8 active_pipes = 0;
 
 	for_each_intel_crtc(dev, crtc) {
 		struct intel_crtc_state *crtc_state =
@@ -18038,41 +18349,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 		crtc->active = crtc_state->hw.active;
 
 		if (crtc_state->hw.active)
-			dev_priv->active_pipes |= BIT(crtc->pipe);
+			active_pipes |= BIT(crtc->pipe);
 
-		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
-			      crtc->base.base.id, crtc->base.name,
-			      enableddisabled(crtc_state->hw.active));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CRTC:%d:%s] hw state readout: %s\n",
+			    crtc->base.base.id, crtc->base.name,
+			    enableddisabled(crtc_state->hw.active));
 	}
 
+	dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
+
 	readout_plane_state(dev_priv);
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
-							&pll->state.hw_state);
-
-		if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
-		    pll->info->id == DPLL_ID_EHL_DPLL4) {
-			pll->wakeref = intel_display_power_get(dev_priv,
-							       POWER_DOMAIN_DPLL_DC_OFF);
-		}
-
-		pll->state.crtc_mask = 0;
-		for_each_intel_crtc(dev, crtc) {
-			struct intel_crtc_state *crtc_state =
-				to_intel_crtc_state(crtc->base.state);
-
-			if (crtc_state->hw.active &&
-			    crtc_state->shared_dpll == pll)
-				pll->state.crtc_mask |= 1 << crtc->pipe;
-		}
-		pll->active_mask = pll->state.crtc_mask;
-
-		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
-			      pll->info->name, pll->state.crtc_mask, pll->on);
-	}
+	intel_dpll_readout_hw_state(dev_priv);
 
 	for_each_intel_encoder(dev, encoder) {
 		pipe = 0;
@@ -18089,10 +18378,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			encoder->base.crtc = NULL;
 		}
 
-		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
-			      encoder->base.base.id, encoder->base.name,
-			      enableddisabled(encoder->base.crtc),
-			      pipe_name(pipe));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+			    encoder->base.base.id, encoder->base.name,
+			    enableddisabled(encoder->base.crtc),
+			    pipe_name(pipe));
 	}
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
@@ -18103,7 +18393,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
 			connector->base.dpms = DRM_MODE_DPMS_ON;
 
-			encoder = connector->encoder;
+			encoder = intel_attached_encoder(connector);
 			connector->base.encoder = &encoder->base;
 
 			crtc = to_intel_crtc(encoder->base.crtc);
@@ -18124,9 +18414,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			connector->base.dpms = DRM_MODE_DPMS_OFF;
 			connector->base.encoder = NULL;
 		}
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
-			      connector->base.base.id, connector->base.name,
-			      enableddisabled(connector->base.encoder));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
+			    connector->base.base.id, connector->base.name,
+			    enableddisabled(connector->base.encoder));
 	}
 	drm_connector_list_iter_end(&conn_iter);
 
@@ -18190,19 +18481,20 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 					crtc_state->min_cdclk[plane->id] =
 						crtc_state->pixel_rate;
 			}
-			DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
-				      plane->base.base.id, plane->base.name,
-				      crtc_state->min_cdclk[plane->id]);
+			drm_dbg_kms(&dev_priv->drm,
+				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
+				    plane->base.base.id, plane->base.name,
+				    crtc_state->min_cdclk[plane->id]);
 		}
 
 		if (crtc_state->hw.active) {
 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
-			if (WARN_ON(min_cdclk < 0))
+			if (drm_WARN_ON(dev, min_cdclk < 0))
 				min_cdclk = 0;
 		}
 
-		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
-		dev_priv->min_voltage_level[crtc->pipe] =
+		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+		cdclk_state->min_voltage_level[crtc->pipe] =
 			crtc_state->min_voltage_level;
 
 		intel_bw_crtc_update(bw_state, crtc_state);
@@ -18241,53 +18533,55 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
 	 * Also known as Wa_14010480278.
 	 */
 	if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
-		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
-			   DARBF_GATING_DIS);
+		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
+			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
 
 	if (IS_HASWELL(dev_priv)) {
 		/*
 		 * WaRsPkgCStateDisplayPMReq:hsw
 		 * System hang if this isn't done before disabling all planes!
 		 */
-		I915_WRITE(CHICKEN_PAR1_1,
-			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+		intel_de_write(dev_priv, CHICKEN_PAR1_1,
+			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
 	}
 }
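
A minimal sketch of the read-modify-write shape these conversions share; the
helper name here is hypothetical, the driver open-codes it at each call site
so the workaround comments stay next to the registers they touch:

	/* Illustrative only: read a display register, set bits, write back. */
	static void example_de_set_bits(struct drm_i915_private *dev_priv,
					i915_reg_t reg, u32 set)
	{
		u32 val = intel_de_read(dev_priv, reg);

		intel_de_write(dev_priv, reg, val | set);
	}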
 
 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
 				       enum port port, i915_reg_t hdmi_reg)
 {
-	u32 val = I915_READ(hdmi_reg);
+	u32 val = intel_de_read(dev_priv, hdmi_reg);
 
 	if (val & SDVO_ENABLE ||
 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
 		return;
 
-	DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
-		      port_name(port));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Sanitizing transcoder select for HDMI %c\n",
+		    port_name(port));
 
 	val &= ~SDVO_PIPE_SEL_MASK;
 	val |= SDVO_PIPE_SEL(PIPE_A);
 
-	I915_WRITE(hdmi_reg, val);
+	intel_de_write(dev_priv, hdmi_reg, val);
 }
 
 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
 				     enum port port, i915_reg_t dp_reg)
 {
-	u32 val = I915_READ(dp_reg);
+	u32 val = intel_de_read(dev_priv, dp_reg);
 
 	if (val & DP_PORT_EN ||
 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
 		return;
 
-	DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
-		      port_name(port));
+	drm_dbg_kms(&dev_priv->drm,
+		    "Sanitizing transcoder select for DP %c\n",
+		    port_name(port));
 
 	val &= ~DP_PIPE_SEL_MASK;
 	val |= DP_PIPE_SEL(PIPE_A);
 
-	I915_WRITE(dp_reg, val);
+	intel_de_write(dev_priv, dp_reg, val);
 }
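
Both PCH port sanitizers reduce to one shape; a minimal sketch with the
mask/value pairs pulled out as parameters (the helper name is hypothetical):

	static void example_sanitize_pipe_sel(struct drm_i915_private *dev_priv,
					      i915_reg_t reg, u32 enable_bit,
					      u32 pipe_sel_mask, u32 pipe_a_sel)
	{
		u32 val = intel_de_read(dev_priv, reg);

		/* Enabled ports, and ports already on pipe A, are left alone. */
		if (val & enable_bit || (val & pipe_sel_mask) == pipe_a_sel)
			return;

		val &= ~pipe_sel_mask;
		val |= pipe_a_sel;
		intel_de_write(dev_priv, reg, val);
	}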
 
 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
@@ -18324,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 	struct intel_encoder *encoder;
 	struct intel_crtc *crtc;
 	intel_wakeref_t wakeref;
-	int i;
 
 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
@@ -18377,18 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 
 	intel_modeset_update_connector_atomic_state(dev);
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-		if (!pll->on || pll->active_mask)
-			continue;
-
-		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
-			      pll->info->name);
-
-		pll->info->funcs->disable(dev_priv, pll);
-		pll->on = false;
-	}
+	intel_dpll_sanitize_state(dev_priv);
 
 	if (IS_G4X(dev_priv)) {
 		g4x_wm_get_hw_state(dev_priv);
@@ -18408,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		u64 put_domains;
 
 		put_domains = modeset_get_crtc_power_domains(crtc_state);
-		if (WARN_ON(put_domains))
+		if (drm_WARN_ON(dev, put_domains))
 			modeset_put_power_domains(dev_priv, put_domains);
 	}
 
@@ -18444,7 +18726,8 @@ void intel_display_resume(struct drm_device *dev)
 	drm_modeset_acquire_fini(&ctx);
 
 	if (ret)
-		DRM_ERROR("Restoring old state failed with %i\n", ret);
+		drm_err(&dev_priv->drm,
+			"Restoring old state failed with %i\n", ret);
 	if (state)
 		drm_atomic_state_put(state);
 }
@@ -18467,21 +18750,19 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
 	drm_connector_list_iter_end(&conn_iter);
 }
 
+/* part #1: call before irq uninstall */
 void intel_modeset_driver_remove(struct drm_i915_private *i915)
 {
 	flush_workqueue(i915->flip_wq);
 	flush_workqueue(i915->modeset_wq);
 
 	flush_work(&i915->atomic_helper.free_work);
-	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
+	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+}
 
-	/*
-	 * Interrupts and polling as the first thing to avoid creating havoc.
-	 * Too much stuff here (turning off connectors, ...) would
-	 * experience fancy races otherwise.
-	 */
-	intel_irq_uninstall(i915);
-
+/* part #2: call after irq uninstall */
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
+{
 	/*
 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
 	 * poll handlers. Hence disable polling after hpd handling is shut down.
@@ -18507,14 +18788,12 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
 
 	intel_hdcp_component_fini(i915);
 
-	drm_mode_config_cleanup(&i915->drm);
+	intel_mode_config_cleanup(i915);
 
 	intel_overlay_cleanup(i915);
 
 	intel_gmbus_teardown(i915);
 
-	intel_bw_cleanup(i915);
-
 	destroy_workqueue(i915->flip_wq);
 	destroy_workqueue(i915->modeset_wq);
 
@@ -18523,6 +18802,15 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+	if (cpu_transcoder == TRANSCODER_EDP)
+		return HAS_TRANSCODER_EDP(dev_priv);
+	else
+		return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
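
A worked example of the mask test, assuming a hypothetical part with only
pipes A and B fused in, and relying on the non-EDP transcoder enum values
matching the pipe enum values:

	/* pipe_mask == BIT(PIPE_A) | BIT(PIPE_B) == 0x3 on this part */
	has_transcoder(dev_priv, TRANSCODER_A);   /* 0x3 & BIT(0) -> true  */
	has_transcoder(dev_priv, TRANSCODER_C);   /* 0x3 & BIT(2) -> false */
	has_transcoder(dev_priv, TRANSCODER_EDP); /* HAS_TRANSCODER_EDP() decides */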
+
 struct intel_display_error_state {
 
 	u32 power_well_driver;
@@ -18589,7 +18877,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 		return NULL;
 
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
+		error->power_well_driver = intel_de_read(dev_priv,
+							 HSW_PWR_WELL_CTL2);
 
 	for_each_pipe(dev_priv, i) {
 		error->pipe[i].power_domain_on =
@@ -18598,33 +18887,39 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 		if (!error->pipe[i].power_domain_on)
 			continue;
 
-		error->cursor[i].control = I915_READ(CURCNTR(i));
-		error->cursor[i].position = I915_READ(CURPOS(i));
-		error->cursor[i].base = I915_READ(CURBASE(i));
+		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
+		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
+		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
 
-		error->plane[i].control = I915_READ(DSPCNTR(i));
-		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
+		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
 		if (INTEL_GEN(dev_priv) <= 3) {
-			error->plane[i].size = I915_READ(DSPSIZE(i));
-			error->plane[i].pos = I915_READ(DSPPOS(i));
+			error->plane[i].size = intel_de_read(dev_priv,
+							     DSPSIZE(i));
+			error->plane[i].pos = intel_de_read(dev_priv,
+							    DSPPOS(i));
 		}
 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
-			error->plane[i].addr = I915_READ(DSPADDR(i));
+			error->plane[i].addr = intel_de_read(dev_priv,
+							     DSPADDR(i));
 		if (INTEL_GEN(dev_priv) >= 4) {
-			error->plane[i].surface = I915_READ(DSPSURF(i));
-			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+			error->plane[i].surface = intel_de_read(dev_priv,
+								DSPSURF(i));
+			error->plane[i].tile_offset = intel_de_read(dev_priv,
+								    DSPTILEOFF(i));
 		}
 
-		error->pipe[i].source = I915_READ(PIPESRC(i));
+		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
 
 		if (HAS_GMCH(dev_priv))
-			error->pipe[i].stat = I915_READ(PIPESTAT(i));
+			error->pipe[i].stat = intel_de_read(dev_priv,
+							    PIPESTAT(i));
 	}
 
 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
-		if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+		if (!has_transcoder(dev_priv, cpu_transcoder))
 			continue;
 
 		error->transcoder[i].available = true;
@@ -18636,13 +18931,20 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 
 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
-		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
-		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+		error->transcoder[i].conf = intel_de_read(dev_priv,
+							  PIPECONF(cpu_transcoder));
+		error->transcoder[i].htotal = intel_de_read(dev_priv,
+							    HTOTAL(cpu_transcoder));
+		error->transcoder[i].hblank = intel_de_read(dev_priv,
+							    HBLANK(cpu_transcoder));
+		error->transcoder[i].hsync = intel_de_read(dev_priv,
+							   HSYNC(cpu_transcoder));
+		error->transcoder[i].vtotal = intel_de_read(dev_priv,
+							    VTOTAL(cpu_transcoder));
+		error->transcoder[i].vblank = intel_de_read(dev_priv,
+							    VBLANK(cpu_transcoder));
+		error->transcoder[i].vsync = intel_de_read(dev_priv,
+							   VSYNC(cpu_transcoder));
 	}
 
 	return error;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 028aab7..adb1225 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
 #define _INTEL_DISPLAY_H_
 
 #include <drm/drm_util.h>
-#include <drm/i915_drm.h>
 
 enum link_m_n_set;
 struct dpll;
@@ -40,12 +39,15 @@ struct drm_framebuffer;
 struct drm_i915_error_state_buf;
 struct drm_i915_gem_object;
 struct drm_i915_private;
+struct drm_mode_fb_cmd2;
 struct drm_modeset_acquire_ctx;
 struct drm_plane;
 struct drm_plane_state;
 struct i915_ggtt_view;
+struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
+struct intel_crtc_state;
 struct intel_digital_port;
 struct intel_dp;
 struct intel_encoder;
@@ -54,7 +56,6 @@ struct intel_plane;
 struct intel_plane_state;
 struct intel_remapped_info;
 struct intel_rotation_info;
-struct intel_crtc_state;
 
 enum i915_gpio {
 	GPIOA,
@@ -312,10 +313,11 @@ enum phy_fia {
 };
 
 #define for_each_pipe(__dev_priv, __p) \
-	for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+	for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+		for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
 
 #define for_each_pipe_masked(__dev_priv, __p, __mask) \
-	for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+	for_each_pipe(__dev_priv, __p) \
 		for_each_if((__mask) & BIT(__p))
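
The reworked for_each_pipe() walks every possible pipe index and filters on
pipe_mask, so a part with a hole in the mask (say, a fused-off pipe B)
iterates correctly. A small usage sketch:

	enum pipe pipe;

	/* With pipe_mask == BIT(PIPE_A) | BIT(PIPE_C), the body runs for A and C only. */
	for_each_pipe(dev_priv, pipe)
		drm_dbg_kms(&dev_priv->drm, "pipe %c present\n", pipe_name(pipe));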
 
 #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -469,6 +471,8 @@ enum phy_fia {
 			     ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
 			     (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
 
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+			   u8 active_pipes);
 void intel_link_compute_m_n(u16 bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n,
@@ -486,6 +490,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
 bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
 
 void intel_plane_destroy(struct drm_plane *plane);
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -495,6 +500,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
 		      const char *name, u32 reg, int ref_freq);
 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
 			   const char *name, u32 reg);
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
 void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
@@ -520,6 +526,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
 
 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
@@ -608,8 +615,10 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
 
 /* modesetting */
 void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
 int intel_modeset_init(struct drm_i915_private *i915);
 void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
 void intel_display_resume(struct drm_device *dev);
 void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 0000000..1e6eb7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_csr.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_fbc.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sideband.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+	return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+		   dev_priv->fb_tracking.busy_bits);
+
+	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+		   dev_priv->fb_tracking.flip_bits);
+
+	return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+	intel_wakeref_t wakeref;
+
+	if (!HAS_FBC(dev_priv))
+		return -ENODEV;
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+	mutex_lock(&fbc->lock);
+
+	if (intel_fbc_is_active(dev_priv))
+		seq_puts(m, "FBC enabled\n");
+	else
+		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+	if (intel_fbc_is_active(dev_priv)) {
+		u32 mask;
+
+		if (INTEL_GEN(dev_priv) >= 8)
+			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+		else if (INTEL_GEN(dev_priv) >= 7)
+			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+		else if (INTEL_GEN(dev_priv) >= 5)
+			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+		else if (IS_G4X(dev_priv))
+			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+		else
+			mask = intel_de_read(dev_priv, FBC_STATUS) &
+				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+
+		seq_printf(m, "Compressing: %s\n", yesno(mask));
+	}
+
+	mutex_unlock(&fbc->lock);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return 0;
+}
+
+static int i915_fbc_false_color_get(void *data, u64 *val)
+{
+	struct drm_i915_private *dev_priv = data;
+
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+		return -ENODEV;
+
+	*val = dev_priv->fbc.false_color;
+
+	return 0;
+}
+
+static int i915_fbc_false_color_set(void *data, u64 val)
+{
+	struct drm_i915_private *dev_priv = data;
+	u32 reg;
+
+	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+		return -ENODEV;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
+	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
+	dev_priv->fbc.false_color = val;
+
+	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
+		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
+
+	mutex_unlock(&dev_priv->fbc.lock);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+			i915_fbc_false_color_get, i915_fbc_false_color_set,
+			"%llu\n");
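
A minimal sketch of the DEFINE_SIMPLE_ATTRIBUTE pattern used here, with
hypothetical names: a u64-backed debugfs knob reduces to a get/set pair plus
a printf format for reads, and the generated fops are then handed to
debugfs_create_file():

	static u64 example_value;

	static int example_get(void *data, u64 *val)
	{
		*val = example_value;
		return 0;
	}

	static int example_set(void *data, u64 val)
	{
		example_value = val;
		return 0;
	}

	DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");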
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	intel_wakeref_t wakeref;
+
+	if (!HAS_IPS(dev_priv))
+		return -ENODEV;
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915_modparams.enable_ips));
+
+	if (INTEL_GEN(dev_priv) >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
+
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	intel_wakeref_t wakeref;
+	bool sr_enabled = false;
+
+	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		/* no global SR status; inspect per-plane WM */;
+	else if (HAS_PCH_SPLIT(dev_priv))
+		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev_priv))
+		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev_priv))
+		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+
+	return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+	if (opregion->header)
+		seq_write(m, opregion->header, OPREGION_SIZE);
+
+	return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+	if (opregion->vbt)
+		seq_write(m, opregion->vbt, opregion->vbt_size);
+
+	return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_framebuffer *fbdev_fb = NULL;
+	struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
+		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+
+		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+			   fbdev_fb->base.width,
+			   fbdev_fb->base.height,
+			   fbdev_fb->base.format->depth,
+			   fbdev_fb->base.format->cpp[0] * 8,
+			   fbdev_fb->base.modifier,
+			   drm_framebuffer_read_refcount(&fbdev_fb->base));
+		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+		seq_putc(m, '\n');
+	}
+#endif
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	drm_for_each_fb(drm_fb, dev) {
+		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+		if (fb == fbdev_fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.format->depth,
+			   fb->base.format->cpp[0] * 8,
+			   fb->base.modifier,
+			   drm_framebuffer_read_refcount(&fb->base));
+		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+		seq_putc(m, '\n');
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+	u8 val;
+	static const char * const sink_status[] = {
+		"inactive",
+		"transition to active, capture and display",
+		"active, display from RFB",
+		"active, capture and display on sink device timings",
+		"transition to inactive, capture and display, timing re-sync",
+		"reserved",
+		"reserved",
+		"sink internal error",
+	};
+	struct drm_connector *connector = m->private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct intel_dp *intel_dp =
+		intel_attached_dp(to_intel_connector(connector));
+	int ret;
+
+	if (!CAN_PSR(dev_priv)) {
+		seq_puts(m, "PSR Unsupported\n");
+		return -ENODEV;
+	}
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+	if (ret == 1) {
+		const char *str = "unknown";
+
+		val &= DP_PSR_SINK_STATE_MASK;
+		if (val < ARRAY_SIZE(sink_status))
+			str = sink_status[val];
+		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+	} else {
+		return ret;
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
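
drm_dp_dpcd_readb() returns the number of bytes transferred, which is why the
success check above is "ret == 1" rather than "ret == 0". The same idiom,
sketched with an explicit error fallback:

	u8 val;
	ssize_t ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret != 1)	/* short transfer or negative errno */
		return ret < 0 ? ret : -EIO;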
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+	u32 val, status_val;
+	const char *status = "unknown";
+
+	if (dev_priv->psr.psr2_enabled) {
+		static const char * const live_status[] = {
+			"IDLE",
+			"CAPTURE",
+			"CAPTURE_FS",
+			"SLEEP",
+			"BUFON_FW",
+			"ML_UP",
+			"SU_STANDBY",
+			"FAST_SLEEP",
+			"DEEP_SLEEP",
+			"BUF_ON",
+			"TG_ON"
+		};
+		val = intel_de_read(dev_priv,
+				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+			      EDP_PSR2_STATUS_STATE_SHIFT;
+		if (status_val < ARRAY_SIZE(live_status))
+			status = live_status[status_val];
+	} else {
+		static const char * const live_status[] = {
+			"IDLE",
+			"SRDONACK",
+			"SRDENT",
+			"BUFOFF",
+			"BUFON",
+			"AUXACK",
+			"SRDOFFACK",
+			"SRDENT_ON",
+		};
+		val = intel_de_read(dev_priv,
+				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
+		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+			      EDP_PSR_STATUS_STATE_SHIFT;
+		if (status_val < ARRAY_SIZE(live_status))
+			status = live_status[status_val];
+	}
+
+	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct i915_psr *psr = &dev_priv->psr;
+	intel_wakeref_t wakeref;
+	const char *status;
+	bool enabled;
+	u32 val;
+
+	if (!HAS_PSR(dev_priv))
+		return -ENODEV;
+
+	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+	if (psr->dp)
+		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+	seq_puts(m, "\n");
+
+	if (!psr->sink_support)
+		return 0;
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+	mutex_lock(&psr->lock);
+
+	if (psr->enabled)
+		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+	else
+		status = "disabled";
+	seq_printf(m, "PSR mode: %s\n", status);
+
+	if (!psr->enabled) {
+		seq_printf(m, "PSR sink not reliable: %s\n",
+			   yesno(psr->sink_not_reliable));
+
+		goto unlock;
+	}
+
+	if (psr->psr2_enabled) {
+		val = intel_de_read(dev_priv,
+				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+		enabled = val & EDP_PSR2_ENABLE;
+	} else {
+		val = intel_de_read(dev_priv,
+				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+		enabled = val & EDP_PSR_ENABLE;
+	}
+	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+		   enableddisabled(enabled), val);
+	psr_source_status(dev_priv, m);
+	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+		   psr->busy_frontbuffer_bits);
+
+	/*
+	 * SKL+ Perf counter is reset to 0 every time DC state is entered
+	 */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+		val = intel_de_read(dev_priv,
+				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+		val &= EDP_PSR_PERF_CNT_MASK;
+		seq_printf(m, "Performance counter: %u\n", val);
+	}
+
+	if (psr->debug & I915_PSR_DEBUG_IRQ) {
+		seq_printf(m, "Last attempted entry at: %lld\n",
+			   psr->last_entry_attempt);
+		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+	}
+
+	if (psr->psr2_enabled) {
+		u32 su_frames_val[3];
+		int frame;
+
+		/*
+	 * Read all 3 registers beforehand to minimize the chance of
+	 * crossing a frame boundary between the register reads.
+		 */
+		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+			val = intel_de_read(dev_priv,
+					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+			su_frames_val[frame / 3] = val;
+		}
+
+		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+			u32 su_blocks;
+
+			su_blocks = su_frames_val[frame / 3] &
+				    PSR2_SU_STATUS_MASK(frame);
+			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+			seq_printf(m, "%d\t%d\n", frame, su_blocks);
+		}
+	}
+
+unlock:
+	mutex_unlock(&psr->lock);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return 0;
+}
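
Each PSR2_SU_STATUS register packs three per-frame block counts, which is why
the read loop above strides by 3 and the decode indexes su_frames_val[frame / 3].
A worked decode for frame 4, using the same mask/shift macros:

	/* Frame 4 lives in register 4 / 3 == 1, field 4 % 3 == 1. */
	u32 reg_val = su_frames_val[4 / 3];
	u32 su_blocks = (reg_val & PSR2_SU_STATUS_MASK(4)) >>
			PSR2_SU_STATUS_SHIFT(4);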
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+	struct drm_i915_private *dev_priv = data;
+	intel_wakeref_t wakeref;
+	int ret;
+
+	if (!CAN_PSR(dev_priv))
+		return -ENODEV;
+
+	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+	ret = intel_psr_debug_set(dev_priv, val);
+
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+	struct drm_i915_private *dev_priv = data;
+
+	if (!CAN_PSR(dev_priv))
+		return -ENODEV;
+
+	*val = READ_ONCE(dev_priv->psr.debug);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+			"%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	int i;
+
+	mutex_lock(&power_domains->lock);
+
+	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+	for (i = 0; i < power_domains->power_well_count; i++) {
+		struct i915_power_well *power_well;
+		enum intel_display_power_domain power_domain;
+
+		power_well = &power_domains->power_wells[i];
+		seq_printf(m, "%-25s %d\n", power_well->desc->name,
+			   power_well->count);
+
+		for_each_power_domain(power_domain, power_well->desc->domains)
+			seq_printf(m, "  %-23s %d\n",
+				 intel_display_power_domain_str(power_domain),
+				 power_domains->domain_use_count[power_domain]);
+	}
+
+	mutex_unlock(&power_domains->lock);
+
+	return 0;
+}
+
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	intel_wakeref_t wakeref;
+	struct intel_csr *csr;
+	i915_reg_t dc5_reg, dc6_reg = {};
+
+	if (!HAS_CSR(dev_priv))
+		return -ENODEV;
+
+	csr = &dev_priv->csr;
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+	seq_printf(m, "path: %s\n", csr->fw_path);
+
+	if (!csr->dmc_payload)
+		goto out;
+
+	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+		   CSR_VERSION_MINOR(csr->version));
+
+	if (INTEL_GEN(dev_priv) >= 12) {
+		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+		/*
+		 * NOTE: DMC_DEBUG3 is a general purpose reg.
+		 * According to B.Specs:49196, the DMC f/w reuses the DC5/6
+		 * counter reg for DC3CO debugging and validation, but the
+		 * TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
+		 */
+		seq_printf(m, "DC3CO count: %d\n",
+			   intel_de_read(dev_priv, DMC_DEBUG3));
+	} else {
+		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+						 SKL_CSR_DC3_DC5_COUNT;
+		if (!IS_GEN9_LP(dev_priv))
+			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+	}
+
+	seq_printf(m, "DC3 -> DC5 count: %d\n",
+		   intel_de_read(dev_priv, dc5_reg));
+	if (dc6_reg.reg)
+		seq_printf(m, "DC5 -> DC6 count: %d\n",
+			   intel_de_read(dev_priv, dc6_reg));
+
+out:
+	seq_printf(m, "program base: 0x%08x\n",
+		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
+	seq_printf(m, "ssp base: 0x%08x\n",
+		   intel_de_read(dev_priv, CSR_SSP_BASE));
+	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+				 const struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < tabs; i++)
+		seq_putc(m, '\t');
+
+	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+			       struct intel_crtc *crtc,
+			       struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_connector_list_iter conn_iter;
+	struct drm_connector *connector;
+
+	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+		   encoder->base.base.id, encoder->base.name);
+
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		const struct drm_connector_state *conn_state =
+			connector->state;
+
+		if (conn_state->best_encoder != &encoder->base)
+			continue;
+
+		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+			   connector->base.id, connector->name);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+	const struct drm_display_mode *mode = panel->fixed_mode;
+
+	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	bool hdcp_cap, hdcp2_cap;
+
+	hdcp_cap = intel_hdcp_capable(intel_connector);
+	hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+	if (hdcp_cap)
+		seq_puts(m, "HDCP1.4 ");
+	if (hdcp2_cap)
+		seq_puts(m, "HDCP2.2 ");
+
+	if (!hdcp_cap && !hdcp2_cap)
+		seq_puts(m, "None");
+
+	seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+			  struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+		intel_panel_info(m, &intel_connector->panel);
+
+	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+				&intel_dp->aux);
+	if (intel_connector->hdcp.shim) {
+		seq_puts(m, "\tHDCP version: ");
+		intel_hdcp_info(m, intel_connector);
+	}
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+			  struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+	struct intel_dp_mst_encoder *intel_mst =
+		enc_to_mst(intel_encoder);
+	struct intel_digital_port *intel_dig_port = intel_mst->primary;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+					intel_connector->port);
+
+	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+	if (intel_connector->hdcp.shim) {
+		seq_puts(m, "\tHDCP version: ");
+		intel_hdcp_info(m, intel_connector);
+	}
+}
+
+static void intel_lvds_info(struct seq_file *m,
+			    struct intel_connector *intel_connector)
+{
+	intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+				 struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	const struct drm_connector_state *conn_state = connector->state;
+	struct intel_encoder *encoder =
+		to_intel_encoder(conn_state->best_encoder);
+	const struct drm_display_mode *mode;
+
+	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+		   connector->base.id, connector->name,
+		   drm_get_connector_status_name(connector->status));
+
+	if (connector->status == connector_status_disconnected)
+		return;
+
+	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+		   connector->display_info.width_mm,
+		   connector->display_info.height_mm);
+	seq_printf(m, "\tsubpixel order: %s\n",
+		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+	if (!encoder)
+		return;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_eDP:
+		if (encoder->type == INTEL_OUTPUT_DP_MST)
+			intel_dp_mst_info(m, intel_connector);
+		else
+			intel_dp_info(m, intel_connector);
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (encoder->type == INTEL_OUTPUT_LVDS)
+			intel_lvds_info(m, intel_connector);
+		break;
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (encoder->type == INTEL_OUTPUT_HDMI ||
+		    encoder->type == INTEL_OUTPUT_DDI)
+			intel_hdmi_info(m, intel_connector);
+		break;
+	default:
+		break;
+	}
+
+	seq_printf(m, "\tmodes:\n");
+	list_for_each_entry(mode, &connector->modes, head)
+		intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+	switch (type) {
+	case DRM_PLANE_TYPE_OVERLAY:
+		return "OVL";
+	case DRM_PLANE_TYPE_PRIMARY:
+		return "PRI";
+	case DRM_PLANE_TYPE_CURSOR:
+		return "CUR";
+	/*
+	 * Deliberately omitting default: to generate compiler warnings
+	 * when a new drm_plane_type gets added.
+	 */
+	}
+
+	return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+	/*
+	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
+	 * but print them all here so that misused values are visible.
+	 */
+	snprintf(buf, bufsize,
+		 "%s%s%s%s%s%s(0x%08x)",
+		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+		 rotation);
+}
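
A usage example for the decoder above: DRM_MODE_ROTATE_90 is BIT(1) and
DRM_MODE_REFLECT_X is BIT(4), so their combination is 0x12 and both names
show up in the output:

	char buf[48];

	/* Writes "90 FLIPX (0x00000012)" into buf. */
	plane_rotation(buf, sizeof(buf), DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X);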
+
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+	const struct intel_plane_state *plane_state =
+		to_intel_plane_state(plane->base.state);
+	const struct drm_framebuffer *fb = plane_state->uapi.fb;
+	struct drm_format_name_buf format_name;
+	struct drm_rect src, dst;
+	char rot_str[48];
+
+	src = drm_plane_state_src(&plane_state->uapi);
+	dst = drm_plane_state_dest(&plane_state->uapi);
+
+	if (fb)
+		drm_get_format_name(fb->format->format, &format_name);
+
+	plane_rotation(rot_str, sizeof(rot_str),
+		       plane_state->uapi.rotation);
+
+	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+		   fb ? fb->width : 0, fb ? fb->height : 0,
+		   DRM_RECT_FP_ARG(&src),
+		   DRM_RECT_ARG(&dst),
+		   rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+	const struct intel_plane_state *plane_state =
+		to_intel_plane_state(plane->base.state);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	struct drm_format_name_buf format_name;
+	char rot_str[48];
+
+	if (!fb)
+		return;
+
+	drm_get_format_name(fb->format->format, &format_name);
+
+	plane_rotation(rot_str, sizeof(rot_str),
+		       plane_state->hw.rotation);
+
+	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+		   fb->base.id, format_name.str,
+		   fb->width, fb->height,
+		   yesno(plane_state->uapi.visible),
+		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
+		   DRM_RECT_ARG(&plane_state->uapi.dst),
+		   rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_plane *plane;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+			   plane->base.base.id, plane->base.name,
+			   plane_type(plane->base.type));
+		intel_plane_uapi_info(m, plane);
+		intel_plane_hw_info(m, plane);
+	}
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	int num_scalers = crtc->num_scalers;
+	int i;
+
+	/* Not all platforms have a scaler */
+	if (num_scalers) {
+		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+			   num_scalers,
+			   crtc_state->scaler_state.scaler_users,
+			   crtc_state->scaler_state.scaler_id);
+
+		for (i = 0; i < num_scalers; i++) {
+			const struct intel_scaler *sc =
+				&crtc_state->scaler_state.scalers[i];
+
+			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+				   i, yesno(sc->in_use), sc->mode);
+		}
+		seq_puts(m, "\n");
+	} else {
+		seq_puts(m, "\tNo scalers available on this platform\n");
+	}
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	const struct intel_crtc_state *crtc_state =
+		to_intel_crtc_state(crtc->base.state);
+	struct intel_encoder *encoder;
+
+	seq_printf(m, "[CRTC:%d:%s]:\n",
+		   crtc->base.base.id, crtc->base.name);
+
+	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+		   yesno(crtc_state->uapi.enable),
+		   yesno(crtc_state->uapi.active),
+		   DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+	if (crtc_state->hw.enable) {
+		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+			   yesno(crtc_state->hw.active),
+			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+		intel_scaler_info(m, crtc);
+	}
+
+	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+				    crtc_state->uapi.encoder_mask)
+		intel_encoder_info(m, crtc, encoder);
+
+	intel_plane_info(m, crtc);
+
+	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+		   yesno(!crtc->cpu_fifo_underrun_disabled),
+		   yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_crtc *crtc;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+	drm_modeset_lock_all(dev);
+
+	seq_printf(m, "CRTC info\n");
+	seq_printf(m, "---------\n");
+	for_each_intel_crtc(dev, crtc)
+		intel_crtc_info(m, crtc);
+
+	seq_printf(m, "\n");
+	seq_printf(m, "Connector info\n");
+	seq_printf(m, "--------------\n");
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter)
+		intel_connector_info(m, connector);
+	drm_connector_list_iter_end(&conn_iter);
+
+	drm_modeset_unlock_all(dev);
+
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+	return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	int i;
+
+	drm_modeset_lock_all(dev);
+
+	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+		   dev_priv->dpll.ref_clks.nssc,
+		   dev_priv->dpll.ref_clks.ssc);
+
+	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+			   pll->info->id);
+		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n",
+			   pll->state.hw_state.dpll_md);
+		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
+		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
+		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
+		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
+		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
+		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
+			   pll->state.hw_state.mg_refclkin_ctl);
+		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+			   pll->state.hw_state.mg_clktop2_coreclkctl1);
+		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
+			   pll->state.hw_state.mg_clktop2_hsclkctl);
+		seq_printf(m, " mg_pll_div0:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_div0);
+		seq_printf(m, " mg_pll_div1:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_div1);
+		seq_printf(m, " mg_pll_lf:    0x%08x\n",
+			   pll->state.hw_state.mg_pll_lf);
+		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+			   pll->state.hw_state.mg_pll_frac_lock);
+		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
+			   pll->state.hw_state.mg_pll_ssc);
+		seq_printf(m, " mg_pll_bias:  0x%08x\n",
+			   pll->state.hw_state.mg_pll_bias);
+		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+
+	seq_printf(m, "Isochronous Priority Control: %s\n",
+			yesno(dev_priv->ipc_enabled));
+	return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (!HAS_IPC(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	intel_wakeref_t wakeref;
+	bool enable;
+	int ret;
+
+	ret = kstrtobool_from_user(ubuf, len, &enable);
+	if (ret < 0)
+		return ret;
+
+	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+		if (!dev_priv->ipc_enabled && enable)
+			drm_info(&dev_priv->drm,
+				 "Enabling IPC: WM will be proper only after next commit\n");
+		dev_priv->wm.distrust_bios_wm = true;
+		dev_priv->ipc_enabled = enable;
+		intel_enable_ipc(dev_priv);
+	}
+
+	return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_ipc_status_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_ipc_status_write
+};
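
A minimal sketch of the writable seq_file pattern above, with hypothetical
names: single_open() binds the show callback, and the write side parses a
boolean with kstrtobool_from_user():

	static bool example_enabled;

	static int example_show(struct seq_file *m, void *data)
	{
		seq_printf(m, "%s\n", example_enabled ? "enabled" : "disabled");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	static ssize_t example_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
	{
		int ret = kstrtobool_from_user(ubuf, len, &example_enabled);

		return ret < 0 ? ret : len;
	}

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
		.open = example_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
		.write = example_write,
	};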
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	struct skl_ddb_entry *entry;
+	struct intel_crtc *crtc;
+
+	if (INTEL_GEN(dev_priv) < 9)
+		return -ENODEV;
+
+	drm_modeset_lock_all(dev);
+
+	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
+
+		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
+				   entry->start, entry->end,
+				   skl_ddb_entry_size(entry));
+		}
+
+		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
+			   entry->end, skl_ddb_entry_size(entry));
+	}
+
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+static void drrs_status_per_crtc(struct seq_file *m,
+				 struct drm_device *dev,
+				 struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_drrs *drrs = &dev_priv->drrs;
+	int vrefresh = 0;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		if (connector->state->crtc != &intel_crtc->base)
+			continue;
+
+		seq_printf(m, "%s:\n", connector->name);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	seq_puts(m, "\n");
+
+	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+		struct intel_panel *panel;
+
+		mutex_lock(&drrs->mutex);
+		/* DRRS Supported */
+		seq_puts(m, "\tDRRS Supported: Yes\n");
+
+		/* disable_drrs() will make drrs->dp NULL */
+		if (!drrs->dp) {
+			seq_puts(m, "Idleness DRRS: Disabled\n");
+			if (dev_priv->psr.enabled)
+				seq_puts(m,
+				"\tAs PSR is enabled, DRRS is not enabled\n");
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+
+		panel = &drrs->dp->attached_connector->panel;
+		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+					drrs->busy_frontbuffer_bits);
+
+		seq_puts(m, "\n\t\t");
+		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+			vrefresh = panel->fixed_mode->vrefresh;
+		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+			vrefresh = panel->downclock_mode->vrefresh;
+		} else {
+			seq_printf(m, "DRRS_State: Unknown(%d)\n",
+						drrs->refresh_rate_type);
+			mutex_unlock(&drrs->mutex);
+			return;
+		}
+		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+		seq_puts(m, "\n\t\t");
+		mutex_unlock(&drrs->mutex);
+	} else {
+		/* DRRS not supported. Print the VBT parameter. */
+		seq_puts(m, "\tDRRS Supported: No");
+	}
+	seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_crtc *intel_crtc;
+	int active_crtc_cnt = 0;
+
+	drm_modeset_lock_all(dev);
+	for_each_intel_crtc(dev, intel_crtc) {
+		if (intel_crtc->base.state->active) {
+			active_crtc_cnt++;
+			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
+
+			drrs_status_per_crtc(m, dev, intel_crtc);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+
+	if (!active_crtc_cnt)
+		seq_puts(m, "No active crtc found\n");
+
+	return 0;
+}
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_encoder *intel_encoder;
+	struct intel_digital_port *intel_dig_port;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
+		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		intel_dig_port = enc_to_dig_port(intel_encoder);
+		if (!intel_dig_port->dp.can_mst)
+			continue;
+
+		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+			   intel_dig_port->base.base.base.id,
+			   intel_dig_port->base.base.name);
+		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+
+static ssize_t i915_displayport_test_active_write(struct file *file,
+						  const char __user *ubuf,
+						  size_t len, loff_t *offp)
+{
+	char *input_buffer;
+	int status = 0;
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct intel_dp *intel_dp;
+	int val = 0;
+
+	dev = ((struct seq_file *)file->private_data)->private;
+
+	if (len == 0)
+		return 0;
+
+	input_buffer = memdup_user_nul(ubuf, len);
+	if (IS_ERR(input_buffer))
+		return PTR_ERR(input_buffer);
+
+	drm_dbg(&to_i915(dev)->drm,
+		"Copied %d bytes from user\n", (unsigned int)len);
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct intel_encoder *encoder;
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		encoder = to_intel_encoder(connector->encoder);
+		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		if (encoder && connector->status == connector_status_connected) {
+			intel_dp = enc_to_intel_dp(encoder);
+			status = kstrtoint(input_buffer, 10, &val);
+			if (status < 0)
+				break;
+			drm_dbg(&to_i915(dev)->drm,
+				"Got %d for test active\n", val);
+			/* To prevent erroneous activation of the compliance
+			 * testing code, only accept an actual value of 1 here
+			 */
+			if (val == 1)
+				intel_dp->compliance.test_active = true;
+			else
+				intel_dp->compliance.test_active = false;
+		}
+	}
+	drm_connector_list_iter_end(&conn_iter);
+	kfree(input_buffer);
+	if (status < 0)
+		return status;
+
+	*offp += len;
+	return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct intel_dp *intel_dp;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct intel_encoder *encoder;
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		encoder = to_intel_encoder(connector->encoder);
+		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		if (encoder && connector->status == connector_status_connected) {
+			intel_dp = enc_to_intel_dp(encoder);
+			if (intel_dp->compliance.test_active)
+				seq_puts(m, "1");
+			else
+				seq_puts(m, "0");
+		} else
+			seq_puts(m, "0");
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+					     struct file *file)
+{
+	return single_open(file, i915_displayport_test_active_show,
+			   inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_displayport_test_active_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_displayport_test_active_write
+};
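
Together with the test_data and test_type files below, this gives DP
compliance tools their kernel hook. A hedged usage sketch, with an
illustrative debugfs path:

	/*
	 * echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
	 * ... sink requests a test ...
	 * cat /sys/kernel/debug/dri/0/i915_dp_test_type
	 * cat /sys/kernel/debug/dri/0/i915_dp_test_data
	 *
	 * Only a literal "1" arms test_active; any other integer disarms it.
	 */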
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct intel_dp *intel_dp;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct intel_encoder *encoder;
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		encoder = to_intel_encoder(connector->encoder);
+		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		if (encoder && connector->status == connector_status_connected) {
+			intel_dp = enc_to_intel_dp(encoder);
+			if (intel_dp->compliance.test_type ==
+			    DP_TEST_LINK_EDID_READ)
+				seq_printf(m, "%lx",
+					   intel_dp->compliance.test_data.edid);
+			else if (intel_dp->compliance.test_type ==
+				 DP_TEST_LINK_VIDEO_PATTERN) {
+				seq_printf(m, "hdisplay: %d\n",
+					   intel_dp->compliance.test_data.hdisplay);
+				seq_printf(m, "vdisplay: %d\n",
+					   intel_dp->compliance.test_data.vdisplay);
+				seq_printf(m, "bpc: %u\n",
+					   intel_dp->compliance.test_data.bpc);
+			}
+		} else
+			seq_puts(m, "0");
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct intel_dp *intel_dp;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct intel_encoder *encoder;
+
+		if (connector->connector_type !=
+		    DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		encoder = to_intel_encoder(connector->encoder);
+		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		if (encoder && connector->status == connector_status_connected) {
+			intel_dp = enc_to_intel_dp(encoder);
+			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+		} else
+			seq_puts(m, "0");
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
+	int level;
+	int num_levels;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev_priv))
+		num_levels = 1;
+	else if (IS_G4X(dev_priv))
+		num_levels = 3;
+	else
+		num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++) {
+		unsigned int latency = wm[level];
+
+		/*
+		 * - WM1+ latency values in 0.5us units
+		 * - latencies are in us on gen9/vlv/chv
+		 */
+		if (INTEL_GEN(dev_priv) >= 9 ||
+		    IS_VALLEYVIEW(dev_priv) ||
+		    IS_CHERRYVIEW(dev_priv) ||
+		    IS_G4X(dev_priv))
+			latency *= 10;
+		else if (level > 0)
+			latency *= 5;
+
+		seq_printf(m, "WM%d %u (%u.%u usec)\n",
+			   level, wm[level], latency / 10, latency % 10);
+	}
+
+	drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	const u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.pri_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	const u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.spr_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	const u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.cur_latency;
+
+	wm_latency_show(m, latencies);
+
+	return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (HAS_GMCH(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (HAS_GMCH(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+				size_t len, loff_t *offp, u16 wm[8])
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_device *dev = &dev_priv->drm;
+	u16 new[8] = { 0 };
+	int num_levels;
+	int level;
+	int ret;
+	char tmp[32];
+
+	if (IS_CHERRYVIEW(dev_priv))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev_priv))
+		num_levels = 1;
+	else if (IS_G4X(dev_priv))
+		num_levels = 3;
+	else
+		num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
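+	/*
+	 * Expect one decimal value per watermark level; e.g. a sketch for a
+	 * platform with four levels (hypothetical values):
+	 *   echo "12 12 12 12" > i915_pri_wm_latency
+	 */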
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+		     &new[0], &new[1], &new[2], &new[3],
+		     &new[4], &new[5], &new[6], &new[7]);
+	if (ret != num_levels)
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++)
+		wm[level] = new[level];
+
+	drm_modeset_unlock_all(dev);
+
+	return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.pri_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.spr_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	u16 *latencies;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		latencies = dev_priv->wm.skl_latency;
+	else
+		latencies = dev_priv->wm.cur_latency;
+
+	return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = pri_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = spr_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = cur_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = cur_wm_latency_write
+};
+
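+/*
+ * Reports the current HPD storm threshold and whether a storm has been
+ * detected (i.e. the IRQ re-enable work is still pending).
+ */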
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+
+	/*
+	 * Synchronize with everything first in case there's been an HPD
+	 * storm, but we haven't finished handling it in the kernel yet.
+	 */
+	intel_synchronize_irq(dev_priv);
+	flush_work(&dev_priv->hotplug.dig_port_work);
+	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+
+	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+	seq_printf(m, "Detected: %s\n",
+		   yesno(delayed_work_pending(&hotplug->reenable_work)));
+
+	return 0;
+}
+
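+/*
+ * Accepts a decimal threshold or the keyword "reset"; e.g. (sketch)
+ * "echo 0 > i915_hpd_storm_ctl" disables storm detection entirely, while
+ * "reset" restores the default threshold.
+ */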
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+					const char __user *ubuf, size_t len,
+					loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	unsigned int new_threshold;
+	int i;
+	char *newline;
+	char tmp[16];
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	/* Strip newline, if any */
+	newline = strchr(tmp, '\n');
+	if (newline)
+		*newline = '\0';
+
+	if (strcmp(tmp, "reset") == 0)
+		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+		return -EINVAL;
+
+	if (new_threshold > 0)
+		drm_dbg_kms(&dev_priv->drm,
+			    "Setting HPD storm detection threshold to %d\n",
+			    new_threshold);
+	else
+		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	hotplug->hpd_storm_threshold = new_threshold;
+	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
+	for_each_hpd_pin(i)
+		hotplug->stats[i].count = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Re-enable hpd immediately if we were in an irq storm */
+	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+	return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_hpd_storm_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_hpd_storm_ctl_write
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = m->private;
+
+	seq_printf(m, "Enabled: %s\n",
+		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+	return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_hpd_short_storm_ctl_show,
+			   inode->i_private);
+}
+
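+/*
+ * Accepts a boolean or the keyword "reset", which restores the default:
+ * short storm detection is enabled only on machines without DP MST
+ * support.
+ */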
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+					      const char __user *ubuf,
+					      size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct i915_hotplug *hotplug = &dev_priv->hotplug;
+	char *newline;
+	char tmp[16];
+	int i;
+	bool new_state;
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	/* Strip newline, if any */
+	newline = strchr(tmp, '\n');
+	if (newline)
+		*newline = '\0';
+
+	/* Reset to the "default" state for this system */
+	if (strcmp(tmp, "reset") == 0)
+		new_state = !HAS_DP_MST(dev_priv);
+	else if (kstrtobool(tmp, &new_state) != 0)
+		return -EINVAL;
+
+	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+		    new_state ? "En" : "Dis");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	hotplug->hpd_short_storm_enabled = new_state;
+	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
+	for_each_hpd_pin(i)
+		hotplug->stats[i].count = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	/* Re-enable hpd immediately if we were in an irq storm */
+	flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+	return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_hpd_short_storm_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_hpd_short_storm_ctl_write,
+};
+
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_crtc *crtc;
+
+	if (INTEL_GEN(dev_priv) < 7)
+		return -ENODEV;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct drm_connector_list_iter conn_iter;
+		struct intel_crtc_state *crtc_state;
+		struct drm_connector *connector;
+		struct drm_crtc_commit *commit;
+		int ret;
+
+		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+		if (ret)
+			return ret;
+
+		crtc_state = to_intel_crtc_state(crtc->base.state);
+
+		if (!crtc_state->hw.active ||
+		    !crtc_state->has_drrs)
+			goto out;
+
+		commit = crtc_state->uapi.commit;
+		if (commit) {
+			ret = wait_for_completion_interruptible(&commit->hw_done);
+			if (ret)
+				goto out;
+		}
+
+		drm_connector_list_iter_begin(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter) {
+			struct intel_encoder *encoder;
+			struct intel_dp *intel_dp;
+
+			if (!(crtc_state->uapi.connector_mask &
+			      drm_connector_mask(connector)))
+				continue;
+
+			encoder = intel_attached_encoder(to_intel_connector(connector));
+			if (encoder->type != INTEL_OUTPUT_EDP)
+				continue;
+
+			drm_dbg(&dev_priv->drm,
+				"Manually %sabling DRRS. %llu\n",
+				val ? "en" : "dis", val);
+
+			intel_dp = enc_to_intel_dp(encoder);
+			if (val)
+				intel_edp_drrs_enable(intel_dp,
+						      crtc_state);
+			else
+				intel_edp_drrs_disable(intel_dp,
+						       crtc_state);
+		}
+		drm_connector_list_iter_end(&conn_iter);
+
+out:
+		drm_modeset_unlock(&crtc->base.mutex);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
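+/*
+ * Write-only control: any non-zero value enables eDP DRRS on eligible
+ * pipes, zero disables it.
+ */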
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+			       const char __user *ubuf,
+			       size_t cnt, loff_t *ppos)
+{
+	struct drm_i915_private *dev_priv = filp->private_data;
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = &dev_priv->drm;
+	int ret;
+	bool reset;
+
+	ret = kstrtobool_from_user(ubuf, cnt, &reset);
+	if (ret)
+		return ret;
+
+	if (!reset)
+		return cnt;
+
+	for_each_intel_crtc(dev, intel_crtc) {
+		struct drm_crtc_commit *commit;
+		struct intel_crtc_state *crtc_state;
+
+		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+		if (ret)
+			return ret;
+
+		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+		commit = crtc_state->uapi.commit;
+		if (commit) {
+			ret = wait_for_completion_interruptible(&commit->hw_done);
+			if (!ret)
+				ret = wait_for_completion_interruptible(&commit->flip_done);
+		}
+
+		if (!ret && crtc_state->hw.active) {
+			drm_dbg_kms(&dev_priv->drm,
+				    "Re-arming FIFO underruns on pipe %c\n",
+				    pipe_name(intel_crtc->pipe));
+
+			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+		}
+
+		drm_modeset_unlock(&intel_crtc->base.mutex);
+
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_fbc_reset_underrun(dev_priv);
+	if (ret)
+		return ret;
+
+	return cnt;
+}
+
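+/*
+ * Writing a truthy value re-arms FIFO underrun reporting on all active
+ * pipes and resets the FBC underrun state.
+ */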
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = i915_fifo_underrun_reset_write,
+	.llseek = default_llseek,
+};
+
+static const struct drm_info_list intel_display_debugfs_list[] = {
+	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_ips_status", i915_ips_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_opregion", i915_opregion, 0},
+	{"i915_vbt", i915_vbt, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_power_domain_info", i915_power_domain_info, 0},
+	{"i915_dmc_info", i915_dmc_info, 0},
+	{"i915_display_info", i915_display_info, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+	{"i915_dp_mst_info", i915_dp_mst_info, 0},
+	{"i915_ddb_info", i915_ddb_info, 0},
+	{"i915_drrs_status", i915_drrs_status, 0},
+};
+
+static const struct {
+	const char *name;
+	const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
+	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
+	{"i915_dp_test_data", &i915_displayport_test_data_fops},
+	{"i915_dp_test_type", &i915_displayport_test_type_fops},
+	{"i915_dp_test_active", &i915_displayport_test_active_fops},
+	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
+	{"i915_ipc_status", &i915_ipc_status_fops},
+	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
+	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
+};
+
+int intel_display_debugfs_register(struct drm_i915_private *i915)
+{
+	struct drm_minor *minor = i915->drm.primary;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+		debugfs_create_file(intel_display_debugfs_files[i].name,
+				    S_IRUGO | S_IWUSR,
+				    minor->debugfs_root,
+				    to_i915(minor->dev),
+				    intel_display_debugfs_files[i].fops);
+	}
+
+	return drm_debugfs_create_files(intel_display_debugfs_list,
+					ARRAY_SIZE(intel_display_debugfs_list),
+					minor->debugfs_root, minor);
+}
+
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_dp *intel_dp =
+		intel_attached_dp(to_intel_connector(connector));
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	seq_printf(m, "Panel power up delay: %d\n",
+		   intel_dp->panel_power_up_delay);
+	seq_printf(m, "Panel power down delay: %d\n",
+		   intel_dp->panel_power_down_delay);
+	seq_printf(m, "Backlight on delay: %d\n",
+		   intel_dp->backlight_on_delay);
+	seq_printf(m, "Backlight off delay: %d\n",
+		   intel_dp->backlight_off_delay);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
+
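+/*
+ * Reports the sink's HDCP capability, provided the connector is connected
+ * and has an HDCP shim.
+ */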
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	/* Bail out if the connector does not support HDCP */
+	if (!intel_connector->hdcp.shim)
+		return -EINVAL;
+
+	seq_printf(m, "%s:%d HDCP version: ", connector->name,
+		   connector->base.id);
+	intel_hdcp_info(m, intel_connector);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct drm_device *dev = connector->dev;
+	struct drm_crtc *crtc;
+	struct intel_dp *intel_dp;
+	struct drm_modeset_acquire_ctx ctx;
+	struct intel_crtc_state *crtc_state = NULL;
+	int ret = 0;
+	bool try_again = false;
+
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+	do {
+		try_again = false;
+		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+				       &ctx);
+		if (ret) {
+			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+				try_again = true;
+				continue;
+			}
+			break;
+		}
+		crtc = connector->state->crtc;
+		if (connector->status != connector_status_connected || !crtc) {
+			ret = -ENODEV;
+			break;
+		}
+		ret = drm_modeset_lock(&crtc->mutex, &ctx);
+		if (ret == -EDEADLK) {
+			ret = drm_modeset_backoff(&ctx);
+			if (!ret) {
+				try_again = true;
+				continue;
+			}
+			break;
+		} else if (ret) {
+			break;
+		}
+		intel_dp = intel_attached_dp(to_intel_connector(connector));
+		crtc_state = to_intel_crtc_state(crtc->state);
+		seq_printf(m, "DSC_Enabled: %s\n",
+			   yesno(crtc_state->dsc.compression_enable));
+		seq_printf(m, "DSC_Sink_Support: %s\n",
+			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+		seq_printf(m, "Force_DSC_Enable: %s\n",
+			   yesno(intel_dp->force_dsc_en));
+		if (!intel_dp_is_edp(intel_dp))
+			seq_printf(m, "FEC_Sink_Support: %s\n",
+				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+	} while (try_again);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
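+/*
+ * Writing a boolean sets intel_dp->force_dsc_en; the flag is picked up
+ * when the next mode is computed for this connector.
+ */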
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t len, loff_t *offp)
+{
+	bool dsc_enable = false;
+	int ret;
+	struct drm_connector *connector =
+		((struct seq_file *)file->private_data)->private;
+	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	if (len == 0)
+		return 0;
+
+	drm_dbg(&i915->drm,
+		"Copied %zu bytes from user to force DSC\n", len);
+
+	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+	if (ret < 0)
+		return ret;
+
+	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+		(dsc_enable) ? "true" : "false");
+	intel_dp->force_dsc_en = dsc_enable;
+
+	*offp += len;
+	return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+				     struct file *file)
+{
+	return single_open(file, i915_dsc_fec_support_show,
+			   inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_dsc_fec_support_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_dsc_fec_support_write
+};
+
+/**
+ * intel_connector_debugfs_add - add i915 specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, negative error codes on error.
+ */
+int intel_connector_debugfs_add(struct drm_connector *connector)
+{
+	struct dentry *root = connector->debugfs_entry;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+	/* The connector must have been registered beforehand. */
+	if (!root)
+		return -ENODEV;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+				    connector, &i915_panel_fops);
+		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+				    connector, &i915_psr_sink_status_fops);
+	}
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+				    connector, &i915_hdcp_sink_capability_fops);
+	}
+
+	if (INTEL_GEN(dev_priv) >= 10 &&
+	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+				    connector, &i915_dsc_fec_support_fops);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
new file mode 100644
index 0000000..a3bea1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_H__
+#define __INTEL_DISPLAY_DEBUGFS_H__
+
+struct drm_connector;
+struct drm_i915_private;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_display_debugfs_register(struct drm_i915_private *i915);
+int intel_connector_debugfs_add(struct drm_connector *connector);
+#else
+static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+#endif
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 46c40db..246e406 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
 #include "intel_display_types.h"
 #include "intel_dpio_phy.h"
 #include "intel_hotplug.h"
+#include "intel_pm.h"
 #include "intel_sideband.h"
 #include "intel_tc.h"
 #include "intel_vga.h"
@@ -159,7 +160,7 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 				    struct i915_power_well *power_well)
 {
-	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
 	power_well->desc->ops->enable(dev_priv, power_well);
 	power_well->hw_enabled = true;
 }
@@ -167,7 +168,7 @@ static void intel_power_well_enable(struct drm_i915_private *dev_priv,
 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
 				     struct i915_power_well *power_well)
 {
-	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
 	power_well->hw_enabled = false;
 	power_well->desc->ops->disable(dev_priv, power_well);
 }
@@ -182,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv,
 static void intel_power_well_put(struct drm_i915_private *dev_priv,
 				 struct i915_power_well *power_well)
 {
-	WARN(!power_well->count, "Use count on power well %s is already zero",
-	     power_well->desc->name);
+	drm_WARN(&dev_priv->drm, !power_well->count,
+		 "Use count on power well %s is already zero",
+		 power_well->desc->name);
 
 	if (!--power_well->count)
 		intel_power_well_disable(dev_priv, power_well);
@@ -289,11 +291,11 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
 	if (intel_de_wait_for_set(dev_priv, regs->driver,
 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
-		DRM_DEBUG_KMS("%s power well enable timeout\n",
-			      power_well->desc->name);
+		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+			    power_well->desc->name);
 
 		/* An AUX timeout is expected if the TBT DP tunnel is down. */
-		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+		drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
 	}
 }
 
@@ -304,11 +306,11 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
 	u32 ret;
 
-	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
-	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
 	if (regs->kvmr.reg)
-		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
-	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
 
 	return ret;
 }
@@ -330,23 +332,25 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
 	 * Skip the wait in case any of the request bits are set and print a
 	 * diagnostic message.
 	 */
-	wait_for((disabled = !(I915_READ(regs->driver) &
+	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
 	if (disabled)
 		return;
 
-	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
-		      power_well->desc->name,
-		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+	drm_dbg_kms(&dev_priv->drm,
+		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+		    power_well->desc->name,
+		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
 }
 
 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
 					   enum skl_power_gate pg)
 {
 	/* Timeout 5us for PG#0, for other PGs 1us */
-	WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
-				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
 }
 
 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -372,17 +376,18 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
 	}
 
-	val = I915_READ(regs->driver);
-	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+	val = intel_de_read(dev_priv, regs->driver);
+	intel_de_write(dev_priv, regs->driver,
+		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 	hsw_wait_for_power_well_enable(dev_priv, power_well);
 
 	/* Display WA #1178: cnl */
 	if (IS_CANNONLAKE(dev_priv) &&
 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
-		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
-		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
 	}
 
 	if (wait_fuses)
@@ -403,8 +408,9 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
 	hsw_power_well_pre_disable(dev_priv,
 				   power_well->desc->hsw.irq_pipe_mask);
 
-	val = I915_READ(regs->driver);
-	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+	val = intel_de_read(dev_priv, regs->driver);
+	intel_de_write(dev_priv, regs->driver,
+		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 	hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
@@ -419,14 +425,16 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 	u32 val;
 
-	WARN_ON(!IS_ICELAKE(dev_priv));
+	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 
-	val = I915_READ(regs->driver);
-	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+	val = intel_de_read(dev_priv, regs->driver);
+	intel_de_write(dev_priv, regs->driver,
+		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 
 	if (INTEL_GEN(dev_priv) < 12) {
-		val = I915_READ(ICL_PORT_CL_DW12(phy));
-		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+			       val | ICL_LANE_ENABLE_AUX);
 	}
 
 	hsw_wait_for_power_well_enable(dev_priv, power_well);
@@ -434,9 +442,9 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	/* Display WA #1178: icl */
 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
-		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
-		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
 	}
 }
 
@@ -449,13 +457,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
 	u32 val;
 
-	WARN_ON(!IS_ICELAKE(dev_priv));
+	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 
-	val = I915_READ(ICL_PORT_CL_DW12(phy));
-	I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+		       val & ~ICL_LANE_ENABLE_AUX);
 
-	val = I915_READ(regs->driver);
-	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+	val = intel_de_read(dev_priv, regs->driver);
+	intel_de_write(dev_priv, regs->driver,
+		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
 
 	hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
@@ -485,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
 	int refs = hweight64(power_well->desc->domains &
 			     async_put_domains_mask(&dev_priv->power_domains));
 
-	WARN_ON(refs > power_well->count);
+	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
 
 	return refs;
 }
@@ -515,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 			continue;
 
 		dig_port = enc_to_dig_port(encoder);
-		if (WARN_ON(!dig_port))
+		if (drm_WARN_ON(&dev_priv->drm, !dig_port))
 			continue;
 
 		if (dig_port->aux_ch != aux_ch) {
@@ -526,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	if (WARN_ON(!dig_port))
+	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
 		return;
 
-	WARN_ON(!intel_tc_port_ref_held(dig_port));
+	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
 }
 
 #else
@@ -552,11 +562,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 
 	icl_tc_port_assert_ref_held(dev_priv, power_well);
 
-	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
 	val &= ~DP_AUX_CH_CTL_TBT_IO;
 	if (power_well->desc->hsw.is_tc_tbt)
 		val |= DP_AUX_CH_CTL_TBT_IO;
-	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
 
 	hsw_power_well_enable(dev_priv, power_well);
 
@@ -564,11 +574,13 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 		enum tc_port tc_port;
 
 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
-		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+			       HIP_INDEX_VAL(tc_port, 0x2));
 
 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
-			DRM_WARN("Timeout waiting TC uC health\n");
+			drm_warn(&dev_priv->drm,
+				 "Timeout waiting TC uC health\n");
 	}
 }
 
@@ -596,7 +608,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
 	u32 val;
 
-	val = I915_READ(regs->driver);
+	val = intel_de_read(dev_priv, regs->driver);
 
 	/*
 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -606,22 +618,26 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
 	 */
 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
-		val |= I915_READ(regs->bios);
+		val |= intel_de_read(dev_priv, regs->bios);
 
 	return (val & mask) == mask;
 }
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 {
-	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
-		  "DC9 already programmed to be enabled.\n");
-	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-		  "DC5 still not disabled to enable DC9.\n");
-	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
-		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
-		  "Power well 2 on.\n");
-	WARN_ONCE(intel_irqs_enabled(dev_priv),
-		  "Interrupts not disabled yet.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+		      "DC9 already programmed to be enabled.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      intel_de_read(dev_priv, DC_STATE_EN) &
+		      DC_STATE_EN_UPTO_DC5,
+		      "DC5 still not disabled to enable DC9.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+		      "Power well 2 on.\n");
+	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+		      "Interrupts not disabled yet.\n");
 
 	 /*
 	  * TODO: check for the following to verify the conditions to enter DC9
@@ -634,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 
 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
 {
-	WARN_ONCE(intel_irqs_enabled(dev_priv),
-		  "Interrupts not disabled yet.\n");
-	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
-		  "DC5 still not disabled.\n");
+	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+		      "Interrupts not disabled yet.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      intel_de_read(dev_priv, DC_STATE_EN) &
+		      DC_STATE_EN_UPTO_DC5,
+		      "DC5 still not disabled.\n");
 
 	 /*
 	  * TODO: check for the following to verify DC9 state was indeed
@@ -655,7 +673,7 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 	int rereads = 0;
 	u32 v;
 
-	I915_WRITE(DC_STATE_EN, state);
+	intel_de_write(dev_priv, DC_STATE_EN, state);
 
 	/* It has been observed that disabling the dc6 state sometimes
 	 * doesn't stick and dmc keeps returning old value. Make sure
@@ -663,10 +681,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 	 * we are confident that state is exactly what we want.
 	 */
 	do  {
-		v = I915_READ(DC_STATE_EN);
+		v = intel_de_read(dev_priv, DC_STATE_EN);
 
 		if (v != state) {
-			I915_WRITE(DC_STATE_EN, state);
+			intel_de_write(dev_priv, DC_STATE_EN, state);
 			rewrites++;
 			rereads = 0;
 		} else if (rereads++ > 5) {
@@ -676,13 +694,15 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
 	} while (rewrites < 100);
 
 	if (v != state)
-		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
-			  state, v);
+		drm_err(&dev_priv->drm,
+			"Writing dc state to 0x%x failed, now 0x%x\n",
+			state, v);
 
 	/* Most of the times we need one retry, avoid spam */
 	if (rewrites > 1)
-		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
-			      state, rewrites);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Rewrote dc state to 0x%x %d times\n",
+			    state, rewrites);
 }
 
 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
@@ -708,10 +728,11 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
 
-	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
-		      dev_priv->csr.dc_state, val);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Resetting DC state tracking from %02x to %02x\n",
+		    dev_priv->csr.dc_state, val);
 	dev_priv->csr.dc_state = val;
 }
 
@@ -743,18 +764,19 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
 	u32 val;
 	u32 mask;
 
-	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm,
+			     state & ~dev_priv->csr.allowed_dc_mask))
 		state &= dev_priv->csr.allowed_dc_mask;
 
-	val = I915_READ(DC_STATE_EN);
+	val = intel_de_read(dev_priv, DC_STATE_EN);
 	mask = gen9_dc_mask(dev_priv);
-	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
-		      val & mask, state);
+	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+		    val & mask, state);
 
 	/* Check if DMC is ignoring our DC state requests */
 	if ((val & mask) != dev_priv->csr.dc_state)
-		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
-			  dev_priv->csr.dc_state, val & mask);
+		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+			dev_priv->csr.dc_state, val & mask);
 
 	val &= ~mask;
 	val |= state;
@@ -791,7 +813,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
 
 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
 {
-	DRM_DEBUG_KMS("Enabling DC3CO\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 }
 
@@ -799,10 +821,10 @@ static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	DRM_DEBUG_KMS("Disabling DC3CO\n");
-	val = I915_READ(DC_STATE_EN);
+	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+	val = intel_de_read(dev_priv, DC_STATE_EN);
 	val &= ~DC_STATE_DC3CO_STATUS;
-	I915_WRITE(DC_STATE_EN, val);
+	intel_de_write(dev_priv, DC_STATE_EN, val);
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 	/*
 	 * Delay of 200us DC3CO Exit time B.Spec 49196
@@ -814,7 +836,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc9(dev_priv);
 
-	DRM_DEBUG_KMS("Enabling DC9\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
 	/*
 	 * Power sequencer reset is not needed on
 	 * platforms with South Display Engine on PCH,
@@ -829,7 +851,7 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 {
 	assert_can_disable_dc9(dev_priv);
 
-	DRM_DEBUG_KMS("Disabling DC9\n");
+	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
@@ -838,10 +860,13 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
 
 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
-		  "CSR program storage start is NULL\n");
-	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
-	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
+		      "CSR program storage start is NULL\n");
+	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
+		      "CSR SSP Base Not fine\n");
+	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
+		      "CSR HTP Not fine\n");
 }
 
 static struct i915_power_well *
@@ -861,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv,
 	 * the first power well and hope the WARN gets reported so we can fix
 	 * our driver.
 	 */
-	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+	drm_WARN(&dev_priv->drm, 1,
+		 "Power well %d not defined for this platform\n",
+		 power_well_id);
 	return &dev_priv->power_domains.power_wells[0];
 }
 
@@ -884,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
 	mutex_lock(&power_domains->lock);
 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
 
-	if (WARN_ON(!power_well))
+	if (drm_WARN_ON(&dev_priv->drm, !power_well))
 		goto unlock;
 
 	state = sanitize_target_dc_state(dev_priv, state);
@@ -912,13 +939,22 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
 
 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
 {
-	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
-					SKL_DISP_PW_2);
+	enum i915_power_well_id high_pg;
 
-	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+	/* Power wells at this level and above must be disabled for DC5 entry */
+	if (INTEL_GEN(dev_priv) >= 12)
+		high_pg = TGL_DISP_PW_3;
+	else
+		high_pg = SKL_DISP_PW_2;
 
-	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
-		  "DC5 already programmed to be enabled.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      intel_display_power_well_is_enabled(dev_priv, high_pg),
+		      "Power wells above platform's DC5 limit still enabled.\n");
+
+	drm_WARN_ONCE(&dev_priv->drm,
+		      (intel_de_read(dev_priv, DC_STATE_EN) &
+		       DC_STATE_EN_UPTO_DC5),
+		      "DC5 already programmed to be enabled.\n");
 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
 
 	assert_csr_loaded(dev_priv);
@@ -928,22 +964,25 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc5(dev_priv);
 
-	DRM_DEBUG_KMS("Enabling DC5\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
 
 	/* Wa Display #1183: skl,kbl,cfl */
 	if (IS_GEN9_BC(dev_priv))
-		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-			   SKL_SELECT_ALTERNATE_DC_EXIT);
+		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
 
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
 }
 
 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 {
-	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
-		  "Backlight is not disabled.\n");
-	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
-		  "DC6 already programmed to be enabled.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+		      "Backlight is not disabled.\n");
+	drm_WARN_ONCE(&dev_priv->drm,
+		      (intel_de_read(dev_priv, DC_STATE_EN) &
+		       DC_STATE_EN_UPTO_DC6),
+		      "DC6 already programmed to be enabled.\n");
 
 	assert_csr_loaded(dev_priv);
 }
@@ -952,12 +991,12 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
 {
 	assert_can_enable_dc6(dev_priv);
 
-	DRM_DEBUG_KMS("Enabling DC6\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
 
 	/* Wa Display #1183: skl,kbl,cfl */
 	if (IS_GEN9_BC(dev_priv))
-		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-			   SKL_SELECT_ALTERNATE_DC_EXIT);
+		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
 
 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
@@ -968,15 +1007,15 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 	int pw_idx = power_well->desc->hsw.idx;
 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
-	u32 bios_req = I915_READ(regs->bios);
+	u32 bios_req = intel_de_read(dev_priv, regs->bios);
 
 	/* Take over the request bit if set by BIOS. */
 	if (bios_req & mask) {
-		u32 drv_req = I915_READ(regs->driver);
+		u32 drv_req = intel_de_read(dev_priv, regs->driver);
 
 		if (!(drv_req & mask))
-			I915_WRITE(regs->driver, drv_req | mask);
-		I915_WRITE(regs->bios, bios_req & ~mask);
+			intel_de_write(dev_priv, regs->driver, drv_req | mask);
+		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
 	}
 }
 
@@ -1022,22 +1061,25 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
-	return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
-		(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
 }
 
 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
 {
-	u32 tmp = I915_READ(DBUF_CTL);
+	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
 
-	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
-	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
-	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
+	drm_WARN(&dev_priv->drm,
+		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
+		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+		 hw_enabled_dbuf_slices,
+		 enabled_dbuf_slices);
 }
 
 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
 {
-	struct intel_cdclk_state cdclk_state = {};
+	struct intel_cdclk_config cdclk_config = {};
 
 	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
 		tgl_disable_dc3co(dev_priv);
@@ -1046,9 +1088,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
 
 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
-	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
-	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+					      &cdclk_config));
 
 	gen9_assert_dbuf_enabled(dev_priv);
 
@@ -1108,9 +1152,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
 					 struct i915_power_well *power_well)
 {
-	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
 		i830_enable_pipe(dev_priv, PIPE_A);
-	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
 		i830_enable_pipe(dev_priv, PIPE_B);
 }
 
@@ -1124,8 +1168,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
 					  struct i915_power_well *power_well)
 {
-	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
-		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 }
 
 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1163,9 +1207,10 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
 	if (wait_for(COND, 100))
-		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
-			  state,
-			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+		drm_err(&dev_priv->drm,
+			"timeout setting power well state %08x (%08x)\n",
+			state,
+			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
 #undef COND
 
@@ -1204,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * We only ever set the power-on and power-gate states, anything
 	 * else is unexpected.
 	 */
-	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
-		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
 	if (state == ctrl)
 		enabled = true;
 
@@ -1214,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * is poking at the power controls too.
 	 */
 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
-	WARN_ON(ctrl != state);
+	drm_WARN_ON(&dev_priv->drm, ctrl != state);
 
 	vlv_punit_put(dev_priv);
 
@@ -1231,21 +1276,22 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 	 * (and never recovering) in this case. intel_dsi_post_disable() will
 	 * clear it when we turn off the display.
 	 */
-	val = I915_READ(DSPCLK_GATE_D);
+	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(DSPCLK_GATE_D, val);
+	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
 
 	/*
 	 * Disable trickle feed and enable pnd deadline calculation
 	 */
-	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
-	I915_WRITE(CBR1_VLV, 0);
+	intel_de_write(dev_priv, MI_ARB_VLV,
+		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+	intel_de_write(dev_priv, CBR1_VLV, 0);
 
-	WARN_ON(dev_priv->rawclk_freq == 0);
-
-	I915_WRITE(RAWCLK_FREQ_VLV,
-		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+					 1000));
 }
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -1262,13 +1308,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
 	 */
 	for_each_pipe(dev_priv, pipe) {
-		u32 val = I915_READ(DPLL(pipe));
+		u32 val = intel_de_read(dev_priv, DPLL(pipe));
 
 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 		if (pipe != PIPE_A)
 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
-		I915_WRITE(DPLL(pipe), val);
+		intel_de_write(dev_priv, DPLL(pipe), val);
 	}
 
 	vlv_init_display_clock_gating(dev_priv);
@@ -1348,7 +1394,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	 * both PLLs disabled, or we risk losing DPIO and PLL
 	 * synchronization.
 	 */
-	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+	intel_de_write(dev_priv, DPIO_CTL,
+		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
 }
 
 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1360,7 +1407,8 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 		assert_pll_disabled(dev_priv, pipe);
 
 	/* Assert common reset */
-	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+	intel_de_write(dev_priv, DPIO_CTL,
+		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
 
 	vlv_set_power_well(dev_priv, power_well, false);
 }
@@ -1422,7 +1470,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
 		 */
 		if (BITS_SET(phy_control,
 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
-		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
 
 		if (BITS_SET(phy_control,
@@ -1467,9 +1515,10 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
 	 */
 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
 				       phy_status_mask, phy_status, 10))
-		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
-			   phy_status, dev_priv->chv_phy_control);
+		drm_err(&dev_priv->drm,
+			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+			phy_status, dev_priv->chv_phy_control);
 }
 
 #undef BITS_SET
@@ -1481,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	enum pipe pipe;
 	u32 tmp;
 
-	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+	drm_WARN_ON_ONCE(&dev_priv->drm,
+			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
 
 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
 		pipe = PIPE_A;
@@ -1499,7 +1549,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	/* Poll for phypwrgood signal */
 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
 				  PHY_POWERGOOD(phy), 1))
-		DRM_ERROR("Display PHY %d is not power up\n", phy);
+		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+			phy);
 
 	vlv_dpio_get(dev_priv);
 
@@ -1527,10 +1578,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	vlv_dpio_put(dev_priv);
 
 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+		       dev_priv->chv_phy_control);
 
-	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		      phy, dev_priv->chv_phy_control);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+		    phy, dev_priv->chv_phy_control);
 
 	assert_chv_phy_status(dev_priv);
 }
@@ -1540,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 {
 	enum dpio_phy phy;
 
-	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
-		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+	drm_WARN_ON_ONCE(&dev_priv->drm,
+			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
 
 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
 		phy = DPIO_PHY0;
@@ -1553,12 +1607,14 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 	}
 
 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+		       dev_priv->chv_phy_control);
 
 	vlv_set_power_well(dev_priv, power_well, false);
 
-	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
-		      phy, dev_priv->chv_phy_control);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+		    phy, dev_priv->chv_phy_control);
 
 	/* PHY is fully reset now, so we can enable the PHY state asserts */
 	dev_priv->chv_phy_assert[phy] = true;
@@ -1621,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
 
-	WARN(actual != expected,
-	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
-	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
-	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
-	     reg, val);
+	drm_WARN(&dev_priv->drm, actual != expected,
+		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+		 !!(actual & DPIO_ALLDL_POWERDOWN),
+		 !!(actual & DPIO_ANYDL_POWERDOWN),
+		 !!(expected & DPIO_ALLDL_POWERDOWN),
+		 !!(expected & DPIO_ANYDL_POWERDOWN),
+		 reg, val);
 }
 
 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -1646,10 +1704,12 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 	else
 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+		       dev_priv->chv_phy_control);
 
-	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
-		      phy, ch, dev_priv->chv_phy_control);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+		    phy, ch, dev_priv->chv_phy_control);
 
 	assert_chv_phy_status(dev_priv);
 
@@ -1677,10 +1737,12 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
 	else
 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
 
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+		       dev_priv->chv_phy_control);
 
-	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
-		      phy, ch, mask, dev_priv->chv_phy_control);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+		    phy, ch, mask, dev_priv->chv_phy_control);
 
 	assert_chv_phy_status(dev_priv);
 
@@ -1703,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * We only ever set the power-on and power-gate states, anything
 	 * else is unexpected.
 	 */
-	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+		    state != DP_SSS_PWR_GATE(pipe));
 	enabled = state == DP_SSS_PWR_ON(pipe);
 
 	/*
@@ -1711,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
 	 * is poking at the power controls too.
 	 */
 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
-	WARN_ON(ctrl << 16 != state);
+	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
 
 	vlv_punit_put(dev_priv);
 
@@ -1742,9 +1805,10 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
 
 	if (wait_for(COND, 100))
-		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
-			  state,
-			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+		drm_err(&dev_priv->drm,
+			"timeout setting power well state %08x (%08x)\n",
+			state,
+			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
 
 #undef COND
 
@@ -1752,6 +1816,13 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
 	vlv_punit_put(dev_priv);
 }
 
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+					struct i915_power_well *power_well)
+{
+	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+		       dev_priv->chv_phy_control);
+}
+
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
@@ -1981,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
 
 	power_domains = &dev_priv->power_domains;
 
-	WARN(!power_domains->domain_use_count[domain],
-	     "Use count on domain %s is already zero\n",
-	     name);
-	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
-	     "Async disabling of domain %s is pending\n",
-	     name);
+	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+		 "Use count on domain %s is already zero\n",
+		 name);
+	drm_WARN(&dev_priv->drm,
+		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
+		 "Async disabling of domain %s is pending\n",
+		 name);
 
 	power_domains->domain_use_count[domain]--;
 
@@ -2131,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
 		goto out_verify;
 	}
 
-	WARN_ON(power_domains->domain_use_count[domain] != 1);
+	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
 
 	/* Let a pending work requeue itself or queue a new one. */
 	if (power_domains->async_put_wakeref) {
@@ -2206,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
 
 	verify_async_put_domains_state(power_domains);
 
-	WARN_ON(power_domains->async_put_wakeref);
+	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2674,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT_ULL(POWER_DOMAIN_INIT))
 
 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
-	TGL_PW_2_POWER_DOMAINS |			\
+	TGL_PW_3_POWER_DOMAINS |			\
 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
@@ -2734,7 +2806,7 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
 };
 
 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
-	.sync_hw = i9xx_power_well_sync_hw_noop,
+	.sync_hw = chv_pipe_power_well_sync_hw,
 	.enable = chv_pipe_power_well_enable,
 	.disable = chv_pipe_power_well_disable,
 	.is_enabled = chv_pipe_power_well_enabled,
@@ -3870,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 		.name = "power well 3",
 		.domains = TGL_PW_3_POWER_DOMAINS,
 		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
+		.id = TGL_DISP_PW_3,
 		{
 			.hsw.regs = &hsw_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4204,11 +4276,13 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 	} else if (enable_dc == -1) {
 		requested_dc = max_dc;
 	} else if (enable_dc > max_dc && enable_dc <= 4) {
-		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
-			      enable_dc, max_dc);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Adjusting requested max DC state (%d->%d)\n",
+			    enable_dc, max_dc);
 		requested_dc = max_dc;
 	} else {
-		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+		drm_err(&dev_priv->drm,
+			"Unexpected value for enable_dc (%d)\n", enable_dc);
 		requested_dc = max_dc;
 	}
 
@@ -4227,7 +4301,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
 
 	return mask;
 }
@@ -4371,16 +4445,16 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
 {
 	u32 val, status;
 
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 	udelay(10);
 
-	status = I915_READ(reg) & DBUF_POWER_STATE;
+	status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
 	if ((enable && !status) || (!enable && status)) {
-		DRM_ERROR("DBus power %s timeout!\n",
-			  enable ? "enable" : "disable");
+		drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
+			enable ? "enable" : "disable");
 		return false;
 	}
 	return true;
@@ -4388,80 +4462,60 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
 
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 11)
-		return 1;
-	return 2;
+	icl_dbuf_slices_update(dev_priv, 0);
 }
 
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices)
 {
-	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-	bool ret;
+	int i;
+	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
-	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
-		DRM_ERROR("Invalid number of dbuf slices requested\n");
-		return;
+	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
+		 "Invalid number of dbuf slices requested\n");
+
+	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+
+	/*
+	 * Might be running this in parallel to gen9_dc_off_power_well_enable
+	 * being called from intel_dp_detect for instance,
+	 * which causes assertion triggered by race condition,
+	 * as gen9_assert_dbuf_enabled might preempt this when registers
+	 * were already updated, while dev_priv was not.
+	 */
+	mutex_lock(&power_domains->lock);
+
+	for (i = 0; i < max_slices; i++) {
+		intel_dbuf_slice_set(dev_priv,
+				     DBUF_CTL_S(i),
+				     (req_slices & BIT(i)) != 0);
 	}
 
-	if (req_slices == hw_enabled_slices || req_slices == 0)
-		return;
+	dev_priv->enabled_dbuf_slices_mask = req_slices;
 
-	if (req_slices > hw_enabled_slices)
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
-	else
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
-
-	if (ret)
-		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+	mutex_unlock(&power_domains->lock);
 }
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
-	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL_S2);
-
-	udelay(10);
-
-	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
-	else
-		/*
-		 * FIXME: for now pretend that we only have 1 slice, see
-		 * intel_enabled_dbuf_slices_num().
-		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+	skl_ddb_get_hw_state(dev_priv);
+	/*
+	 * Just power up at least 1 slice, we will
+	 * figure out later which slices we have and what we need.
+	 */
+	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
+			       BIT(DBUF_S1));
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
-	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL_S2);
-
-	udelay(10);
-
-	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power disable timeout!\n");
-	else
-		/*
-		 * FIXME: for now pretend that the first slice is always
-		 * enabled, see intel_enabled_dbuf_slices_num().
-		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+	icl_dbuf_slices_update(dev_priv, 0);
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -4472,19 +4526,21 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
 		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
 		MBUS_ABOX_B_CREDIT_MASK |
 		MBUS_ABOX_BW_CREDIT_MASK;
-
-	val = I915_READ(MBUS_ABOX_CTL);
-	val &= ~mask;
-	val |= MBUS_ABOX_BT_CREDIT_POOL1(16) |
+	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
 		MBUS_ABOX_BT_CREDIT_POOL2(16) |
 		MBUS_ABOX_B_CREDIT(1) |
 		MBUS_ABOX_BW_CREDIT(1);
-	I915_WRITE(MBUS_ABOX_CTL, val);
+
+	intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
+	if (INTEL_GEN(dev_priv) >= 12) {
+		intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
+		intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
+	}
 }
 
 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
 {
-	u32 val = I915_READ(LCPLL_CTL);
+	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
 
 	/*
 	 * The LCPLL register should be turned on by the BIOS. For now
@@ -4493,13 +4549,13 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
 	 */
 
 	if (val & LCPLL_CD_SOURCE_FCLK)
-		DRM_ERROR("CDCLK source is not LCPLL\n");
+		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
 
 	if (val & LCPLL_PLL_DISABLE)
-		DRM_ERROR("LCPLL is disabled\n");
+		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
 
 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
-		DRM_ERROR("LCPLL not using non-SSC reference\n");
+		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
 }
 
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -4511,26 +4567,26 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
 				pipe_name(crtc->pipe));
 
-	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
 			"Display power well on\n");
-	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
 			"SPLL enabled\n");
-	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
 			"WRPLL1 enabled\n");
-	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
 			"WRPLL2 enabled\n");
-	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
 			"Panel power on\n");
-	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
 			"CPU PWM1 enabled\n");
 	if (IS_HASWELL(dev_priv))
-		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
 				"CPU PWM2 enabled\n");
-	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
 			"PCH PWM1 enabled\n");
-	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
 			"Utility pin enabled\n");
-	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
 			"PCH GTC enabled\n");
 
 	/*
@@ -4545,9 +4601,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
 	if (IS_HASWELL(dev_priv))
-		return I915_READ(D_COMP_HSW);
+		return intel_de_read(dev_priv, D_COMP_HSW);
 	else
-		return I915_READ(D_COMP_BDW);
+		return intel_de_read(dev_priv, D_COMP_BDW);
 }
 
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
@@ -4555,10 +4611,11 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
 	if (IS_HASWELL(dev_priv)) {
 		if (sandybridge_pcode_write(dev_priv,
 					    GEN6_PCODE_WRITE_D_COMP, val))
-			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Failed to write to D_COMP\n");
 	} else {
-		I915_WRITE(D_COMP_BDW, val);
-		POSTING_READ(D_COMP_BDW);
+		intel_de_write(dev_priv, D_COMP_BDW, val);
+		intel_de_posting_read(dev_priv, D_COMP_BDW);
 	}
 }
 
@@ -4577,25 +4634,25 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 
 	assert_can_disable_lcpll(dev_priv);
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 
 	if (switch_to_fclk) {
 		val |= LCPLL_CD_SOURCE_FCLK;
-		I915_WRITE(LCPLL_CTL, val);
+		intel_de_write(dev_priv, LCPLL_CTL, val);
 
-		if (wait_for_us(I915_READ(LCPLL_CTL) &
+		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
-			DRM_ERROR("Switching to FCLK failed\n");
+			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
 
-		val = I915_READ(LCPLL_CTL);
+		val = intel_de_read(dev_priv, LCPLL_CTL);
 	}
 
 	val |= LCPLL_PLL_DISABLE;
-	I915_WRITE(LCPLL_CTL, val);
-	POSTING_READ(LCPLL_CTL);
+	intel_de_write(dev_priv, LCPLL_CTL, val);
+	intel_de_posting_read(dev_priv, LCPLL_CTL);
 
 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
-		DRM_ERROR("LCPLL still locked\n");
+		drm_err(&dev_priv->drm, "LCPLL still locked\n");
 
 	val = hsw_read_dcomp(dev_priv);
 	val |= D_COMP_COMP_DISABLE;
@@ -4604,13 +4661,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 
 	if (wait_for((hsw_read_dcomp(dev_priv) &
 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
-		DRM_ERROR("D_COMP RCOMP still in progress\n");
+		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
 
 	if (allow_power_down) {
-		val = I915_READ(LCPLL_CTL);
+		val = intel_de_read(dev_priv, LCPLL_CTL);
 		val |= LCPLL_POWER_DOWN_ALLOW;
-		I915_WRITE(LCPLL_CTL, val);
-		POSTING_READ(LCPLL_CTL);
+		intel_de_write(dev_priv, LCPLL_CTL, val);
+		intel_de_posting_read(dev_priv, LCPLL_CTL);
 	}
 }
 
@@ -4622,7 +4679,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 
 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -4636,8 +4693,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
-		I915_WRITE(LCPLL_CTL, val);
-		POSTING_READ(LCPLL_CTL);
+		intel_de_write(dev_priv, LCPLL_CTL, val);
+		intel_de_posting_read(dev_priv, LCPLL_CTL);
 	}
 
 	val = hsw_read_dcomp(dev_priv);
@@ -4645,27 +4702,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 	val &= ~D_COMP_COMP_DISABLE;
 	hsw_write_dcomp(dev_priv, val);
 
-	val = I915_READ(LCPLL_CTL);
+	val = intel_de_read(dev_priv, LCPLL_CTL);
 	val &= ~LCPLL_PLL_DISABLE;
-	I915_WRITE(LCPLL_CTL, val);
+	intel_de_write(dev_priv, LCPLL_CTL, val);
 
 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
-		DRM_ERROR("LCPLL not locked yet\n");
+		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
 
 	if (val & LCPLL_CD_SOURCE_FCLK) {
-		val = I915_READ(LCPLL_CTL);
+		val = intel_de_read(dev_priv, LCPLL_CTL);
 		val &= ~LCPLL_CD_SOURCE_FCLK;
-		I915_WRITE(LCPLL_CTL, val);
+		intel_de_write(dev_priv, LCPLL_CTL, val);
 
-		if (wait_for_us((I915_READ(LCPLL_CTL) &
+		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
-			DRM_ERROR("Switching back to LCPLL failed\n");
+			drm_err(&dev_priv->drm,
+				"Switching back to LCPLL failed\n");
 	}
 
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
 	intel_update_cdclk(dev_priv);
-	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
 }
 
 /*
@@ -4695,12 +4753,12 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	DRM_DEBUG_KMS("Enabling package C8+\n");
+	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
 
 	if (HAS_PCH_LPT_LP(dev_priv)) {
-		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
-		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
 	}
 
 	lpt_disable_clkout_dp(dev_priv);
@@ -4711,15 +4769,15 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 {
 	u32 val;
 
-	DRM_DEBUG_KMS("Disabling package C8+\n");
+	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
 
 	hsw_restore_lcpll(dev_priv);
 	intel_init_pch_refclk(dev_priv);
 
 	if (HAS_PCH_LPT_LP(dev_priv)) {
-		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
-		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
 	}
 }
 
@@ -4737,14 +4795,14 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
 	}
 
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 
 	if (enable)
 		val |= reset_bits;
 	else
 		val &= ~reset_bits;
 
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 }
 
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -4769,7 +4827,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
 	mutex_unlock(&power_domains->lock);
 
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init_hw(dev_priv);
 
 	gen9_dbuf_enable(dev_priv);
 
@@ -4786,7 +4844,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 
 	gen9_dbuf_disable(dev_priv);
 
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit_hw(dev_priv);
 
 	/* The spec doesn't call for removing the reset handshake flag */
 	/* disable PG1 and Misc I/O */
@@ -4830,7 +4888,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
 
 	mutex_unlock(&power_domains->lock);
 
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init_hw(dev_priv);
 
 	gen9_dbuf_enable(dev_priv);
 
@@ -4847,7 +4905,7 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 
 	gen9_dbuf_disable(dev_priv);
 
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit_hw(dev_priv);
 
 	/* The spec doesn't call for removing the reset handshake flag */
 
@@ -4889,7 +4947,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
 	mutex_unlock(&power_domains->lock);
 
 	/* 5. Enable CD clock */
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init_hw(dev_priv);
 
 	/* 6. Enable DBUF */
 	gen9_dbuf_enable(dev_priv);
@@ -4911,7 +4969,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 	gen9_dbuf_disable(dev_priv);
 
 	/* 3. Disable CD clock */
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit_hw(dev_priv);
 
 	/*
 	 * 4. Disable Power Well 1 (PG1).
@@ -4970,25 +5028,23 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
 			break;
 
 	if (table[i].page_mask == 0) {
-		DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
-		I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
-		I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+		drm_dbg(&dev_priv->drm,
+			"Unknown memory configuration; disabling address buddy logic.\n");
+		intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+		intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
 	} else {
-		u32 val;
-
-		I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
-		I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+		intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
+			       table[i].page_mask);
+		intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
+			       table[i].page_mask);
 
 		/* Wa_22010178259:tgl */
-		val = I915_READ(BW_BUDDY1_CTL);
-		val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
-		val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
-		I915_WRITE(BW_BUDDY1_CTL, val);
-
-		val = I915_READ(BW_BUDDY2_CTL);
-		val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK;
-		val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8);
-		I915_WRITE(BW_BUDDY2_CTL, val);
+		intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
+			     BW_BUDDY_TLB_REQ_TIMER_MASK,
+			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+		intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
+			     BW_BUDDY_TLB_REQ_TIMER_MASK,
+			     REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
 	}
 }
 
@@ -5016,7 +5072,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 	mutex_unlock(&power_domains->lock);
 
 	/* 4. Enable CDCLK. */
-	intel_cdclk_init(dev_priv);
+	intel_cdclk_init_hw(dev_priv);
 
 	/* 5. Enable DBUF. */
 	icl_dbuf_enable(dev_priv);
@@ -5045,7 +5101,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 	icl_dbuf_disable(dev_priv);
 
 	/* 3. Disable CD clock */
-	intel_cdclk_uninit(dev_priv);
+	intel_cdclk_uninit_hw(dev_priv);
 
 	/*
 	 * 4. Disable Power Well 1 (PG1).
@@ -5090,7 +5146,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 	 * current lane status.
 	 */
 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
-		u32 status = I915_READ(DPLL(PIPE_A));
+		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
 		unsigned int mask;
 
 		mask = status & DPLL_PORTB_READY_MASK;
@@ -5121,7 +5177,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 	}
 
 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
-		u32 status = I915_READ(DPIO_PHY_STATUS);
+		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
 		unsigned int mask;
 
 		mask = status & DPLL_PORTD_READY_MASK;
@@ -5142,10 +5198,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
 	}
 
-	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
+		    dev_priv->chv_phy_control);
 
-	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
-		      dev_priv->chv_phy_control);
+	/* Defer application of initial phy_control to enabling the powerwell */
 }
 
 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -5158,10 +5214,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 	/* If the display might be already active skip this */
 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
-	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
+	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
 		return;
 
-	DRM_DEBUG_KMS("toggling display PHY side reset\n");
+	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
 
 	/* cmnlane needs DPLL registers */
 	disp2d->desc->ops->enable(dev_priv, disp2d);
@@ -5189,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
 
 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
 {
-	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
-	     "VED not power gated\n");
+	drm_WARN(&dev_priv->drm,
+		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+		 "VED not power gated\n");
 }
 
 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
@@ -5201,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
 		{}
 	};
 
-	WARN(!pci_dev_present(isp_ids) &&
-	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
-	     "ISP not power gated\n");
+	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+		 "ISP not power gated\n");
 }
 
 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
@@ -5230,9 +5287,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 
 	power_domains->initializing = true;
 
-	/* Must happen before power domain init on VLV/CHV */
-	intel_update_rawclk(i915);
-
 	if (INTEL_GEN(i915) >= 11) {
 		icl_display_core_init(i915, resume);
 	} else if (IS_CANNONLAKE(i915)) {
@@ -5336,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
 {
 	struct i915_power_domains *power_domains = &i915->power_domains;
 
-	WARN_ON(power_domains->wakeref);
+	drm_WARN_ON(&i915->drm, power_domains->wakeref);
 	power_domains->wakeref =
 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
 
@@ -5418,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
 		intel_power_domains_init_hw(i915, true);
 		power_domains->display_core_suspended = false;
 	} else {
-		WARN_ON(power_domains->wakeref);
+		drm_WARN_ON(&i915->drm, power_domains->wakeref);
 		power_domains->wakeref =
 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
 	}
@@ -5436,13 +5490,13 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
 	for_each_power_well(i915, power_well) {
 		enum intel_display_power_domain domain;
 
-		DRM_DEBUG_DRIVER("%-25s %d\n",
-				 power_well->desc->name, power_well->count);
+		drm_dbg(&i915->drm, "%-25s %d\n",
+			power_well->desc->name, power_well->count);
 
 		for_each_power_domain(domain, power_well->desc->domains)
-			DRM_DEBUG_DRIVER("  %-23s %d\n",
-					 intel_display_power_domain_str(domain),
-					 power_domains->domain_use_count[domain]);
+			drm_dbg(&i915->drm, "  %-23s %d\n",
+				intel_display_power_domain_str(domain),
+				power_domains->domain_use_count[domain]);
 	}
 }
 
@@ -5475,19 +5529,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
 		if ((power_well->count || power_well->desc->always_on) !=
 		    enabled)
-			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
-				  power_well->desc->name,
-				  power_well->count, enabled);
+			drm_err(&i915->drm,
+				"power well %s state mismatch (refcount %d/enabled %d)",
+				power_well->desc->name,
+				power_well->count, enabled);
 
 		domains_count = 0;
 		for_each_power_domain(domain, power_well->desc->domains)
 			domains_count += power_domains->domain_use_count[domain];
 
 		if (power_well->count != domains_count) {
-			DRM_ERROR("power well %s refcount/domain refcount mismatch "
-				  "(refcount %d/domains refcount %d)\n",
-				  power_well->desc->name, power_well->count,
-				  domains_count);
+			drm_err(&i915->drm,
+				"power well %s refcount/domain refcount mismatch "
+				"(refcount %d/domains refcount %d)\n",
+				power_well->desc->name, power_well->count,
+				domains_count);
 			dump_domain_info = true;
 		}
 	}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2608a65a..da64a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
 	SKL_DISP_PW_MISC_IO,
 	SKL_DISP_PW_1,
 	SKL_DISP_PW_2,
+	TGL_DISP_PW_3,
 	SKL_DISP_DC_OFF,
 };
 
@@ -307,6 +308,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
 }
 #endif
 
+enum dbuf_slice {
+	DBUF_S1,
+	DBUF_S2,
+};
+
 #define with_intel_display_power(i915, domain, wf) \
 	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
 	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 888ea8a1..5e00e61 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,13 +39,14 @@
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_rect.h>
 #include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
 #include <drm/i915_mei_hdcp_interface.h>
 #include <media/cec-notifier.h>
 
 #include "i915_drv.h"
+#include "intel_de.h"
 
 struct drm_printer;
+struct __intel_global_objs_state;
 
 /*
  * Display related stuff
@@ -139,6 +140,9 @@ struct intel_encoder {
 	int (*compute_config)(struct intel_encoder *,
 			      struct intel_crtc_state *,
 			      struct drm_connector_state *);
+	int (*compute_config_late)(struct intel_encoder *,
+				   struct intel_crtc_state *,
+				   struct drm_connector_state *);
 	void (*update_prepare)(struct intel_atomic_state *,
 			       struct intel_encoder *,
 			       struct intel_crtc *);
@@ -214,6 +218,9 @@ struct intel_panel {
 		u8 controller;		/* bxt+ only */
 		struct pwm_device *pwm;
 
+		/* DPCD backlight */
+		u8 pwmgen_bit_count;
+
 		struct backlight_device *device;
 
 		/* Connector and platform specific backlight functions */
@@ -458,25 +465,8 @@ struct intel_atomic_state {
 
 	intel_wakeref_t wakeref;
 
-	struct {
-		/*
-		 * Logical state of cdclk (used for all scaling, watermark,
-		 * etc. calculations and checks). This is computed as if all
-		 * enabled crtcs were active.
-		 */
-		struct intel_cdclk_state logical;
-
-		/*
-		 * Actual state of cdclk, can be different from the logical
-		 * state only when all crtc's are DPMS off.
-		 */
-		struct intel_cdclk_state actual;
-
-		int force_min_cdclk;
-		bool force_min_cdclk_changed;
-		/* pipe to which cd2x update is synchronized */
-		enum pipe pipe;
-	} cdclk;
+	struct __intel_global_objs_state *global_objs;
+	int num_global_objs;
 
 	bool dpll_set, modeset;
 
@@ -491,10 +481,6 @@ struct intel_atomic_state {
 	u8 active_pipe_changes;
 
 	u8 active_pipes;
-	/* minimum acceptable cdclk for each pipe */
-	int min_cdclk[I915_MAX_PIPES];
-	/* minimum acceptable voltage level for each pipe */
-	u8 min_voltage_level[I915_MAX_PIPES];
 
 	struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
 
@@ -508,14 +494,11 @@ struct intel_atomic_state {
 
 	/*
 	 * active_pipes
-	 * min_cdclk[]
-	 * min_voltage_level[]
-	 * cdclk.*
 	 */
 	bool global_state_changed;
 
-	/* Gen9+ only */
-	struct skl_ddb_values wm_results;
+	/* Number of enabled DBuf slices */
+	u8 enabled_dbuf_slices_mask;
 
 	struct i915_sw_fence commit_ready;
 
@@ -611,6 +594,7 @@ struct intel_plane_state {
 
 struct intel_initial_plane_config {
 	struct intel_framebuffer *fb;
+	struct i915_vma *vma;
 	unsigned int tiling;
 	int size;
 	u32 base;
@@ -657,15 +641,30 @@ struct intel_crtc_scaler_state {
 /* Flag to use the scanline counter instead of the pixel counter */
 #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
 
+struct intel_wm_level {
+	bool enable;
+	u32 pri_val;
+	u32 spr_val;
+	u32 cur_val;
+	u32 fbc_val;
+};
+
 struct intel_pipe_wm {
 	struct intel_wm_level wm[5];
-	u32 linetime;
 	bool fbc_wm_enabled;
 	bool pipe_enabled;
 	bool sprites_enabled;
 	bool sprites_scaled;
 };
 
+struct skl_wm_level {
+	u16 min_ddb_alloc;
+	u16 plane_res_b;
+	u8 plane_res_l;
+	bool plane_en;
+	bool ignore_lines;
+};
+
 struct skl_plane_wm {
 	struct skl_wm_level wm[8];
 	struct skl_wm_level uv_wm[8];
@@ -675,7 +674,6 @@ struct skl_plane_wm {
 
 struct skl_pipe_wm {
 	struct skl_plane_wm planes[I915_MAX_PLANES];
-	u32 linetime;
 };
 
 enum vlv_wm_level {
@@ -1046,6 +1044,10 @@ struct intel_crtc_state {
 		struct drm_dsc_config config;
 	} dsc;
 
+	/* HSW+ linetime watermarks */
+	u16 linetime;
+	u16 ips_linetime;
+
 	/* Forward Error correction State */
 	bool fec_enable;
 
@@ -1059,6 +1061,32 @@ struct intel_crtc_state {
 	enum transcoder mst_master_transcoder;
 };
 
+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PLANE3,
+	INTEL_PIPE_CRC_SOURCE_PLANE4,
+	INTEL_PIPE_CRC_SOURCE_PLANE5,
+	INTEL_PIPE_CRC_SOURCE_PLANE6,
+	INTEL_PIPE_CRC_SOURCE_PLANE7,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	int skipped;
+	enum intel_pipe_crc_source source;
+};
+
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
@@ -1102,6 +1130,10 @@ struct intel_crtc {
 
 	/* per pipe DSB related info */
 	struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc;
+#endif
 };
 
 struct intel_plane {
@@ -1181,8 +1213,6 @@ struct intel_hdmi {
 };
 
 struct intel_dp_mst_encoder;
-#define DP_MAX_DOWNSTREAM_PORTS		0x10
-
 /*
  * enum link_m_n_set:
  *	When platform provides two set of M_N registers for dp, we can
@@ -1250,6 +1280,7 @@ struct intel_dp {
 	int max_link_rate;
 	/* sink or branch descriptor */
 	struct drm_dp_desc desc;
+	u32 edid_quirks;
 	struct drm_dp_aux aux;
 	u32 aux_busy_last_status;
 	u8 train_set[4];
@@ -1422,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
 }
 
 static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+	return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
 intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
+	/* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */
+	drm_WARN_ON(&dev_priv->drm,
+		    !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
 	return dev_priv->pipe_to_crtc_mapping[pipe];
 }
 
@@ -1469,7 +1509,7 @@ enc_to_dig_port(struct intel_encoder *encoder)
 }
 
 static inline struct intel_digital_port *
-conn_to_dig_port(struct intel_connector *connector)
+intel_attached_dig_port(struct intel_connector *connector)
 {
 	return enc_to_dig_port(intel_attached_encoder(connector));
 }
@@ -1486,6 +1526,11 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
 	return &enc_to_dig_port(encoder)->dp;
 }
 
+static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
+{
+	return enc_to_intel_dp(intel_attached_encoder(connector));
+}
+
 static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
 {
 	switch (encoder->type) {
@@ -1608,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
 		 (1 << INTEL_OUTPUT_DP_MST) |
 		 (1 << INTEL_OUTPUT_EDP));
 }
+
 static inline void
 intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	drm_wait_one_vblank(&dev_priv->drm, pipe);
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+	drm_crtc_wait_one_vblank(&crtc->base);
 }
+
 static inline void
 intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2084570..804b1d9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_hdcp.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
 
 #include "i915_debugfs.h"
 #include "i915_drv.h"
@@ -49,6 +48,7 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
@@ -146,11 +146,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
-static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
-{
-	return enc_to_intel_dp(intel_attached_encoder(connector));
-}
-
 static void intel_dp_link_down(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *old_crtc_state);
 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -272,7 +267,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	enum port port = dig_port->base.port;
 
-	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 
 	/* Low voltage SKUs are limited to max of 5.4G */
 	if (voltage == VOLTAGE_INFO_0_85V)
@@ -323,14 +318,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
 		162000, 270000
 	};
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *encoder = &dig_port->base;
 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
 	const int *source_rates;
-	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
+	int size, max_rate = 0, vbt_max_rate;
 
 	/* This should only be done once */
-	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_dp->source_rates || intel_dp->num_source_rates);
 
 	if (INTEL_GEN(dev_priv) >= 10) {
 		source_rates = cnl_rates;
@@ -354,6 +349,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
 		size = ARRAY_SIZE(g4x_rates);
 	}
 
+	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
 	if (max_rate && vbt_max_rate)
 		max_rate = min(max_rate, vbt_max_rate);
 	else if (vbt_max_rate)
@@ -519,12 +515,13 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
 	 */
 	bits_per_pixel = (link_clock * lane_count * 8) /
 			 intel_dp_mode_to_fec_clock(mode_clock);
-	DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
 
 	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
 	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
 		mode_hdisplay;
-	DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
+		    max_bpp_small_joiner_ram);
 
 	/*
 	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
@@ -534,8 +531,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
 
 	/* Error out if the max bpp is less than smallest allowed valid bpp */
 	if (bits_per_pixel < valid_dsc_bpp[0]) {
-		DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
-			      bits_per_pixel, valid_dsc_bpp[0]);
+		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+			    bits_per_pixel, valid_dsc_bpp[0]);
 		return 0;
 	}
 
@@ -760,20 +757,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
 	u32 DP;
 
-	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
-		 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
-		 pipe_name(pipe), intel_dig_port->base.base.base.id,
-		 intel_dig_port->base.base.name))
+	if (drm_WARN(&dev_priv->drm,
+		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+		     pipe_name(pipe), intel_dig_port->base.base.base.id,
+		     intel_dig_port->base.base.name))
 		return;
 
-	DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		      pipe_name(pipe), intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(pipe), intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name);
 
 	/* Preserve the BIOS-computed detected bit. This is
 	 * supposed to be read-only.
 	 */
-	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
 	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 	DP |= DP_PORT_WIDTH(1);
 	DP |= DP_LINK_TRAIN_PAT_1;
@@ -783,7 +782,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	else
 		DP |= DP_PIPE_SEL(pipe);
 
-	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
 
 	/*
 	 * The DPLL for the pipe must be enabled for this to work.
@@ -795,8 +794,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 
 		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
 				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
-			DRM_ERROR("Failed to force on pll for pipe %c!\n",
-				  pipe_name(pipe));
+			drm_err(&dev_priv->drm,
+				"Failed to force on pll for pipe %c!\n",
+				pipe_name(pipe));
 			return;
 		}
 	}
@@ -807,14 +807,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 	 * to make this power sequencer lock onto the port.
 	 * Otherwise even VDD force bit won't work.
 	 */
-	I915_WRITE(intel_dp->output_reg, DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
-	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
-	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
 	if (!pll_enabled) {
 		vlv_force_pll_off(dev_priv, pipe);
@@ -837,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
 		if (encoder->type == INTEL_OUTPUT_EDP) {
-			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
-				intel_dp->active_pipe != intel_dp->pps_pipe);
+			drm_WARN_ON(&dev_priv->drm,
+				    intel_dp->active_pipe != INVALID_PIPE &&
+				    intel_dp->active_pipe !=
+				    intel_dp->pps_pipe);
 
 			if (intel_dp->pps_pipe != INVALID_PIPE)
 				pipes &= ~(1 << intel_dp->pps_pipe);
 		} else {
-			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+			drm_WARN_ON(&dev_priv->drm,
+				    intel_dp->pps_pipe != INVALID_PIPE);
 
 			if (intel_dp->active_pipe != INVALID_PIPE)
 				pipes &= ~(1 << intel_dp->active_pipe);
@@ -866,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
-	WARN_ON(!intel_dp_is_edp(intel_dp));
+	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 
-	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
-		intel_dp->active_pipe != intel_dp->pps_pipe);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
+		    intel_dp->active_pipe != intel_dp->pps_pipe);
 
 	if (intel_dp->pps_pipe != INVALID_PIPE)
 		return intel_dp->pps_pipe;
@@ -880,16 +883,17 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 	 * Didn't find one. This should not happen since there
 	 * are two power sequencers and up to two eDP ports.
 	 */
-	if (WARN_ON(pipe == INVALID_PIPE))
+	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
 		pipe = PIPE_A;
 
 	vlv_steal_power_sequencer(dev_priv, pipe);
 	intel_dp->pps_pipe = pipe;
 
-	DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		      pipe_name(intel_dp->pps_pipe),
-		      intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(intel_dp->pps_pipe),
+		    intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name);
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(intel_dp);
@@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
-	WARN_ON(!intel_dp_is_edp(intel_dp));
+	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
 
 	if (!intel_dp->pps_reset)
 		return backlight_controller;
@@ -935,13 +939,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
 			       enum pipe pipe)
 {
-	return I915_READ(PP_STATUS(pipe)) & PP_ON;
+	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
 }
 
 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
 				enum pipe pipe)
 {
-	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
 }
 
 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -958,7 +962,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 	enum pipe pipe;
 
 	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
-		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
+		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
 			PANEL_PORT_SELECT_MASK;
 
 		if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -997,16 +1001,18 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 
 	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 	if (intel_dp->pps_pipe == INVALID_PIPE) {
-		DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
-			      intel_dig_port->base.base.base.id,
-			      intel_dig_port->base.base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "no initial power sequencer for [ENCODER:%d:%s]\n",
+			    intel_dig_port->base.base.base.id,
+			    intel_dig_port->base.base.name);
 		return;
 	}
 
-	DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
-		      intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name,
-		      pipe_name(intel_dp->pps_pipe));
+	drm_dbg_kms(&dev_priv->drm,
+		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+		    intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name,
+		    pipe_name(intel_dp->pps_pipe));
 
 	intel_dp_init_panel_power_sequencer(intel_dp);
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1016,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 {
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-		    !IS_GEN9_LP(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			!(IS_VALLEYVIEW(dev_priv) ||
+			  IS_CHERRYVIEW(dev_priv) ||
+			  IS_GEN9_LP(dev_priv))))
 		return;
 
 	/*
@@ -1033,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 	for_each_intel_dp(&dev_priv->drm, encoder) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+		drm_WARN_ON(&dev_priv->drm,
+			    intel_dp->active_pipe != INVALID_PIPE);
 
 		if (encoder->type != INTEL_OUTPUT_EDP)
 			continue;
@@ -1119,12 +1128,13 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 
 			pp_ctrl_reg = PP_CONTROL(pipe);
 			pp_div_reg  = PP_DIVISOR(pipe);
-			pp_div = I915_READ(pp_div_reg);
+			pp_div = intel_de_read(dev_priv, pp_div_reg);
 			pp_div &= PP_REFERENCE_DIVIDER_MASK;
 
 			/* 0x1F write to PP_DIV_REG sets max cycle delay */
-			I915_WRITE(pp_div_reg, pp_div | 0x1F);
-			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
+			intel_de_write(dev_priv, pp_ctrl_reg,
+				       PANEL_UNLOCK_REGS);
 			msleep(intel_dp->panel_power_cycle_delay);
 		}
 	}
@@ -1142,7 +1152,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 	    intel_dp->pps_pipe == INVALID_PIPE)
 		return false;
 
-	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
@@ -1155,7 +1165,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 	    intel_dp->pps_pipe == INVALID_PIPE)
 		return false;
 
-	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 }
 
 static void
@@ -1167,10 +1177,11 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 		return;
 
 	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
-		WARN(1, "eDP powered off while attempting aux channel communication.\n");
-		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-			      I915_READ(_pp_stat_reg(intel_dp)),
-			      I915_READ(_pp_ctrl_reg(intel_dp)));
+		drm_WARN(&dev_priv->drm, 1,
+			 "eDP powered off while attempting aux channel communication.\n");
+		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
 	}
 }
 
@@ -1191,8 +1202,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
 	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
 
 	if (!done)
-		DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
-			  intel_dp->aux.name, timeout_ms, status);
+		drm_err(&i915->drm,
+			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
+			intel_dp->aux.name, timeout_ms, status);
 #undef C
 
 	return status;
@@ -1209,13 +1221,14 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 	 * The clock divider is based off the hrawclk, and would like to run at
 	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
 	 */
-	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
 }
 
 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	u32 freq;
 
 	if (index)
 		return 0;
@@ -1226,9 +1239,10 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 	 * divide by 2000 and use that
 	 */
 	if (dig_port->aux_ch == AUX_CH_A)
-		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
+		freq = dev_priv->cdclk.hw.cdclk;
 	else
-		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+	return DIV_ROUND_CLOSEST(freq, 2000);
 }
 
 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -1378,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 		const u32 status = intel_uncore_read(uncore, ch_ctl);
 
 		if (status != intel_dp->aux_busy_last_status) {
-			WARN(1, "dp_aux_ch not started status 0x%08x\n",
-			     status);
+			drm_WARN(&i915->drm, 1,
+				 "%s: not started (status 0x%08x)\n",
+				 intel_dp->aux.name, status);
 			intel_dp->aux_busy_last_status = status;
 		}
 
@@ -1388,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	}
 
 	/* Only 5 data registers! */
-	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
 		ret = -E2BIG;
 		goto out;
 	}
@@ -1440,7 +1455,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	}
 
 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
-		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+			intel_dp->aux.name, status);
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1450,7 +1466,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	 * Timeouts occur when the sink is not connected
 	 */
 	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
-		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+			intel_dp->aux.name, status);
 		ret = -EIO;
 		goto out;
 	}
@@ -1458,7 +1475,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	/* Timeouts occur when the device isn't connected, so they're
 	 * "normal" -- don't fill the kernel log with these */
 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
-		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+			    intel_dp->aux.name, status);
 		ret = -ETIMEDOUT;
 		goto out;
 	}
@@ -1473,8 +1491,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	 * drm layer takes care for the necessary retries.
 	 */
 	if (recv_bytes == 0 || recv_bytes > 20) {
-		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
-			      recv_bytes);
+		drm_dbg_kms(&i915->drm,
+			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
+			    intel_dp->aux.name, recv_bytes);
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1742,7 +1761,8 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
 	drm_dp_aux_init(&intel_dp->aux);
 
 	/* Failure to allocate our preferred name is not critical */
-	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
+				       aux_ch_name(dig_port->aux_ch),
 				       port_name(encoder->port));
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
 }
@@ -1918,8 +1938,9 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
 		/* Get bpp from vbt only for panels that dont have bpp in edid */
 		if (intel_connector->base.display_info.bpc == 0 &&
 		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
-			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
-				      dev_priv->vbt.edp.bpp);
+			drm_dbg_kms(&dev_priv->drm,
+				    "clamping bpp for eDP panel to BIOS-provided %i\n",
+				    dev_priv->vbt.edp.bpp);
 			bpp = dev_priv->vbt.edp.bpp;
 		}
 	}
@@ -2115,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 
 	/* Min Input BPC for ICL+ is 8 */
 	if (pipe_bpp < 8 * 3) {
-		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "No DSC support for less than 8bpc\n");
 		return -EINVAL;
 	}
 
@@ -2150,7 +2172,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 						     adjusted_mode->crtc_clock,
 						     adjusted_mode->crtc_hdisplay);
 		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
-			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Compressed BPP/Slice Count not supported\n");
 			return -EINVAL;
 		}
 		pipe_config->dsc.compressed_bpp = min_t(u16,
@@ -2167,26 +2190,28 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 		if (pipe_config->dsc.slice_count > 1) {
 			pipe_config->dsc.dsc_split = true;
 		} else {
-			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Cannot split stream to use 2 VDSC instances\n");
 			return -EINVAL;
 		}
 	}
 
 	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
-			      "Compressed BPP = %d\n",
-			      pipe_config->pipe_bpp,
-			      pipe_config->dsc.compressed_bpp);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Cannot compute valid DSC parameters for Input Bpp = %d "
+			    "Compressed BPP = %d\n",
+			    pipe_config->pipe_bpp,
+			    pipe_config->dsc.compressed_bpp);
 		return ret;
 	}
 
 	pipe_config->dsc.compression_enable = true;
-	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
-		      "Compressed Bpp = %d Slice Count = %d\n",
-		      pipe_config->pipe_bpp,
-		      pipe_config->dsc.compressed_bpp,
-		      pipe_config->dsc.slice_count);
+	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+		    "Compressed Bpp = %d Slice Count = %d\n",
+		    pipe_config->pipe_bpp,
+		    pipe_config->dsc.compressed_bpp,
+		    pipe_config->dsc.slice_count);
 
 	return 0;
 }
@@ -2214,7 +2239,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
 						    intel_dp->max_link_rate);
 
 	/* No common link rates between source and sink */
-	WARN_ON(common_len <= 0);
+	drm_WARN_ON(encoder->base.dev, common_len <= 0);
 
 	limits.min_clock = 0;
 	limits.max_clock = common_len - 1;
@@ -2373,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
-	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
 					   DP_DPCD_QUIRK_CONSTANT_N);
 	int ret = 0, output_bpp;
 
@@ -2515,7 +2540,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 	/* Preserve the BIOS-computed detected bit. This is
 	 * supposed to be read-only.
 	 */
-	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
 
 	/* Handle DP bits in common between all three register formats */
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -2539,12 +2564,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
-		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
 		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			trans_dp |= TRANS_DP_ENH_FRAMING;
 		else
 			trans_dp &= ~TRANS_DP_ENH_FRAMING;
-		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
 	} else {
 		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
 			intel_dp->DP |= DP_COLOR_RANGE_16_235;
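
Register access follows suit: the implicit-dev_priv I915_READ()/I915_WRITE() macros are replaced by intel_de_read()/intel_de_write(), which make the display-engine device explicit. The read-modify-write idiom recurring through these hunks, sketched with an assumed pipe variable:

	u32 val;

	val = intel_de_read(dev_priv, TRANS_DP_CTL(pipe));
	val |= TRANS_DP_ENH_FRAMING;	/* flip only the bits we own */
	intel_de_write(dev_priv, TRANS_DP_CTL(pipe), val);
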
@@ -2590,18 +2615,20 @@ static void wait_panel_status(struct intel_dp *intel_dp,
 	pp_stat_reg = _pp_stat_reg(intel_dp);
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
-	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
-			mask, value,
-			I915_READ(pp_stat_reg),
-			I915_READ(pp_ctrl_reg));
+	drm_dbg_kms(&dev_priv->drm,
+		    "mask %08x value %08x status %08x control %08x\n",
+		    mask, value,
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
 
 	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
 				       mask, value, 5000))
-		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
-				I915_READ(pp_stat_reg),
-				I915_READ(pp_ctrl_reg));
+		drm_err(&dev_priv->drm,
+			"Panel status timeout: status %08x control %08x\n",
+			intel_de_read(dev_priv, pp_stat_reg),
+			intel_de_read(dev_priv, pp_ctrl_reg));
 
-	DRM_DEBUG_KMS("Wait complete\n");
+	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
 }
 
 static void wait_panel_on(struct intel_dp *intel_dp)
@@ -2660,9 +2687,9 @@ static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	control = I915_READ(_pp_ctrl_reg(intel_dp));
-	if (WARN_ON(!HAS_DDI(dev_priv) &&
-		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
 		control &= ~PANEL_UNLOCK_MASK;
 		control |= PANEL_UNLOCK_REGS;
 	}
@@ -2696,9 +2723,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	intel_display_power_get(dev_priv,
 				intel_aux_power_domain(intel_dig_port));
 
-	DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
-		      intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name);
+	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+		    intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name);
 
 	if (!edp_have_panel_power(intel_dp))
 		wait_panel_power_cycle(intel_dp);
@@ -2709,17 +2736,19 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	pp_stat_reg = _pp_stat_reg(intel_dp);
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
-	I915_WRITE(pp_ctrl_reg, pp);
-	POSTING_READ(pp_ctrl_reg);
-	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
-			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
+	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
 	/*
 	 * If the panel wasn't on, delay before accessing aux channel
 	 */
 	if (!edp_have_panel_power(intel_dp)) {
-		DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
-			      intel_dig_port->base.base.base.id,
-			      intel_dig_port->base.base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
+			    intel_dig_port->base.base.base.id,
+			    intel_dig_port->base.base.name);
 		msleep(intel_dp->panel_power_up_delay);
 	}
 
@@ -2759,14 +2788,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	WARN_ON(intel_dp->want_panel_vdd);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
 
 	if (!edp_have_panel_vdd(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
-		      intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name);
+	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+		    intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name);
 
 	pp = ilk_get_pp_control(intel_dp);
 	pp &= ~EDP_FORCE_VDD;
@@ -2774,12 +2803,13 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 	pp_stat_reg = _pp_stat_reg(intel_dp);
 
-	I915_WRITE(pp_ctrl_reg, pp);
-	POSTING_READ(pp_ctrl_reg);
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 
 	/* Make sure sequencer is idle before allowing subsequent activity */
-	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
-	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		    intel_de_read(dev_priv, pp_stat_reg),
+		    intel_de_read(dev_priv, pp_ctrl_reg));
 
 	if ((pp & PANEL_POWER_ON) == 0)
 		intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2851,14 +2881,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	if (!intel_dp_is_edp(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
-		      dp_to_dig_port(intel_dp)->base.base.base.id,
-		      dp_to_dig_port(intel_dp)->base.base.name);
+	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+		    dp_to_dig_port(intel_dp)->base.base.base.id,
+		    dp_to_dig_port(intel_dp)->base.base.name);
 
-	if (WARN(edp_have_panel_power(intel_dp),
-		 "[ENCODER:%d:%s] panel power already on\n",
-		 dp_to_dig_port(intel_dp)->base.base.base.id,
-		 dp_to_dig_port(intel_dp)->base.base.name))
+	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+		     "[ENCODER:%d:%s] panel power already on\n",
+		     dp_to_dig_port(intel_dp)->base.base.base.id,
+		     dp_to_dig_port(intel_dp)->base.base.name))
 		return;
 
 	wait_panel_power_cycle(intel_dp);
@@ -2868,24 +2898,24 @@ static void edp_panel_on(struct intel_dp *intel_dp)
 	if (IS_GEN(dev_priv, 5)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(pp_ctrl_reg, pp);
-		POSTING_READ(pp_ctrl_reg);
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 	}
 
 	pp |= PANEL_POWER_ON;
 	if (!IS_GEN(dev_priv, 5))
 		pp |= PANEL_POWER_RESET;
 
-	I915_WRITE(pp_ctrl_reg, pp);
-	POSTING_READ(pp_ctrl_reg);
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 
 	wait_panel_on(intel_dp);
 	intel_dp->last_power_on = jiffies;
 
 	if (IS_GEN(dev_priv, 5)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(pp_ctrl_reg, pp);
-		POSTING_READ(pp_ctrl_reg);
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 	}
 }
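
POSTING_READ() likewise becomes intel_de_posting_read(). The read-back is not decorative: MMIO writes may be posted (buffered on the bus), so reading the register back forces the write to reach the hardware before the driver starts a delay or a status poll that depends on it. The panel power sequencing above relies on exactly that ordering:

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	/* flush the posted write before timing against the hardware */
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
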
 
@@ -2913,11 +2943,12 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	if (!intel_dp_is_edp(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
-		      dig_port->base.base.base.id, dig_port->base.base.name);
+	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+		    dig_port->base.base.base.id, dig_port->base.base.name);
 
-	WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
-	     dig_port->base.base.base.id, dig_port->base.base.name);
+	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
+		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+		 dig_port->base.base.base.id, dig_port->base.base.name);
 
 	pp = ilk_get_pp_control(intel_dp);
 	/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2929,8 +2960,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 
 	intel_dp->want_panel_vdd = false;
 
-	I915_WRITE(pp_ctrl_reg, pp);
-	POSTING_READ(pp_ctrl_reg);
+	intel_de_write(dev_priv, pp_ctrl_reg, pp);
+	intel_de_posting_read(dev_priv, pp_ctrl_reg);
 
 	wait_panel_off(intel_dp);
 	intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2971,8 +3002,8 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 		pp = ilk_get_pp_control(intel_dp);
 		pp |= EDP_BLC_ENABLE;
 
-		I915_WRITE(pp_ctrl_reg, pp);
-		POSTING_READ(pp_ctrl_reg);
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 	}
 }
 
@@ -3007,8 +3038,8 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 		pp = ilk_get_pp_control(intel_dp);
 		pp &= ~EDP_BLC_ENABLE;
 
-		I915_WRITE(pp_ctrl_reg, pp);
-		POSTING_READ(pp_ctrl_reg);
+		intel_de_write(dev_priv, pp_ctrl_reg, pp);
+		intel_de_posting_read(dev_priv, pp_ctrl_reg);
 	}
 
 	intel_dp->last_backlight_off = jiffies;
@@ -3059,7 +3090,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
+	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
 
 	I915_STATE_WARN(cur_state != state,
 			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -3070,7 +3101,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
 
 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 {
-	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
+	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
 
 	I915_STATE_WARN(cur_state != state,
 			"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -3089,8 +3120,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
 	assert_dp_port_disabled(intel_dp);
 	assert_edp_pll_disabled(dev_priv);
 
-	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
-		      pipe_config->port_clock);
+	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+		    pipe_config->port_clock);
 
 	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
 
@@ -3099,8 +3130,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
 	else
 		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
 
-	I915_WRITE(DP_A, intel_dp->DP);
-	POSTING_READ(DP_A);
+	intel_de_write(dev_priv, DP_A, intel_dp->DP);
+	intel_de_posting_read(dev_priv, DP_A);
 	udelay(500);
 
 	/*
@@ -3114,8 +3145,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
 
 	intel_dp->DP |= DP_PLL_ENABLE;
 
-	I915_WRITE(DP_A, intel_dp->DP);
-	POSTING_READ(DP_A);
+	intel_de_write(dev_priv, DP_A, intel_dp->DP);
+	intel_de_posting_read(dev_priv, DP_A);
 	udelay(200);
 }
 
@@ -3129,12 +3160,12 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
 	assert_dp_port_disabled(intel_dp);
 	assert_edp_pll_enabled(dev_priv);
 
-	DRM_DEBUG_KMS("disabling eDP PLL\n");
+	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
 
 	intel_dp->DP &= ~DP_PLL_ENABLE;
 
-	I915_WRITE(DP_A, intel_dp->DP);
-	POSTING_READ(DP_A);
+	intel_de_write(dev_priv, DP_A, intel_dp->DP);
+	intel_de_posting_read(dev_priv, DP_A);
 	udelay(200);
 }
 
@@ -3149,7 +3180,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
 	 * FIXME should really check all downstream ports...
 	 */
 	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
-		intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+		drm_dp_is_branch(intel_dp->dpcd) &&
 		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
 }
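
The open-coded DPCD test above is swapped for drm_dp_is_branch(), a DP-helper predicate that checks DP_DWN_STRM_PORT_PRESENT in the cached receiver capabilities; the two forms are equivalent:

	bool branch;

	/* open-coded */
	branch = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
		DP_DWN_STRM_PORT_PRESENT;

	/* helper, same semantics */
	branch = drm_dp_is_branch(intel_dp->dpcd);
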
 
@@ -3214,7 +3245,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
 	enum pipe p;
 
 	for_each_pipe(dev_priv, p) {
-		u32 val = I915_READ(TRANS_DP_CTL(p));
+		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
 
 		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
 			*pipe = p;
@@ -3222,7 +3253,8 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+		    port_name(port));
 
 	/* must initialize pipe to something for the asserts */
 	*pipe = PIPE_A;
@@ -3237,7 +3269,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
 	bool ret;
 	u32 val;
 
-	val = I915_READ(dp_reg);
+	val = intel_de_read(dev_priv, dp_reg);
 
 	ret = val & DP_PORT_EN;
 
@@ -3289,12 +3321,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	else
 		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
 
-	tmp = I915_READ(intel_dp->output_reg);
+	tmp = intel_de_read(dev_priv, intel_dp->output_reg);
 
 	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
 	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
-		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+		u32 trans_dp = intel_de_read(dev_priv,
+					     TRANS_DP_CTL(crtc->pipe));
 
 		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
 			flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3328,7 +3361,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	intel_dp_get_m_n(crtc, pipe_config);
 
 	if (port == PORT_A) {
-		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
 			pipe_config->port_clock = 162000;
 		else
 			pipe_config->port_clock = 270000;
@@ -3353,8 +3386,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 		 * up by the BIOS, and thus we can't get the mode at module
 		 * load.
 		 */
-		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
-			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+		drm_dbg_kms(&dev_priv->drm,
+			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
 		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
 	}
 }
@@ -3447,11 +3481,12 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
 
 	if (dp_train_pat & train_pat_mask)
-		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
-			      dp_train_pat & train_pat_mask);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Using DP training pattern TPS%d\n",
+			    dp_train_pat & train_pat_mask);
 
 	if (HAS_DDI(dev_priv)) {
-		u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
+		u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 
 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3477,7 +3512,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
 			break;
 		}
-		I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
+		intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
 
 	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
 		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3494,7 +3529,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "TPS3 not supported, using TPS2 instead\n");
 			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
@@ -3513,7 +3549,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
-			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "TPS3 not supported, using TPS2 instead\n");
 			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		}
@@ -3539,8 +3576,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
 	if (old_crtc_state->has_audio)
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
-	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
 static void intel_enable_dp(struct intel_encoder *encoder,
@@ -3550,11 +3587,11 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-	u32 dp_reg = I915_READ(intel_dp->output_reg);
+	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
 	enum pipe pipe = crtc->pipe;
 	intel_wakeref_t wakeref;
 
-	if (WARN_ON(dp_reg & DP_PORT_EN))
+	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
 		return;
 
 	with_pps_lock(intel_dp, wakeref) {
@@ -3583,8 +3620,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
 	intel_dp_stop_link_train(intel_dp);
 
 	if (pipe_config->has_audio) {
-		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-				 pipe_name(pipe));
+		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+			pipe_name(pipe));
 		intel_audio_codec_enable(encoder, pipe_config, conn_state);
 	}
 }
@@ -3625,9 +3662,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 	enum pipe pipe = intel_dp->pps_pipe;
 	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
 
-	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
 
-	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 		return;
 
 	edp_panel_vdd_off_sync(intel_dp);
@@ -3641,11 +3678,12 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 	 * port select always when logically disconnecting a power sequencer
 	 * from a port.
 	 */
-	DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
-		      pipe_name(pipe), intel_dig_port->base.base.base.id,
-		      intel_dig_port->base.base.name);
-	I915_WRITE(pp_on_reg, 0);
-	POSTING_READ(pp_on_reg);
+	drm_dbg_kms(&dev_priv->drm,
+		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+		    pipe_name(pipe), intel_dig_port->base.base.base.id,
+		    intel_dig_port->base.base.name);
+	intel_de_write(dev_priv, pp_on_reg, 0);
+	intel_de_posting_read(dev_priv, pp_on_reg);
 
 	intel_dp->pps_pipe = INVALID_PIPE;
 }
@@ -3660,17 +3698,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 	for_each_intel_dp(&dev_priv->drm, encoder) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		WARN(intel_dp->active_pipe == pipe,
-		     "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
-		     pipe_name(pipe), encoder->base.base.id,
-		     encoder->base.name);
+		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
+			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+			 pipe_name(pipe), encoder->base.base.id,
+			 encoder->base.name);
 
 		if (intel_dp->pps_pipe != pipe)
 			continue;
 
-		DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
-			      pipe_name(pipe), encoder->base.base.id,
-			      encoder->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+			    pipe_name(pipe), encoder->base.base.id,
+			    encoder->base.name);
 
 		/* make sure vdd is off before we steal it */
 		vlv_detach_power_sequencer(intel_dp);
@@ -3686,7 +3725,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
 
 	if (intel_dp->pps_pipe != INVALID_PIPE &&
 	    intel_dp->pps_pipe != crtc->pipe) {
@@ -3712,9 +3751,10 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 	/* now it's all ours */
 	intel_dp->pps_pipe = crtc->pipe;
 
-	DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
-		      pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
-		      encoder->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+		    encoder->base.name);
 
 	/* init power sequencer on this pipe and port */
 	intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4140,18 +4180,22 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
 	}
 
 	if (mask)
-		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+		drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+			    signal_levels);
 
-	DRM_DEBUG_KMS("Using vswing level %d\n",
-		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
-	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
-		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
-			DP_TRAIN_PRE_EMPHASIS_SHIFT);
+	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
+		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
+	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
+		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
+		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+		    " (max)" : "");
 
 	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
 
-	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
 void
@@ -4164,8 +4208,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
 
 	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
 
-	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -4178,10 +4222,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 	if (!HAS_DDI(dev_priv))
 		return;
 
-	val = I915_READ(intel_dp->regs.dp_tp_ctl);
+	val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
 	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
-	I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
 
 	/*
 	 * Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -4195,7 +4239,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 
 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
 				  DP_TP_STATUS_IDLE_DONE, 1))
-		DRM_ERROR("Timed out waiting for DP idle patterns\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for DP idle patterns\n");
 }
 
 static void
@@ -4208,10 +4253,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
 	enum port port = encoder->port;
 	u32 DP = intel_dp->DP;
 
-	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+	if (drm_WARN_ON(&dev_priv->drm,
+			(intel_de_read(dev_priv, intel_dp->output_reg) &
+			 DP_PORT_EN) == 0))
 		return;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
 	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -4221,12 +4268,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
 		DP &= ~DP_LINK_TRAIN_MASK;
 		DP |= DP_LINK_TRAIN_PAT_IDLE;
 	}
-	I915_WRITE(intel_dp->output_reg, DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
 	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
-	I915_WRITE(intel_dp->output_reg, DP);
-	POSTING_READ(intel_dp->output_reg);
+	intel_de_write(dev_priv, intel_dp->output_reg, DP);
+	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
 	/*
 	 * HW workaround for IBX, we need to move the port
@@ -4245,12 +4292,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
 		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
 		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
 			DP_LINK_TRAIN_PAT_1;
-		I915_WRITE(intel_dp->output_reg, DP);
-		POSTING_READ(intel_dp->output_reg);
+		intel_de_write(dev_priv, intel_dp->output_reg, DP);
+		intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
 		DP &= ~DP_PORT_EN;
-		I915_WRITE(intel_dp->output_reg, DP);
-		POSTING_READ(intel_dp->output_reg);
+		intel_de_write(dev_priv, intel_dp->output_reg, DP);
+		intel_de_posting_read(dev_priv, intel_dp->output_reg);
 
 		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -4370,7 +4417,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 
 	/* this function is meant to be called only once */
-	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
 
 	if (!intel_dp_read_dpcd(intel_dp))
 		return false;
@@ -4390,8 +4437,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
 			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
 			     sizeof(intel_dp->edp_dpcd))
-		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
-			      intel_dp->edp_dpcd);
+		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+			    (int)sizeof(intel_dp->edp_dpcd),
+			    intel_dp->edp_dpcd);
 
 	/*
 	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
@@ -4466,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 	 * we don't care about reading it here or in intel_edp_init_dpcd().
 	 */
 	if (!intel_dp_is_edp(intel_dp) &&
-	    !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+	    !drm_dp_has_quirk(&intel_dp->desc, 0,
+			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
 		u8 count;
 		ssize_t r;
 
@@ -5125,7 +5174,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
 
 	crtc_state = to_intel_crtc_state(crtc->base.state);
 
-	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+	drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
 
 	if (!crtc_state->hw.active)
 		return 0;
@@ -5195,7 +5244,8 @@ intel_dp_hotplug(struct intel_encoder *encoder,
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
-	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+	drm_WARN(encoder->base.dev, ret,
+		 "Acquiring modeset locks failed with %i\n", ret);
 
 	/*
 	 * Keeping it consistent with intel_ddi_hotplug() and
@@ -5281,7 +5331,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 	intel_psr_short_pulse(intel_dp);
 
 	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
-		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Link Training Compliance Test requested\n");
 		/* Send a Hotplug Uevent to userspace to start modeset */
 		drm_kms_helper_hotplug_event(&dev_priv->drm);
 	}
@@ -5370,7 +5421,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder)
 		return false;
 	}
 
-	return I915_READ(SDEISR) & bit;
+	return intel_de_read(dev_priv, SDEISR) & bit;
 }
 
 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
@@ -5393,7 +5444,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder)
 		return false;
 	}
 
-	return I915_READ(SDEISR) & bit;
+	return intel_de_read(dev_priv, SDEISR) & bit;
 }
 
 static bool spt_digital_port_connected(struct intel_encoder *encoder)
@@ -5412,7 +5463,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder)
 		return cpt_digital_port_connected(encoder);
 	}
 
-	return I915_READ(SDEISR) & bit;
+	return intel_de_read(dev_priv, SDEISR) & bit;
 }
 
 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
@@ -5435,7 +5486,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
 		return false;
 	}
 
-	return I915_READ(PORT_HOTPLUG_STAT) & bit;
+	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
 }
 
 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
@@ -5458,7 +5509,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
 		return false;
 	}
 
-	return I915_READ(PORT_HOTPLUG_STAT) & bit;
+	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
 }
 
 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
@@ -5466,7 +5517,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (encoder->hpd_pin == HPD_PORT_A)
-		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
 	else
 		return ibx_digital_port_connected(encoder);
 }
@@ -5476,7 +5527,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (encoder->hpd_pin == HPD_PORT_A)
-		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
 	else
 		return cpt_digital_port_connected(encoder);
 }
@@ -5486,7 +5537,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (encoder->hpd_pin == HPD_PORT_A)
-		return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+		return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
 	else
 		return cpt_digital_port_connected(encoder);
 }
@@ -5496,7 +5547,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 	if (encoder->hpd_pin == HPD_PORT_A)
-		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+		return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
 	else
 		return cpt_digital_port_connected(encoder);
 }
@@ -5521,16 +5572,16 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
 		return false;
 	}
 
-	return I915_READ(GEN8_DE_PORT_ISR) & bit;
+	return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
 }
 
 static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
 				      enum phy phy)
 {
 	if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
-		return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+		return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
 
-	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
+	return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
 }
 
 static bool icp_digital_port_connected(struct intel_encoder *encoder)
@@ -5631,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
 
 	intel_dp->has_audio = drm_detect_monitor_audio(edid);
 	drm_dp_cec_set_edid(&intel_dp->aux, edid);
+	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
 }
 
 static void
@@ -5643,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 	intel_connector->detect_edid = NULL;
 
 	intel_dp->has_audio = false;
+	intel_dp->edid_quirks = 0;
 }
 
 static int
@@ -5656,9 +5709,10 @@ intel_dp_detect(struct drm_connector *connector,
 	struct intel_encoder *encoder = &dig_port->base;
 	enum drm_connector_status status;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+		    connector->base.id, connector->name);
+	drm_WARN_ON(&dev_priv->drm,
+		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	/* Can't disconnect eDP */
 	if (intel_dp_is_edp(intel_dp))
@@ -5673,9 +5727,10 @@ intel_dp_detect(struct drm_connector *connector,
 		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
 
 		if (intel_dp->is_mst) {
-			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
-				      intel_dp->is_mst,
-				      intel_dp->mst_mgr.mst_state);
+			drm_dbg_kms(&dev_priv->drm,
+				    "MST device may have disappeared %d vs %d\n",
+				    intel_dp->is_mst,
+				    intel_dp->mst_mgr.mst_state);
 			intel_dp->is_mst = false;
 			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
 							intel_dp->is_mst);
@@ -5763,8 +5818,8 @@ intel_dp_force(struct drm_connector *connector)
 		intel_aux_power_domain(dig_port);
 	intel_wakeref_t wakeref;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+		    connector->base.id, connector->name);
 	intel_dp_unset_edid(intel_dp);
 
 	if (connector->status != connector_status_connected)
@@ -5815,7 +5870,7 @@ intel_dp_connector_register(struct drm_connector *connector)
 	if (ret)
 		return ret;
 
-	i915_debugfs_connector_add(connector);
+	intel_connector_debugfs_add(connector);
 
 	DRM_DEBUG_KMS("registering %s bus for %s\n",
 		      intel_dp->aux.name, connector->kdev->kobj.name);
@@ -6396,6 +6451,7 @@ static
 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
 				      bool is_repeater, u8 content_type)
 {
+	int ret;
 	struct hdcp2_dp_errata_stream_type stream_type_msg;
 
 	if (is_repeater)
@@ -6411,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
 	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
 	stream_type_msg.stream_type = content_type;
 
-	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+	ret =  intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
 					sizeof(stream_type_msg));
+
+	return ret < 0 ? ret : 0;
 }
 
 static
@@ -6492,7 +6551,8 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * schedule a vdd off, so we don't hold on to the reference
 	 * indefinitely.
 	 */
-	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "VDD left on by BIOS, adjusting state tracking\n");
 	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
 
 	edp_panel_vdd_schedule_off(intel_dp);
@@ -6519,7 +6579,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
 	intel_wakeref_t wakeref;
 
 	if (!HAS_DDI(dev_priv))
-		intel_dp->DP = I915_READ(intel_dp->output_reg);
+		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
 
 	if (lspcon->active)
 		lspcon_resume(lspcon);
@@ -6545,6 +6605,140 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
 	}
 }
 
+static int intel_modeset_tile_group(struct intel_atomic_state *state,
+				    int tile_group_id)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct drm_connector_list_iter conn_iter;
+	struct drm_connector *connector;
+	int ret = 0;
+
+	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
+		struct drm_connector_state *conn_state;
+		struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc;
+
+		if (!connector->has_tile ||
+		    connector->tile_group->id != tile_group_id)
+			continue;
+
+		conn_state = drm_atomic_get_connector_state(&state->base,
+							    connector);
+		if (IS_ERR(conn_state)) {
+			ret = PTR_ERR(conn_state);
+			break;
+		}
+
+		crtc = to_intel_crtc(conn_state->crtc);
+
+		if (!crtc)
+			continue;
+
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+		crtc_state->uapi.mode_changed = true;
+
+		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+		if (ret)
+			break;
+	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	return ret;
+}
+
+static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+
+	if (transcoders == 0)
+		return 0;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state;
+		int ret;
+
+		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->hw.enable)
+			continue;
+
+		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
+			continue;
+
+		crtc_state->uapi.mode_changed = true;
+
+		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+		if (ret)
+			return ret;
+
+		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+		if (ret)
+			return ret;
+
+		transcoders &= ~BIT(crtc_state->cpu_transcoder);
+	}
+
+	drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+
+	return 0;
+}
+
+static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+				      struct drm_connector *connector)
+{
+	const struct drm_connector_state *old_conn_state =
+		drm_atomic_get_old_connector_state(&state->base, connector);
+	const struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc *crtc;
+	u8 transcoders;
+
+	crtc = to_intel_crtc(old_conn_state->crtc);
+	if (!crtc)
+		return 0;
+
+	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+	if (!old_crtc_state->hw.active)
+		return 0;
+
+	transcoders = old_crtc_state->sync_mode_slaves_mask;
+	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
+		transcoders |= BIT(old_crtc_state->master_transcoder);
+
+	return intel_modeset_affected_transcoders(state,
+						  transcoders);
+}
+
+static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+					   struct drm_atomic_state *_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(conn->dev);
+	struct intel_atomic_state *state = to_intel_atomic_state(_state);
+	int ret;
+
+	ret = intel_digital_connector_atomic_check(conn, &state->base);
+	if (ret)
+		return ret;
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return 0;
+
+	if (!intel_connector_needs_modeset(state, conn))
+		return 0;
+
+	if (conn->has_tile) {
+		ret = intel_modeset_tile_group(state, conn->tile_group->id);
+		if (ret)
+			return ret;
+	}
+
+	return intel_modeset_synced_crtcs(state, conn);
+}
+
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.force = intel_dp_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -6561,7 +6755,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
 	.detect_ctx = intel_dp_detect,
 	.get_modes = intel_dp_get_modes,
 	.mode_valid = intel_dp_mode_valid,
-	.atomic_check = intel_digital_connector_atomic_check,
+	.atomic_check = intel_dp_connector_atomic_check,
 };
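
The helper funcs above now route .atomic_check to the new intel_dp_connector_atomic_check() instead of the generic digital-connector check. The extra gen11+ logic exists because tiled panels and port-synced (master/slave transcoder) configurations must be modeset as a unit: when one connector needs a modeset, every related CRTC is flagged and its planes and connectors are pulled into the atomic state. The core pattern for dragging a related CRTC into the commit, condensed from the functions above:

	crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* force a full modeset on the related CRTC */
	crtc_state->uapi.mode_changed = true;

	/* ...and make sure everything hanging off it joins the state */
	ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
	if (ret)
		return ret;

	return drm_atomic_add_affected_planes(&state->base, &crtc->base);
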
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -6697,10 +6891,10 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
 
 	/* Ensure PPS is unlocked */
 	if (!HAS_DDI(dev_priv))
-		I915_WRITE(regs.pp_ctrl, pp_ctl);
+		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
 
-	pp_on = I915_READ(regs.pp_on);
-	pp_off = I915_READ(regs.pp_off);
+	pp_on = intel_de_read(dev_priv, regs.pp_on);
+	pp_off = intel_de_read(dev_priv, regs.pp_off);
 
 	/* Pull timing values out of registers */
 	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -6711,7 +6905,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
 	if (i915_mmio_reg_valid(regs.pp_div)) {
 		u32 pp_div;
 
-		pp_div = I915_READ(regs.pp_div);
+		pp_div = intel_de_read(dev_priv, regs.pp_div);
 
 		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
 	} else {
@@ -6768,8 +6962,9 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
 	 */
 	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
 		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
-		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
-			      vbt.t11_t12);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Increasing T12 panel delay as per the quirk to %d\n",
+			    vbt.t11_t12);
 	}
 	/* T11_T12 delay is special and actually in units of 100ms, but zero
 	 * based in the hw (so we need to add 100 ms). But the sw vbt
@@ -6811,12 +7006,15 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
 	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 #undef get_delay
 
-	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
-		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
-		      intel_dp->panel_power_cycle_delay);
+	drm_dbg_kms(&dev_priv->drm,
+		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		    intel_dp->panel_power_up_delay,
+		    intel_dp->panel_power_down_delay,
+		    intel_dp->panel_power_cycle_delay);
 
-	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
-		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+		    intel_dp->backlight_on_delay,
+		    intel_dp->backlight_off_delay);
 
 	/*
 	 * We override the HW backlight delays to 1 because we do manual waits
@@ -6841,7 +7039,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 pp_on, pp_off, port_sel = 0;
-	int div = dev_priv->rawclk_freq / 1000;
+	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
 	struct pps_registers regs;
 	enum port port = dp_to_dig_port(intel_dp)->base.port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -6865,14 +7063,16 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 	if (force_disable_vdd) {
 		u32 pp = ilk_get_pp_control(intel_dp);
 
-		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+			 "Panel power already on\n");
 
 		if (pp & EDP_FORCE_VDD)
-			DRM_DEBUG_KMS("VDD already on, disabling first\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "VDD already on, disabling first\n");
 
 		pp &= ~EDP_FORCE_VDD;
 
-		I915_WRITE(regs.pp_ctrl, pp);
+		intel_de_write(dev_priv, regs.pp_ctrl, pp);
 	}
 
 	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -6903,31 +7103,31 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 
 	pp_on |= port_sel;
 
-	I915_WRITE(regs.pp_on, pp_on);
-	I915_WRITE(regs.pp_off, pp_off);
+	intel_de_write(dev_priv, regs.pp_on, pp_on);
+	intel_de_write(dev_priv, regs.pp_off, pp_off);
 
 	/*
 	 * Compute the divisor for the pp clock, simply match the Bspec formula.
 	 */
 	if (i915_mmio_reg_valid(regs.pp_div)) {
-		I915_WRITE(regs.pp_div,
-			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
-			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+		intel_de_write(dev_priv, regs.pp_div,
+			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+			       REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
 	} else {
 		u32 pp_ctl;
 
-		pp_ctl = I915_READ(regs.pp_ctrl);
+		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
 		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
 		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
-		I915_WRITE(regs.pp_ctrl, pp_ctl);
+		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
 	}
 
-	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-		      I915_READ(regs.pp_on),
-		      I915_READ(regs.pp_off),
-		      i915_mmio_reg_valid(regs.pp_div) ?
-		      I915_READ(regs.pp_div) :
-		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+	drm_dbg_kms(&dev_priv->drm,
+		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		    intel_de_read(dev_priv, regs.pp_on),
+		    intel_de_read(dev_priv, regs.pp_off),
+		    i915_mmio_reg_valid(regs.pp_div) ?
+		    intel_de_read(dev_priv, regs.pp_div) :
+		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
 }
 
 static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6964,22 +7164,24 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
 
 	if (refresh_rate <= 0) {
-		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Refresh rate should be positive non-zero.\n");
 		return;
 	}
 
 	if (intel_dp == NULL) {
-		DRM_DEBUG_KMS("DRRS not supported.\n");
+		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
 		return;
 	}
 
 	if (!intel_crtc) {
-		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS: intel_crtc not initialized\n");
 		return;
 	}
 
 	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
-		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
 		return;
 	}
 
@@ -6988,13 +7190,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 		index = DRRS_LOW_RR;
 
 	if (index == dev_priv->drrs.refresh_rate_type) {
-		DRM_DEBUG_KMS(
-			"DRRS requested for previously set RR...ignoring\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS requested for previously set RR...ignoring\n");
 		return;
 	}
 
 	if (!crtc_state->hw.active) {
-		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "eDP encoder disabled. CRTC not Active\n");
 		return;
 	}
 
@@ -7008,13 +7211,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 			break;
 		case DRRS_MAX_RR:
 		default:
-			DRM_ERROR("Unsupported refreshrate type\n");
+			drm_err(&dev_priv->drm,
+				"Unsupported refresh rate type\n");
 		}
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
 		u32 val;
 
-		val = I915_READ(reg);
+		val = intel_de_read(dev_priv, reg);
 		if (index > DRRS_HIGH_RR) {
 			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
@@ -7026,12 +7230,13 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
 			else
 				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
 		}
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 
 	dev_priv->drrs.refresh_rate_type = index;
 
-	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+		    refresh_rate);
 }
 
 /**
@@ -7047,18 +7252,19 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	if (!crtc_state->has_drrs) {
-		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+		drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
 		return;
 	}
 
 	if (dev_priv->psr.enabled) {
-		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR enabled. Not enabling DRRS.\n");
 		return;
 	}
 
 	mutex_lock(&dev_priv->drrs.mutex);
 	if (dev_priv->drrs.dp) {
-		DRM_DEBUG_KMS("DRRS already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
 		goto unlock;
 	}
 
@@ -7284,25 +7490,28 @@ intel_dp_drrs_init(struct intel_connector *connector,
 	mutex_init(&dev_priv->drrs.mutex);
 
 	if (INTEL_GEN(dev_priv) <= 6) {
-		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS is only supported for Gen7 and above\n");
 		return NULL;
 	}
 
 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
-		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
+		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
 		return NULL;
 	}
 
 	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
 	if (!downclock_mode) {
-		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Downclock mode not found, DRRS not supported\n");
 		return NULL;
 	}
 
 	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
 
 	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
-	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
+	drm_dbg_kms(&dev_priv->drm,
+		    "seamless DRRS supported for eDP panel.\n");
 	return downclock_mode;
 }
 
@@ -7331,8 +7540,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	 * with an already powered-on LVDS power sequencer.
 	 */
 	if (intel_get_lvds_encoder(dev_priv)) {
-		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
-		DRM_INFO("LVDS was detected, not registering eDP\n");
+		drm_WARN_ON(dev,
+			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+		drm_info(&dev_priv->drm,
+			 "LVDS was detected, not registering eDP\n");
 
 		return false;
 	}
@@ -7348,7 +7559,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 
 	if (!has_dpcd) {
 		/* if this fails, presume the device is a ghost */
-		DRM_INFO("failed to retrieve link info, disabling eDP\n");
+		drm_info(&dev_priv->drm,
+			 "failed to retrieve link info, disabling eDP\n");
 		goto out_vdd_off;
 	}
 
@@ -7356,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
 	if (edid) {
 		if (drm_add_edid_modes(connector, edid)) {
-			drm_connector_update_edid_property(connector,
-								edid);
+			drm_connector_update_edid_property(connector, edid);
+			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
 		} else {
 			kfree(edid);
 			edid = ERR_PTR(-EINVAL);
@@ -7393,17 +7605,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 		if (pipe != PIPE_A && pipe != PIPE_B)
 			pipe = PIPE_A;
 
-		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
-			      pipe_name(pipe));
+		drm_dbg_kms(&dev_priv->drm,
+			    "using pipe %c for initial backlight setup\n",
+			    pipe_name(pipe));
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
 	intel_connector->panel.backlight.power = intel_edp_backlight_power;
 	intel_panel_setup_backlight(connector, pipe);
 
-	if (fixed_mode)
-		drm_connector_init_panel_orientation_property(
-			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+	if (fixed_mode) {
+		drm_connector_set_panel_orientation_with_quirk(connector,
+				dev_priv->vbt.orientation,
+				fixed_mode->hdisplay, fixed_mode->vdisplay);
+	}
 
 	return true;
 
@@ -7459,10 +7674,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	INIT_WORK(&intel_connector->modeset_retry_work,
 		  intel_dp_modeset_retry_work_fn);
 
-	if (WARN(intel_dig_port->max_lanes < 1,
-		 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
-		 intel_dig_port->max_lanes, intel_encoder->base.base.id,
-		 intel_encoder->base.name))
+	if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+		     intel_dig_port->max_lanes, intel_encoder->base.base.id,
+		     intel_encoder->base.name))
 		return false;
 
 	intel_dp_set_source_rates(intel_dp);
@@ -7472,7 +7687,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp->active_pipe = INVALID_PIPE;
 
 	/* Preserve the current hw state. */
-	intel_dp->DP = I915_READ(intel_dp->output_reg);
+	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
 	if (intel_dp_is_port_edp(dev_priv, port)) {
@@ -7480,7 +7695,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		 * Currently we don't support eDP on TypeC ports, although in
 		 * theory it could work on TypeC legacy ports.
 		 */
-		WARN_ON(intel_phy_is_tc(dev_priv, phy));
+		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
 		type = DRM_MODE_CONNECTOR_eDP;
 	} else {
 		type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7498,14 +7713,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 
 	/* eDP only on port B and/or C on vlv/chv */
-	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-		    intel_dp_is_edp(intel_dp) &&
-		    port != PORT_B && port != PORT_C))
+	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+			      IS_CHERRYVIEW(dev_priv)) &&
+			intel_dp_is_edp(intel_dp) &&
+			port != PORT_B && port != PORT_C))
 		return false;
 
-	DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
-		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
-		      intel_encoder->base.base.id, intel_encoder->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Adding %s connector on [ENCODER:%d:%s]\n",
+		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+		    intel_encoder->base.base.id, intel_encoder->base.name);
 
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7518,6 +7735,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		connector->ycbcr_420_allowed = true;
 
 	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	intel_dp_aux_init(intel_dp);
 
@@ -7543,7 +7761,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
 		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
 		if (ret)
-			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "HDCP init failed, skipping.\n");
 	}
 
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -7551,8 +7770,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	 * generated on the port when a cable is not attached.
 	 */
 	if (IS_G45(dev_priv)) {
-		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
-		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+			       (temp & ~0xf) | 0xd);
 	}
 
 	return true;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da1660..0c7be8e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
 
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_reg.h"
 
 enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7c653f8..3e706bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,10 +57,27 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
  */
 static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	u8 read_val[2] = { 0x0 };
+	u8 mode_reg;
 	u16 level = 0;
 
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+			      &mode_reg) != 1) {
+		DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
+			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+		return 0;
+	}
+
+	/*
+	 * If we're not in DPCD control mode yet, the programmed brightness
+	 * value is meaningless and we should assume max brightness
+	 */
+	if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) !=
+	    DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
+		return connector->panel.backlight.max;
+
 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
 			     &read_val, sizeof(read_val)) < 0) {
 		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
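
Two return-value conventions meet here: drm_dp_dpcd_readb() returns the number of bytes transferred, so single-byte reads are checked against 1, while the multi-byte drm_dp_dpcd_read() above is checked for a negative error. The new guard reads the backlight mode register first because, until the panel has actually been switched into DPCD control mode, the brightness registers can hold meaningless values; reporting the cached maximum is the safer answer. Condensed:

	u8 mode_reg;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			      &mode_reg) != 1)	/* expect exactly 1 byte */
		return 0;
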
@@ -82,7 +99,7 @@ static void
 intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	u8 vals[2] = { 0x0 };
 
 	vals[0] = level;
@@ -110,62 +127,29 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
-	int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
-	u8 pn, pn_min, pn_max;
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+	int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
 
-	/* Find desired value of (F x P)
-	 * Note that, if F x P is out of supported range, the maximum value or
-	 * minimum value will applied automatically. So no need to check that.
-	 */
 	freq = dev_priv->vbt.backlight.pwm_freq_hz;
-	DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
 	if (!freq) {
 		DRM_DEBUG_KMS("Use panel default backlight frequency\n");
 		return false;
 	}
 
 	fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+	f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+	fxp_actual = f << pn;
 
-	/* Use highest possible value of Pn for more granularity of brightness
-	 * adjustment while satifying the conditions below.
-	 * - Pn is in the range of Pn_min and Pn_max
-	 * - F is in the range of 1 and 255
-	 * - FxP is within 25% of desired value.
-	 *   Note: 25% is arbitrary value and may need some tweak.
-	 */
-	if (drm_dp_dpcd_readb(&intel_dp->aux,
-			       DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
-		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
-		return false;
-	}
-	if (drm_dp_dpcd_readb(&intel_dp->aux,
-			       DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
-		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
-		return false;
-	}
-	pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-	pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
+	/* Ensure frequency is within 25% of desired value */
 	fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
 	fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
-	if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
-		DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+
+	if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
+		DRM_DEBUG_KMS("Actual frequency out of range\n");
 		return false;
 	}
 
-	for (pn = pn_max; pn >= pn_min; pn--) {
-		f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
-		fxp_actual = f << pn;
-		if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
-			break;
-	}
-
-	if (drm_dp_dpcd_writeb(&intel_dp->aux,
-			       DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
-		DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
-		return false;
-	}
 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
 			       DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
 		DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
@@ -178,7 +162,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 					  const struct drm_connector_state *conn_state)
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_panel *panel = &connector->panel;
 	u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -197,6 +182,12 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 	case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
 		new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
 		new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+
+		if (drm_dp_dpcd_writeb(&intel_dp->aux,
+				       DP_EDP_PWMGEN_BIT_COUNT,
+				       panel->backlight.pwmgen_bit_count) < 0)
+			DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+
 		break;
 
 	/* Do nothing when it is already DPCD mode */
@@ -216,8 +207,9 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 		}
 	}
 
+	intel_dp_aux_set_backlight(conn_state,
+				   connector->panel.backlight.level);
 	set_aux_backlight_enable(intel_dp, true);
-	intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
 }
 
 static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -226,20 +218,91 @@ static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old
 				 false);
 }
 
+static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+{
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_panel *panel = &connector->panel;
+	u32 max_backlight = 0;
+	int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+	u8 pn, pn_min, pn_max;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) {
+		pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+		max_backlight = (1 << pn) - 1;
+	}
+
+	/* Find desired value of (F x P)
+	 * Note that, if F x P is out of supported range, the maximum value or
+	 * minimum value will be applied automatically. So no need to check that.
+	 */
+	freq = i915->vbt.backlight.pwm_freq_hz;
+	DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+	if (!freq) {
+		DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+		return max_backlight;
+	}
+
+	fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+	/* Use highest possible value of Pn for more granularity of brightness
+	 * adjustment while satisfying the conditions below.
+	 * - Pn is in the range of Pn_min and Pn_max
+	 * - F is in the range of 1 and 255
+	 * - FxP is within 25% of desired value.
+	 *   Note: 25% is an arbitrary value and may need some tweak.
+	 */
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+		return max_backlight;
+	}
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+		return max_backlight;
+	}
+	pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+	pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+	fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+	fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+	if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+		DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+		return max_backlight;
+	}
+
+	for (pn = pn_max; pn >= pn_min; pn--) {
+		f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+		fxp_actual = f << pn;
+		if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+			break;
+	}
+
+	DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+	if (drm_dp_dpcd_writeb(&intel_dp->aux,
+			       DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+		DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+		return max_backlight;
+	}
+	panel->backlight.pwmgen_bit_count = pn;
+
+	max_backlight = (1 << pn) - 1;
+
+	return max_backlight;
+}
+
 static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 					enum pipe pipe)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 	struct intel_panel *panel = &connector->panel;
 
-	if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
-		panel->backlight.max = 0xFFFF;
-	else
-		panel->backlight.max = 0xFF;
+	panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+	if (!panel->backlight.max)
+		return -ENODEV;
 
 	panel->backlight.min = 0;
 	panel->backlight.level = intel_dp_aux_get_backlight(connector);
-
 	panel->backlight.enabled = panel->backlight.level != 0;
 
 	return 0;
@@ -248,7 +311,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
 static bool
 intel_dp_aux_display_control_capable(struct intel_connector *connector)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
 
 	/* Check the eDP Display control capabilities registers to determine if
 	 * the panel can support backlight control over the aux channel
@@ -265,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 {
 	struct intel_panel *panel = &intel_connector->panel;
-	struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (i915_modparams.enable_dpcd_backlight == 0 ||
-	    (i915_modparams.enable_dpcd_backlight == -1 &&
-	    dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+	    !intel_dp_aux_display_control_capable(intel_connector))
 		return -ENODEV;
 
-	if (!intel_dp_aux_display_control_capable(intel_connector))
+	/*
+	 * There are a lot of machines that don't properly advertise the
+	 * backlight control interface to use in their VBIOS, :\
+	 */
+	if (dev_priv->vbt.backlight.type !=
+	    INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+	    !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+			      DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+		DRM_DEV_INFO(dev->dev,
+			     "Panel advertises DPCD backlight support, but "
+			     "VBT disagrees. If your backlight controls "
+			     "don't work try booting with "
+			     "i915.enable_dpcd_backlight=1. If your machine "
+			     "needs this, please file a _new_ bug report on "
+			     "drm/i915, see " FDO_BUG_URL " for details.\n");
 		return -ENODEV;
+	}
 
 	panel->backlight.setup = intel_dp_aux_setup_backlight;
 	panel->backlight.enable = intel_dp_aux_enable_backlight;
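The hunks above move the pwmgen bit count selection into intel_dp_aux_calc_max_backlight() and derive the brightness range from the chosen Pn. As a rough standalone sketch of that arithmetic (illustrative only -- the sample VBT frequency and DPCD caps below are made up, not i915 code; 27000 is the eDP spec's 27 MHz DP_EDP_BACKLIGHT_FREQ_BASE_KHZ):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
#define BACKLIGHT_FREQ_BASE_KHZ 27000	/* eDP PWM generator base, 27 MHz */

static int clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int freq_hz = 200;		/* hypothetical VBT PWM frequency */
	int pn_min = 4, pn_max = 10;	/* hypothetical DPCD pwmgen caps */
	int fxp = DIV_ROUND_CLOSEST(BACKLIGHT_FREQ_BASE_KHZ * 1000, freq_hz);
	int fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);	/* 25% tolerance */
	int fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
	int pn, f = 1;

	/* Highest Pn wins: more brightness steps at the same frequency */
	for (pn = pn_max; pn >= pn_min; pn--) {
		f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
		if (fxp_min <= (f << pn) && (f << pn) <= fxp_max)
			break;
	}
	if (pn < pn_min)
		fprintf(stderr, "no usable Pn, fall back to panel default\n");
	else
		printf("Pn=%d F=%d max_backlight=%d\n", pn, f, (1 << pn) - 1);
	return 0;
}

Picking the largest Pn that keeps F in [1, 255] and F * 2^Pn within 25% of the target is why max_backlight ends up as (1 << pn) - 1.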
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2a1130d..a7defb3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
 static bool
 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 voltage;
 	int voltage_tries, cr_tries, max_cr_tries;
 	bool max_vswing_reached = false;
@@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 			      &link_bw, &rate_select);
 
 	if (link_bw)
-		DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+		drm_dbg_kms(&i915->drm,
+			    "Using LINK_BW_SET value %02x\n", link_bw);
 	else
-		DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+		drm_dbg_kms(&i915->drm,
+			    "Using LINK_RATE_SET value %02x\n", rate_select);
 
 	/* Write the link configuration data */
 	link_config[0] = link_bw;
@@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 	if (!intel_dp_reset_link_train(intel_dp,
 				       DP_TRAINING_PATTERN_1 |
 				       DP_LINK_SCRAMBLING_DISABLE)) {
-		DRM_ERROR("failed to enable link training\n");
+		drm_err(&i915->drm, "failed to enable link training\n");
 		return false;
 	}
 
@@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-			DRM_ERROR("failed to get link status\n");
+			drm_err(&i915->drm, "failed to get link status\n");
 			return false;
 		}
 
 		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
-			DRM_DEBUG_KMS("clock recovery OK\n");
+			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
 			return true;
 		}
 
 		if (voltage_tries == 5) {
-			DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+			drm_dbg_kms(&i915->drm,
+				    "Same voltage tried 5 times\n");
 			return false;
 		}
 
 		if (max_vswing_reached) {
-			DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
 			return false;
 		}
 
@@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
-			DRM_ERROR("failed to update link training\n");
+			drm_err(&i915->drm,
+				"failed to update link training\n");
 			return false;
 		}
 
@@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 			max_vswing_reached = true;
 
 	}
-	DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+	drm_err(&i915->drm,
+		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
 	return false;
 }
 
@@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 		return DP_TRAINING_PATTERN_4;
 	} else if (intel_dp->link_rate == 810000) {
 		if (!source_tps4)
-			DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
 		if (!sink_tps4)
-			DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+				    "8.1 Gbps link rate without sink TPS4 support\n");
 	}
 	/*
 	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
@@ -271,9 +279,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 		return  DP_TRAINING_PATTERN_3;
 	} else if (intel_dp->link_rate >= 540000) {
 		if (!source_tps3)
-			DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
 		if (!sink_tps3)
-			DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
 	}
 
 	return DP_TRAINING_PATTERN_2;
@@ -282,6 +292,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 static bool
 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	int tries;
 	u32 training_pattern;
 	u8 link_status[DP_LINK_STATUS_SIZE];
@@ -295,7 +306,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 	/* channel equalization */
 	if (!intel_dp_set_link_train(intel_dp,
 				     training_pattern)) {
-		DRM_ERROR("failed to start channel equalization\n");
+		drm_err(&i915->drm, "failed to start channel equalization\n");
 		return false;
 	}
 
@@ -303,7 +314,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
-			DRM_ERROR("failed to get link status\n");
+			drm_err(&i915->drm,
+				"failed to get link status\n");
 			break;
 		}
 
@@ -311,23 +323,25 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 		if (!drm_dp_clock_recovery_ok(link_status,
 					      intel_dp->lane_count)) {
 			intel_dp_dump_link_status(link_status);
-			DRM_DEBUG_KMS("Clock recovery check failed, cannot "
-				      "continue channel equalization\n");
+			drm_dbg_kms(&i915->drm,
+				    "Clock recovery check failed, cannot "
+				    "continue channel equalization\n");
 			break;
 		}
 
 		if (drm_dp_channel_eq_ok(link_status,
 					 intel_dp->lane_count)) {
 			channel_eq = true;
-			DRM_DEBUG_KMS("Channel EQ done. DP Training "
-				      "successful\n");
+			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
+				    "successful\n");
 			break;
 		}
 
 		/* Update training set as requested by target */
 		intel_get_adjust_train(intel_dp, link_status);
 		if (!intel_dp_update_link_train(intel_dp)) {
-			DRM_ERROR("failed to update link training\n");
+			drm_err(&i915->drm,
+				"failed to update link training\n");
 			break;
 		}
 	}
@@ -335,7 +349,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 	/* Try 5 times, else fail and try at lower BW */
 	if (tries == 5) {
 		intel_dp_dump_link_status(link_status);
-		DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
+		drm_dbg_kms(&i915->drm,
+			    "Channel equalization failed 5 times\n");
 	}
 
 	intel_dp_set_idle_link_train(intel_dp);
@@ -362,17 +377,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	if (!intel_dp_link_training_channel_equalization(intel_dp))
 		goto failure_handling;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
-		      intel_connector->base.base.id,
-		      intel_connector->base.name,
-		      intel_dp->link_rate, intel_dp->lane_count);
+	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+		    "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
+		    intel_connector->base.base.id,
+		    intel_connector->base.name,
+		    intel_dp->link_rate, intel_dp->lane_count);
 	return;
 
  failure_handling:
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
-		      intel_connector->base.base.id,
-		      intel_connector->base.name,
-		      intel_dp->link_rate, intel_dp->lane_count);
+	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+		    "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+		    intel_connector->base.base.id,
+		    intel_connector->base.name,
+		    intel_dp->link_rate, intel_dp->lane_count);
 	if (!intel_dp_get_link_train_fallback_values(intel_dp,
 						     intel_dp->link_rate,
 						     intel_dp->lane_count))
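The hunks in this file are a mechanical DRM_DEBUG_KMS()/DRM_ERROR() to drm_dbg_kms()/drm_err() conversion. A toy sketch of why the device-qualified form is preferred (an assumption for illustration -- this mimics the idea, not the actual drm_print implementation): the struct drm_device argument lets the core tag every message with the emitting device, which keeps logs readable on multi-GPU systems.

#include <stdarg.h>
#include <stdio.h>

struct drm_device { const char *unique; };	/* toy stand-in */

/* toy stand-in for drm_dbg_kms(): prefix each message with the
 * device identifier so output from multiple GPUs stays attributable */
static void drm_dbg_kms_sketch(const struct drm_device *drm,
			       const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "[drm:kms] %s: ", drm->unique);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct drm_device card0 = { "0000:00:02.0" };

	drm_dbg_kms_sketch(&card0, "clock recovery OK\n");
	return 0;
}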
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index cba68c5..44f3fd25 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	void *port = connector->port;
-	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
 					   DP_DPCD_QUIRK_CONSTANT_N);
 	int bpp, slots = -EINVAL;
 
@@ -352,8 +352,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 
 	intel_dp->active_mst_links--;
 	last_mst_stream = intel_dp->active_mst_links == 0;
-	WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
-		!intel_dp_mst_is_master_trans(old_crtc_state));
+	drm_WARN_ON(&dev_priv->drm,
+		    INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+		    !intel_dp_mst_is_master_trans(old_crtc_state));
 
 	intel_crtc_vblank_off(old_crtc_state);
 
@@ -361,9 +362,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
 
 	drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
-	val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+	val = intel_de_read(dev_priv,
+			    TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
 	val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-	I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+	intel_de_write(dev_priv,
+		       TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+		       val);
 
 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
 				  DP_TP_STATUS_ACT_SENT, 1))
@@ -437,8 +441,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 	connector->encoder = encoder;
 	intel_mst->connector = connector;
 	first_mst_stream = intel_dp->active_mst_links == 0;
-	WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
-		!intel_dp_mst_is_master_trans(pipe_config));
+	drm_WARN_ON(&dev_priv->drm,
+		    INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+		    !intel_dp_mst_is_master_trans(pipe_config));
 
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
@@ -459,8 +464,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 		DRM_ERROR("failed to allocate vcpi\n");
 
 	intel_dp->active_mst_links++;
-	temp = I915_READ(intel_dp->regs.dp_tp_status);
-	I915_WRITE(intel_dp->regs.dp_tp_status, temp);
+	temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
 
 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 
@@ -475,6 +480,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 		intel_ddi_enable_pipe_clock(pipe_config);
 
 	intel_ddi_set_dp_msa(pipe_config, conn_state);
+
+	intel_dp_set_m_n(pipe_config, M1_N1);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -486,6 +493,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
+	drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+
+	intel_enable_pipe(pipe_config);
+
+	intel_crtc_vblank_on(pipe_config);
+
 	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
 
 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
@@ -535,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
 	return ret;
 }
 
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	int ret;
+
+	ret = drm_dp_mst_connector_late_register(connector,
+						 intel_connector->port);
+	if (ret < 0)
+		return ret;
+
+	ret = intel_connector_register(connector);
+	if (ret < 0)
+		drm_dp_mst_connector_early_unregister(connector,
+						      intel_connector->port);
+
+	return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	intel_connector_unregister(connector);
+	drm_dp_mst_connector_early_unregister(connector,
+					      intel_connector->port);
+}
+
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
-	.late_register = intel_connector_register,
-	.early_unregister = intel_connector_unregister,
+	.late_register = intel_dp_mst_connector_late_register,
+	.early_unregister = intel_dp_mst_connector_early_unregister,
 	.destroy = intel_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -632,9 +674,9 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
 
 static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
 {
-	if (connector->encoder && connector->base.state->crtc) {
+	if (intel_attached_encoder(connector) && connector->base.state->crtc) {
 		enum pipe pipe;
-		if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+		if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
 			return false;
 		return true;
 	}
@@ -706,36 +748,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	return NULL;
 }
 
-static void intel_dp_register_mst_connector(struct drm_connector *connector)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
-	if (dev_priv->fbdev)
-		drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
-						connector);
-
-	drm_connector_register(connector);
-}
-
-static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
-					   struct drm_connector *connector)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
-	drm_connector_unregister(connector);
-
-	if (dev_priv->fbdev)
-		drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
-						   connector);
-
-	drm_connector_put(connector);
-}
-
 static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = intel_dp_add_mst_connector,
-	.register_connector = intel_dp_register_mst_connector,
-	.destroy_connector = intel_dp_destroy_mst_connector,
 };
 
 static struct intel_dp_mst_encoder *
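With register_connector/destroy_connector gone from mst_cbs, registration ordering is handled by the late_register/early_unregister pair added above. A minimal sketch of that pairing pattern (hypothetical helpers, not the i915 functions; the simulated failure exercises the unwind path):

#include <stdio.h>

static int mst_late_register(void)      { puts("mst register");    return 0; }
static void mst_early_unregister(void)  { puts("mst unregister"); }
static int connector_register(void)     { puts("sysfs register");  return -1; /* simulate failure */ }
static void connector_unregister(void)  { puts("sysfs unregister"); }

static int late_register(void)
{
	int ret = mst_late_register();

	if (ret < 0)
		return ret;

	ret = connector_register();
	if (ret < 0)
		mst_early_unregister();	/* unwind the first step on failure */

	return ret;
}

static void early_unregister(void)
{
	/* tear down in the reverse order of registration */
	connector_unregister();
	mst_early_unregister();
}

int main(void)
{
	if (late_register() < 0)
		fprintf(stderr, "registration failed, unwound cleanly\n");
	else
		early_unregister();
	return 0;
}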
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 6fb1f7a..399a7ed 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -259,7 +259,8 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
 		}
 	}
 
-	WARN(1, "PHY not found for PORT %c", port_name(port));
+	drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+		 port_name(port));
 	*phy = DPIO_PHY0;
 	*ch = DPIO_CH0;
 }
@@ -278,33 +279,34 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers and we pick lanes 0/1 for that.
 	 */
-	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+	val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
 	val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
-	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+	intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
 
-	val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+	val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
 	val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
 	val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
-	I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+	intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
 
-	val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+	val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
 	val &= ~SCALE_DCOMP_METHOD;
 	if (enable)
 		val |= SCALE_DCOMP_METHOD;
 
 	if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
-		DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+		drm_err(&dev_priv->drm,
+			"Disabled scaling while ouniqetrangenmethod was set");
 
-	I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+	intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
 
-	val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+	val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
 	val &= ~DE_EMPHASIS;
 	val |= deemphasis << DEEMPH_SHIFT;
-	I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+	intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
 
-	val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+	val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
 	val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
-	I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+	intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
 }
 
 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
@@ -314,20 +316,20 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 
 	phy_info = bxt_get_phy_info(dev_priv, phy);
 
-	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+	if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
 		return false;
 
-	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+	if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
 	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
-		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
-				 phy);
+		drm_dbg(&dev_priv->drm,
+			"DDI PHY %d powered, but power hasn't settled\n", phy);
 
 		return false;
 	}
 
-	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
-		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
-				 phy);
+	if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+		drm_dbg(&dev_priv->drm,
+			"DDI PHY %d powered, but still in reset\n", phy);
 
 		return false;
 	}
@@ -337,7 +339,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 
 static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
-	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+	u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
 
 	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
 }
@@ -347,7 +349,8 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
 {
 	if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
 				  GRC_DONE, 10))
-		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+		drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
+			phy);
 }
 
 static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
@@ -364,18 +367,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 			dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
 
 		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
-			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
-					 "won't reprogram it\n", phy);
+			drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+				"won't reprogram it\n", phy);
 			return;
 		}
 
-		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
-				 "force reprogramming it\n", phy);
+		drm_dbg(&dev_priv->drm,
+			"DDI PHY %d enabled with invalid state, "
+			"force reprogramming it\n", phy);
 	}
 
-	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
 	val |= phy_info->pwron_mask;
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+	intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
 
 	/*
 	 * The PHY registers start out inaccessible and respond to reads with
@@ -390,29 +394,30 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 				       PHY_RESERVED | PHY_POWER_GOOD,
 				       PHY_POWER_GOOD,
 				       1))
-		DRM_ERROR("timeout during PHY%d power on\n", phy);
+		drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+			phy);
 
 	/* Program PLL Rcomp code offset */
-	val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
 	val &= ~IREF0RC_OFFSET_MASK;
 	val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
-	I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
 
-	val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
 	val &= ~IREF1RC_OFFSET_MASK;
 	val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
-	I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
 
 	/* Program power gating */
-	val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+	val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
 	val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
 		SUS_CLK_CONFIG;
-	I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+	intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
 
 	if (phy_info->dual_channel) {
-		val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+		val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
 		val |= DW6_OLDO_DYN_PWR_DOWN_EN;
-		I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+		intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
 	}
 
 	if (phy_info->rcomp_phy != -1) {
@@ -430,19 +435,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 		grc_code = val << GRC_CODE_FAST_SHIFT |
 			   val << GRC_CODE_SLOW_SHIFT |
 			   val;
-		I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+		intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
 
-		val = I915_READ(BXT_PORT_REF_DW8(phy));
+		val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
 		val |= GRC_DIS | GRC_RDY_OVRD;
-		I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+		intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
 	}
 
 	if (phy_info->reset_delay)
 		udelay(phy_info->reset_delay);
 
-	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
 	val |= COMMON_RESET_DIS;
-	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+	intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
 }
 
 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -452,13 +457,13 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 
 	phy_info = bxt_get_phy_info(dev_priv, phy);
 
-	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+	val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
 	val &= ~COMMON_RESET_DIS;
-	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+	intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
 
-	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
 	val &= ~phy_info->pwron_mask;
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+	intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
 }
 
 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -496,7 +501,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 	va_list args;
 	u32 val;
 
-	val = I915_READ(reg);
+	val = intel_de_read(dev_priv, reg);
 	if ((val & mask) == expected)
 		return true;
 
@@ -504,7 +509,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 	vaf.fmt = reg_fmt;
 	vaf.va = &args;
 
-	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+	drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
 			 "current %08x, expected %08x (mask %08x)\n",
 			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
 			 mask);
@@ -599,7 +604,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	for (lane = 0; lane < 4; lane++) {
-		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+		u32 val = intel_de_read(dev_priv,
+					BXT_PORT_TX_DW14_LN(phy, ch, lane));
 
 		/*
 		 * Note that on CHV this flag is called UPAR, but has
@@ -609,7 +615,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
 		if (lane_lat_optim_mask & BIT(lane))
 			val |= LATENCY_OPTIM;
 
-		I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+		intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+			       val);
 	}
 }
 
@@ -627,7 +634,8 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
 
 	mask = 0;
 	for (lane = 0; lane < 4; lane++) {
-		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+		u32 val = intel_de_read(dev_priv,
+					BXT_PORT_TX_DW14_LN(phy, ch, lane));
 
 		if (val & LATENCY_OPTIM)
 			mask |= BIT(lane);
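The I915_READ()/I915_WRITE() to intel_de_read()/intel_de_write() conversion above keeps every read-modify-write sequence intact while making the device explicit instead of relying on a dev_priv in scope. A self-contained sketch of the same shape (toy types and register index, not the i915 uncore API):

#include <stdint.h>
#include <stdio.h>

struct device { uint32_t regs[16]; };	/* toy register file */

static uint32_t de_read(struct device *dev, unsigned int reg)
{
	return dev->regs[reg];
}

static void de_write(struct device *dev, unsigned int reg, uint32_t val)
{
	dev->regs[reg] = val;
}

int main(void)
{
	struct device dev = { .regs = { [2] = 0x11 } };
	uint32_t val;

	/* clear bit 0, set bit 4 -- same shape as the DW10 update above */
	val = de_read(&dev, 2);
	val &= ~0x1u;
	val |= 1u << 4;
	de_write(&dev, 2, val);
	printf("reg2 = 0x%x\n", (unsigned int)de_read(&dev, 2));
	return 0;
}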
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c75e34d..2d47f1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
  * commit phase.
  */
 
+struct intel_dpll_mgr {
+	const struct dpll_info *dpll_info;
+
+	bool (*get_dplls)(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc,
+			  struct intel_encoder *encoder);
+	void (*put_dplls)(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc);
+	void (*update_active_dpll)(struct intel_atomic_state *state,
+				   struct intel_crtc *crtc,
+				   struct intel_encoder *encoder);
+	void (*update_ref_clks)(struct drm_i915_private *i915);
+	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+			      const struct intel_dpll_hw_state *hw_state);
+};
+
 static void
 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
 				  struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
 	enum intel_dpll_id i;
 
 	/* Copy shared dpll state */
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
 
 		shared_dpll[i] = pll->state;
 	}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 			    enum intel_dpll_id id)
 {
-	return &dev_priv->shared_dplls[id];
+	return &dev_priv->dpll.shared_dplls[id];
 }
 
 /**
@@ -103,11 +119,14 @@ enum intel_dpll_id
 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll)
 {
-	if (WARN_ON(pll < dev_priv->shared_dplls||
-		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+	long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+	if (drm_WARN_ON(&dev_priv->drm,
+			pll_idx < 0 ||
+			pll_idx >= dev_priv->dpll.num_shared_dpll))
 		return -1;
 
-	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+	return pll_idx;
 }
 
 /* For ILK+ */
@@ -118,7 +137,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 	bool cur_state;
 	struct intel_dpll_hw_state hw_state;
 
-	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+	if (drm_WARN(&dev_priv->drm, !pll,
+		     "asserting DPLL %s with no DPLL\n", onoff(state)))
 		return;
 
 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
@@ -140,19 +160,19 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 
-	if (WARN_ON(pll == NULL))
+	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 		return;
 
-	mutex_lock(&dev_priv->dpll_lock);
-	WARN_ON(!pll->state.crtc_mask);
+	mutex_lock(&dev_priv->dpll.lock);
+	drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
 	if (!pll->active_mask) {
-		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
-		WARN_ON(pll->on);
+		drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
+		drm_WARN_ON(&dev_priv->drm, pll->on);
 		assert_shared_dpll_disabled(dev_priv, pll);
 
 		pll->info->funcs->prepare(dev_priv, pll);
 	}
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 /**
@@ -169,35 +189,36 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
 	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 	unsigned int old_mask;
 
-	if (WARN_ON(pll == NULL))
+	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
 		return;
 
-	mutex_lock(&dev_priv->dpll_lock);
+	mutex_lock(&dev_priv->dpll.lock);
 	old_mask = pll->active_mask;
 
-	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
-	    WARN_ON(pll->active_mask & crtc_mask))
+	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
+	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
 		goto out;
 
 	pll->active_mask |= crtc_mask;
 
-	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
-		      pll->info->name, pll->active_mask, pll->on,
-		      crtc->base.base.id);
+	drm_dbg_kms(&dev_priv->drm,
+		    "enable %s (active %x, on? %d) for crtc %d\n",
+		    pll->info->name, pll->active_mask, pll->on,
+		    crtc->base.base.id);
 
 	if (old_mask) {
-		WARN_ON(!pll->on);
+		drm_WARN_ON(&dev_priv->drm, !pll->on);
 		assert_shared_dpll_enabled(dev_priv, pll);
 		goto out;
 	}
-	WARN_ON(pll->on);
+	drm_WARN_ON(&dev_priv->drm, pll->on);
 
-	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
 	pll->info->funcs->enable(dev_priv, pll);
 	pll->on = true;
 
 out:
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 /**
@@ -220,27 +241,28 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
 	if (pll == NULL)
 		return;
 
-	mutex_lock(&dev_priv->dpll_lock);
-	if (WARN_ON(!(pll->active_mask & crtc_mask)))
+	mutex_lock(&dev_priv->dpll.lock);
+	if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
 		goto out;
 
-	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
-		      pll->info->name, pll->active_mask, pll->on,
-		      crtc->base.base.id);
+	drm_dbg_kms(&dev_priv->drm,
+		    "disable %s (active %x, on? %d) for crtc %d\n",
+		    pll->info->name, pll->active_mask, pll->on,
+		    crtc->base.base.id);
 
 	assert_shared_dpll_enabled(dev_priv, pll);
-	WARN_ON(!pll->on);
+	drm_WARN_ON(&dev_priv->drm, !pll->on);
 
 	pll->active_mask &= ~crtc_mask;
 	if (pll->active_mask)
 		goto out;
 
-	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
 	pll->info->funcs->disable(dev_priv, pll);
 	pll->on = false;
 
 out:
-	mutex_unlock(&dev_priv->dpll_lock);
+	mutex_unlock(&dev_priv->dpll.lock);
 }
 
 static struct intel_shared_dpll *
@@ -256,10 +278,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
 
-	WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
 
 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
-		pll = &dev_priv->shared_dplls[i];
+		pll = &dev_priv->dpll.shared_dplls[i];
 
 		/* Only want to check enabled timings first */
 		if (shared_dpll[i].crtc_mask == 0) {
@@ -271,20 +293,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
 		if (memcmp(pll_state,
 			   &shared_dpll[i].hw_state,
 			   sizeof(*pll_state)) == 0) {
-			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
-				      crtc->base.base.id, crtc->base.name,
-				      pll->info->name,
-				      shared_dpll[i].crtc_mask,
-				      pll->active_mask);
+			drm_dbg_kms(&dev_priv->drm,
+				    "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+				    crtc->base.base.id, crtc->base.name,
+				    pll->info->name,
+				    shared_dpll[i].crtc_mask,
+				    pll->active_mask);
 			return pll;
 		}
 	}
 
 	/* Ok no matching timings, maybe there's a free one? */
 	if (unused_pll) {
-		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
-			      crtc->base.base.id, crtc->base.name,
-			      unused_pll->info->name);
+		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+			    crtc->base.base.id, crtc->base.name,
+			    unused_pll->info->name);
 		return unused_pll;
 	}
 
@@ -297,6 +320,7 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
 			    const struct intel_shared_dpll *pll,
 			    const struct intel_dpll_hw_state *pll_state)
 {
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
 	struct intel_shared_dpll_state *shared_dpll;
 	const enum intel_dpll_id id = pll->info->id;
 
@@ -305,8 +329,8 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
 	if (shared_dpll[id].crtc_mask == 0)
 		shared_dpll[id].hw_state = *pll_state;
 
-	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
-			 pipe_name(crtc->pipe));
+	drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
+		pipe_name(crtc->pipe));
 
 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
 }
@@ -357,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
 	if (!state->dpll_set)
 		return;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
 		struct intel_shared_dpll *pll =
-			&dev_priv->shared_dplls[i];
+			&dev_priv->dpll.shared_dplls[i];
 
 		swap(pll->state, shared_dpll[i]);
 	}
@@ -378,10 +402,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(PCH_DPLL(id));
+	val = intel_de_read(dev_priv, PCH_DPLL(id));
 	hw_state->dpll = val;
-	hw_state->fp0 = I915_READ(PCH_FP0(id));
-	hw_state->fp1 = I915_READ(PCH_FP1(id));
+	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
+	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
 
@@ -393,8 +417,8 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
 {
 	const enum intel_dpll_id id = pll->info->id;
 
-	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
-	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
+	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
 }
 
 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -404,7 +428,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
 
 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
 
-	val = I915_READ(PCH_DREF_CONTROL);
+	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
 			    DREF_SUPERSPREAD_SOURCE_MASK));
 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
@@ -418,10 +442,10 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 	/* PCH refclock must be enabled first */
 	ibx_assert_pch_refclk_enabled(dev_priv);
 
-	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
 
 	/* Wait for the clocks to stabilize. */
-	POSTING_READ(PCH_DPLL(id));
+	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 	udelay(150);
 
 	/* The pixel multiplier can only be updated once the
@@ -429,8 +453,8 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 	 *
 	 * So write it again.
 	 */
-	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
-	POSTING_READ(PCH_DPLL(id));
+	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 	udelay(200);
 }
 
@@ -439,8 +463,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 {
 	const enum intel_dpll_id id = pll->info->id;
 
-	I915_WRITE(PCH_DPLL(id), 0);
-	POSTING_READ(PCH_DPLL(id));
+	intel_de_write(dev_priv, PCH_DPLL(id), 0);
+	intel_de_posting_read(dev_priv, PCH_DPLL(id));
 	udelay(200);
 }
 
@@ -457,11 +481,12 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
 	if (HAS_PCH_IBX(dev_priv)) {
 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 		i = (enum intel_dpll_id) crtc->pipe;
-		pll = &dev_priv->shared_dplls[i];
+		pll = &dev_priv->dpll.shared_dplls[i];
 
-		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-			      crtc->base.base.id, crtc->base.name,
-			      pll->info->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CRTC:%d:%s] using pre-allocated %s\n",
+			    crtc->base.base.id, crtc->base.name,
+			    pll->info->name);
 	} else {
 		pll = intel_find_shared_dpll(state, crtc,
 					     &crtc_state->dpll_hw_state,
@@ -484,12 +509,13 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
-		      "fp0: 0x%x, fp1: 0x%x\n",
-		      hw_state->dpll,
-		      hw_state->dpll_md,
-		      hw_state->fp0,
-		      hw_state->fp1);
+	drm_dbg_kms(&dev_priv->drm,
+		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+		    "fp0: 0x%x, fp1: 0x%x\n",
+		    hw_state->dpll,
+		    hw_state->dpll_md,
+		    hw_state->fp0,
+		    hw_state->fp1);
 }
 
 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
@@ -499,21 +525,34 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 	.get_hw_state = ibx_pch_dpll_get_hw_state,
 };
 
+static const struct dpll_info pch_plls[] = {
+	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+	{ },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+	.dpll_info = pch_plls,
+	.get_dplls = ibx_get_dpll,
+	.put_dplls = intel_put_dpll,
+	.dump_hw_state = ibx_dump_hw_state,
+};
+
 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	const enum intel_dpll_id id = pll->info->id;
 
-	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
-	POSTING_READ(WRPLL_CTL(id));
+	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 	udelay(20);
 }
 
 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
-	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
-	POSTING_READ(SPLL_CTL);
+	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
+	intel_de_posting_read(dev_priv, SPLL_CTL);
 	udelay(20);
 }
 
@@ -523,9 +562,9 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
 	const enum intel_dpll_id id = pll->info->id;
 	u32 val;
 
-	val = I915_READ(WRPLL_CTL(id));
-	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
-	POSTING_READ(WRPLL_CTL(id));
+	val = intel_de_read(dev_priv, WRPLL_CTL(id));
+	intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
 
 	/*
 	 * Try to set up the PCH reference clock once all DPLLs
@@ -541,9 +580,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
 	enum intel_dpll_id id = pll->info->id;
 	u32 val;
 
-	val = I915_READ(SPLL_CTL);
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
+	val = intel_de_read(dev_priv, SPLL_CTL);
+	intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	intel_de_posting_read(dev_priv, SPLL_CTL);
 
 	/*
 	 * Try to set up the PCH reference clock once all DPLLs
@@ -566,7 +605,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(WRPLL_CTL(id));
+	val = intel_de_read(dev_priv, WRPLL_CTL(id));
 	hw_state->wrpll = val;
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -586,7 +625,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(SPLL_CTL);
+	val = intel_de_read(dev_priv, SPLL_CTL);
 	hw_state->spll = val;
 
 	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -811,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 }
 
 static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
-		      struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+		       struct intel_crtc *crtc)
 {
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
@@ -839,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
 	return pll;
 }
 
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+				  const struct intel_shared_dpll *pll)
+{
+	int refclk;
+	int n, p, r;
+	u32 wrpll = pll->state.hw_state.wrpll;
+
+	switch (wrpll & WRPLL_REF_MASK) {
+	case WRPLL_REF_SPECIAL_HSW:
+		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+		if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+			refclk = dev_priv->dpll.ref_clks.nssc;
+			break;
+		}
+		/* fall through */
+	case WRPLL_REF_PCH_SSC:
+		/*
+		 * We could calculate spread here, but our checking
+		 * code only cares about 5% accuracy, and spread is a max of
+		 * 0.5% downspread.
+		 */
+		refclk = dev_priv->dpll.ref_clks.ssc;
+		break;
+	case WRPLL_REF_LCPLL:
+		refclk = 2700000;
+		break;
+	default:
+		MISSING_CASE(wrpll);
+		return 0;
+	}
+
+	r = wrpll & WRPLL_DIVIDER_REF_MASK;
+	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+	/* Convert to KHz, p & r have a fixed point portion */
+	return (refclk * n / 10) / (p * r) * 2;
+}
+
 static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_shared_dpll *pll;
@@ -858,7 +936,8 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
 		pll_id = DPLL_ID_LCPLL_2700;
 		break;
 	default:
-		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+		drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+			    clock);
 		return NULL;
 	}
 
@@ -870,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
 	return pll;
 }
 
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+				  const struct intel_shared_dpll *pll)
+{
+	int link_clock = 0;
+
+	switch (pll->info->id) {
+	case DPLL_ID_LCPLL_810:
+		link_clock = 81000;
+		break;
+	case DPLL_ID_LCPLL_1350:
+		link_clock = 135000;
+		break;
+	case DPLL_ID_LCPLL_2700:
+		link_clock = 270000;
+		break;
+	default:
+		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+		break;
+	}
+
+	return link_clock * 2;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+
+	if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+		return NULL;
+
+	crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+					 SPLL_REF_MUXED_SSC;
+
+	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+				      BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+				 const struct intel_shared_dpll *pll)
+{
+	int link_clock = 0;
+
+	switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+	case SPLL_FREQ_810MHz:
+		link_clock = 81000;
+		break;
+	case SPLL_FREQ_1350MHz:
+		link_clock = 135000;
+		break;
+	case SPLL_FREQ_2700MHz:
+		link_clock = 270000;
+		break;
+	default:
+		drm_WARN(&i915->drm, 1, "bad spll freq\n");
+		break;
+	}
+
+	return link_clock * 2;
+}
+
 static bool hsw_get_dpll(struct intel_atomic_state *state,
 			 struct intel_crtc *crtc,
 			 struct intel_encoder *encoder)
@@ -881,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
-	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
-		pll = hsw_ddi_dp_get_dpll(crtc_state);
-	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
-		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
-			return false;
-
-		crtc_state->dpll_hw_state.spll =
-			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
-		pll = intel_find_shared_dpll(state, crtc,
-					     &crtc_state->dpll_hw_state,
-					     BIT(DPLL_ID_SPLL));
-	} else {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+	else if (intel_crtc_has_dp_encoder(crtc_state))
+		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+		pll = hsw_ddi_spll_get_dpll(state, crtc);
+	else
 		return false;
-	}
 
 	if (!pll)
 		return false;
@@ -910,23 +1043,35 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
 	return true;
 }
 
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+	i915->dpll.ref_clks.ssc = 135000;
+	/* Non-SSC is only used on non-ULT HSW. */
+	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+		i915->dpll.ref_clks.nssc = 24000;
+	else
+		i915->dpll.ref_clks.nssc = 135000;
+}
+
 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
-		      hw_state->wrpll, hw_state->spll);
+	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+		    hw_state->wrpll, hw_state->spll);
 }
 
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
 	.enable = hsw_ddi_wrpll_enable,
 	.disable = hsw_ddi_wrpll_disable,
 	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+	.get_freq = hsw_ddi_wrpll_get_freq,
 };
 
 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
 	.enable = hsw_ddi_spll_enable,
 	.disable = hsw_ddi_spll_disable,
 	.get_hw_state = hsw_ddi_spll_get_hw_state,
+	.get_freq = hsw_ddi_spll_get_freq,
 };
 
 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -950,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
 	.enable = hsw_ddi_lcpll_enable,
 	.disable = hsw_ddi_lcpll_disable,
 	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+	.get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
+	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
+	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
+	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+	{ },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+	.dpll_info = hsw_plls,
+	.get_dplls = hsw_get_dpll,
+	.put_dplls = intel_put_dpll,
+	.update_ref_clks = hsw_update_dpll_ref_clks,
+	.dump_hw_state = hsw_dump_hw_state,
 };
 
 struct skl_dpll_regs {
@@ -989,15 +1153,15 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
 	const enum intel_dpll_id id = pll->info->id;
 	u32 val;
 
-	val = I915_READ(DPLL_CTRL1);
+	val = intel_de_read(dev_priv, DPLL_CTRL1);
 
 	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
 		 DPLL_CTRL1_SSC(id) |
 		 DPLL_CTRL1_LINK_RATE_MASK(id));
 	val |= pll->state.hw_state.ctrl1 << (id * 6);
 
-	I915_WRITE(DPLL_CTRL1, val);
-	POSTING_READ(DPLL_CTRL1);
+	intel_de_write(dev_priv, DPLL_CTRL1, val);
+	intel_de_posting_read(dev_priv, DPLL_CTRL1);
 }
 
 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1008,17 +1172,17 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
 
-	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
-	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
-	POSTING_READ(regs[id].cfgcr1);
-	POSTING_READ(regs[id].cfgcr2);
+	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
+	intel_de_posting_read(dev_priv, regs[id].cfgcr2);
 
 	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[id].ctl,
-		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
+	intel_de_write(dev_priv, regs[id].ctl,
+		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
 
 	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
-		DRM_ERROR("DPLL %d not locked\n", id);
+		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
 }
 
 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -1034,9 +1198,9 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	const enum intel_dpll_id id = pll->info->id;
 
 	/* the enable bit is always bit 31 */
-	I915_WRITE(regs[id].ctl,
-		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
-	POSTING_READ(regs[id].ctl);
+	intel_de_write(dev_priv, regs[id].ctl,
+		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+	intel_de_posting_read(dev_priv, regs[id].ctl);
 }
 
 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -1061,17 +1225,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	ret = false;
 
-	val = I915_READ(regs[id].ctl);
+	val = intel_de_read(dev_priv, regs[id].ctl);
 	if (!(val & LCPLL_PLL_ENABLE))
 		goto out;
 
-	val = I915_READ(DPLL_CTRL1);
+	val = intel_de_read(dev_priv, DPLL_CTRL1);
 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
 
 	/* avoid reading back stale values if HDMI mode is not enabled */
 	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
-		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
-		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
+		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
+		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
 	}
 	ret = true;
 
@@ -1099,11 +1263,11 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
 	ret = false;
 
 	/* DPLL0 is always enabled since it drives CDCLK */
-	val = I915_READ(regs[id].ctl);
-	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+	val = intel_de_read(dev_priv, regs[id].ctl);
+	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
 		goto out;
 
-	val = I915_READ(DPLL_CTRL1);
+	val = intel_de_read(dev_priv, DPLL_CTRL1);
 	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
 
 	ret = true;
@@ -1222,6 +1386,7 @@ struct skl_wrpll_params {
 
 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
 				      u64 afe_clock,
+				      int ref_clock,
 				      u64 central_freq,
 				      u32 p0, u32 p1, u32 p2)
 {
@@ -1281,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
 	 * Intermediate values are in Hz.
 	 * Divide by MHz to match bspec
 	 */
-	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
 	params->dco_fraction =
-		div_u64((div_u64(dco_freq, 24) -
+		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
 			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
 }
 
 static bool
 skl_ddi_calculate_wrpll(int clock /* in Hz */,
+			int ref_clock,
 			struct skl_wrpll_params *wrpll_params)
 {
 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1354,14 +1520,15 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
 	 */
 	p0 = p1 = p2 = 0;
 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
-	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
-				  p0, p1, p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+				  ctx.central_freq, p0, p1, p2);
 
 	return true;
 }
 
 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 	u32 ctrl1, cfgcr1, cfgcr2;
 	struct skl_wrpll_params wrpll_params = { 0, };
 
@@ -1374,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
 	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+				     i915->dpll.ref_clks.nssc,
 				     &wrpll_params))
 		return false;
 
@@ -1396,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+				  const struct intel_shared_dpll *pll)
+{
+	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+	int ref_clock = i915->dpll.ref_clks.nssc;
+	u32 p0, p1, p2, dco_freq;
+
+	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+	else
+		p1 = 1;
+
+	switch (p0) {
+	case DPLL_CFGCR2_PDIV_1:
+		p0 = 1;
+		break;
+	case DPLL_CFGCR2_PDIV_2:
+		p0 = 2;
+		break;
+	case DPLL_CFGCR2_PDIV_3:
+		p0 = 3;
+		break;
+	case DPLL_CFGCR2_PDIV_7:
+		p0 = 7;
+		break;
+	}
+
+	switch (p2) {
+	case DPLL_CFGCR2_KDIV_5:
+		p2 = 5;
+		break;
+	case DPLL_CFGCR2_KDIV_2:
+		p2 = 2;
+		break;
+	case DPLL_CFGCR2_KDIV_3:
+		p2 = 3;
+		break;
+	case DPLL_CFGCR2_KDIV_1:
+		p2 = 1;
+		break;
+	}
+
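+	/*
+	 * DCO frequency = ref_clock * (integer part + fraction / 2^15);
+	 * the port clock is DCO / (p0 * p1 * p2 * 5), since the AFE clock
+	 * runs at 5x the port clock.
+	 */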
+	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+		   ref_clock;
+
+	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+		    ref_clock / 0x8000;
+
+	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+		return 0;
+
+	return dco_freq / (p0 * p1 * p2 * 5);
+}
+
 static bool
 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
@@ -1436,25 +1662,62 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+				  const struct intel_shared_dpll *pll)
+{
+	int link_clock = 0;
+
+	switch ((pll->state.hw_state.ctrl1 &
+		 DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+	case DPLL_CTRL1_LINK_RATE_810:
+		link_clock = 81000;
+		break;
+	case DPLL_CTRL1_LINK_RATE_1080:
+		link_clock = 108000;
+		break;
+	case DPLL_CTRL1_LINK_RATE_1350:
+		link_clock = 135000;
+		break;
+	case DPLL_CTRL1_LINK_RATE_1620:
+		link_clock = 162000;
+		break;
+	case DPLL_CTRL1_LINK_RATE_2160:
+		link_clock = 216000;
+		break;
+	case DPLL_CTRL1_LINK_RATE_2700:
+		link_clock = 270000;
+		break;
+	default:
+		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+		break;
+	}
+
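+	/* The DP port clock is twice the link rate encoded in ctrl1 */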
+	return link_clock * 2;
+}
+
 static bool skl_get_dpll(struct intel_atomic_state *state,
 			 struct intel_crtc *crtc,
 			 struct intel_encoder *encoder)
 {
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
 	bool bret;
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
-			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+			drm_dbg_kms(&i915->drm,
+				    "Could not get HDMI pll dividers.\n");
 			return false;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
 		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
-			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+			drm_dbg_kms(&i915->drm,
+				    "Could not set DP dpll HW state.\n");
 			return false;
 		}
 	} else {
@@ -1482,10 +1745,29 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
 	return true;
 }
 
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+				const struct intel_shared_dpll *pll)
+{
+	/*
+	 * The ctrl1 register is already shifted for each pll; just use 0 to
+	 * get the internal shift for each field.
+	 */
+	if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+		return skl_ddi_wrpll_get_freq(i915, pll);
+	else
+		return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+	/* No SSC ref */
+	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: "
+	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
 		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
 		      hw_state->ctrl1,
 		      hw_state->cfgcr1,
@@ -1496,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
 	.enable = skl_ddi_pll_enable,
 	.disable = skl_ddi_pll_disable,
 	.get_hw_state = skl_ddi_pll_get_hw_state,
+	.get_freq = skl_ddi_pll_get_freq,
 };
 
 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
 	.enable = skl_ddi_dpll0_enable,
 	.disable = skl_ddi_dpll0_disable,
 	.get_hw_state = skl_ddi_dpll0_get_hw_state,
+	.get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
+	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
+	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
+	{ },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+	.dpll_info = skl_plls,
+	.get_dplls = skl_get_dpll,
+	.put_dplls = intel_put_dpll,
+	.update_ref_clks = skl_update_dpll_ref_clks,
+	.dump_hw_state = skl_dump_hw_state,
 };
 
 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1515,113 +1815,114 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	/* Non-SSC reference */
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 	temp |= PORT_PLL_REF_SEL;
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
 
 	if (IS_GEMINILAKE(dev_priv)) {
-		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 		temp |= PORT_PLL_POWER_ENABLE;
-		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
 
-		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
 				 PORT_PLL_POWER_STATE), 200))
-			DRM_ERROR("Power state not set for PLL:%d\n", port);
+			drm_err(&dev_priv->drm,
+				"Power state not set for PLL:%d\n", port);
 	}
 
 	/* Disable 10 bit clock */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
 
 	/* Write P1 & P2 */
-	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
 	temp |= pll->state.hw_state.ebb0;
-	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
 
 	/* Write M2 integer */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
 	temp &= ~PORT_PLL_M2_MASK;
 	temp |= pll->state.hw_state.pll0;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
 
 	/* Write N */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
 	temp &= ~PORT_PLL_N_MASK;
 	temp |= pll->state.hw_state.pll1;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
 
 	/* Write M2 fraction */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
 	temp &= ~PORT_PLL_M2_FRAC_MASK;
 	temp |= pll->state.hw_state.pll2;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
 
 	/* Write M2 fraction enable */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
 	temp |= pll->state.hw_state.pll3;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
 
 	/* Write coeff */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
 	temp &= ~PORT_PLL_INT_COEFF_MASK;
 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
 	temp |= pll->state.hw_state.pll6;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
 
 	/* Write calibration val */
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
 	temp |= pll->state.hw_state.pll8;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
 
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
 	temp |= pll->state.hw_state.pll9;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
 
-	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
 	temp &= ~PORT_PLL_DCO_AMP_MASK;
 	temp |= pll->state.hw_state.pll10;
-	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
 
 	/* Recalibrate with new settings */
-	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
 	temp |= PORT_PLL_RECALIBRATE;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
 	temp |= pll->state.hw_state.ebb4;
-	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
 
 	/* Enable PLL */
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 	temp |= PORT_PLL_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 
-	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
 			200))
-		DRM_ERROR("PLL %d not locked\n", port);
+		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
 
 	if (IS_GEMINILAKE(dev_priv)) {
-		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
 		temp |= DCC_DELAY_RANGE_2;
-		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
 	}
 
 	/*
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers and we pick lanes 0/1 for that.
 	 */
-	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
 	temp &= ~LANE_STAGGER_MASK;
 	temp &= ~LANESTAGGER_STRAP_OVRD;
 	temp |= pll->state.hw_state.pcsdw12;
-	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
 }
 
 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1630,19 +1931,20 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
 	u32 temp;
 
-	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 	temp &= ~PORT_PLL_ENABLE;
-	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 
 	if (IS_GEMINILAKE(dev_priv)) {
-		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 		temp &= ~PORT_PLL_POWER_ENABLE;
-		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
 
-		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-				PORT_PLL_POWER_STATE), 200))
-			DRM_ERROR("Power state not reset for PLL:%d\n", port);
+		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+				  PORT_PLL_POWER_STATE), 200))
+			drm_err(&dev_priv->drm,
+				"Power state not reset for PLL:%d\n", port);
 	}
 }
 
@@ -1666,40 +1968,40 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	ret = false;
 
-	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
 	if (!(val & PORT_PLL_ENABLE))
 		goto out;
 
-	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
 	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
 
-	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
 	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
 
-	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
 	hw_state->pll0 &= PORT_PLL_M2_MASK;
 
-	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
 	hw_state->pll1 &= PORT_PLL_N_MASK;
 
-	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
 	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
 
-	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
 	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
 
-	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
 	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
 			  PORT_PLL_INT_COEFF_MASK |
 			  PORT_PLL_GAIN_CTL_MASK;
 
-	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
 	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
 
-	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
 	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
 
-	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
 	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
 			   PORT_PLL_DCO_AMP_MASK;
 
@@ -1708,11 +2010,14 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	 * can read only lane registers. We configure all lanes the same way, so
 	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
 	 */
-	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
-	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
-		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
-				 hw_state->pcsdw12,
-				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
+	hw_state->pcsdw12 = intel_de_read(dev_priv,
+					  BXT_PORT_PCS_DW12_LN01(phy, ch));
+	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+		drm_dbg(&dev_priv->drm,
+			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+			hw_state->pcsdw12,
+			intel_de_read(dev_priv,
+				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
 	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
 	ret = true;
@@ -1751,6 +2056,7 @@ static bool
 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
 			  struct bxt_clk_div *clk_div)
 {
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct dpll best_clock;
 
@@ -1760,9 +2066,9 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
 	 * i9xx_crtc_compute_clock
 	 */
 	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
-		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
-				 crtc_state->port_clock,
-				 pipe_name(crtc->pipe));
+		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
+			crtc_state->port_clock,
+			pipe_name(crtc->pipe));
 		return false;
 	}
 
@@ -1799,6 +2105,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
 				      const struct bxt_clk_div *clk_div)
 {
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
 	int clock = crtc_state->port_clock;
 	int vco = clk_div->vco;
@@ -1824,7 +2131,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
 		gain_ctl = 1;
 		targ_cnt = 9;
 	} else {
-		DRM_ERROR("Invalid VCO\n");
+		drm_err(&i915->drm, "Invalid VCO\n");
 		return false;
 	}
 
@@ -1885,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
 }
 
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+				const struct intel_shared_dpll *pll)
+{
+	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+	struct dpll clock;
+
+	clock.m1 = 2;
+	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
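+	/*
+	 * The BXT port PLL uses the same m1/m2/n/p divider scheme as CHV,
+	 * so reuse the CHV helper to compute the resulting frequency.
+	 */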
+	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
+
 static bool bxt_get_dpll(struct intel_atomic_state *state,
 			 struct intel_crtc *crtc,
 			 struct intel_encoder *encoder)
@@ -1907,8 +2231,8 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
 	id = (enum intel_dpll_id) encoder->port;
 	pll = intel_get_shared_dpll_by_id(dev_priv, id);
 
-	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
-		      crtc->base.base.id, crtc->base.name, pll->info->name);
+	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+		    crtc->base.base.id, crtc->base.name, pll->info->name);
 
 	intel_reference_shared_dpll(state, crtc,
 				    pll, &crtc_state->dpll_hw_state);
@@ -1918,89 +2242,37 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
 	return true;
 }
 
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
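+	/* BXT/GLK port PLLs always run from the 100 MHz reference */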
+	i915->dpll.ref_clks.ssc = 100000;
+	i915->dpll.ref_clks.nssc = 100000;
+	/* DSI non-SSC ref 19.2MHz */
+}
+
 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
-		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
-		      hw_state->ebb0,
-		      hw_state->ebb4,
-		      hw_state->pll0,
-		      hw_state->pll1,
-		      hw_state->pll2,
-		      hw_state->pll3,
-		      hw_state->pll6,
-		      hw_state->pll8,
-		      hw_state->pll9,
-		      hw_state->pll10,
-		      hw_state->pcsdw12);
+	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+		    hw_state->ebb0,
+		    hw_state->ebb4,
+		    hw_state->pll0,
+		    hw_state->pll1,
+		    hw_state->pll2,
+		    hw_state->pll3,
+		    hw_state->pll6,
+		    hw_state->pll8,
+		    hw_state->pll9,
+		    hw_state->pll10,
+		    hw_state->pcsdw12);
 }
 
 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 	.enable = bxt_ddi_pll_enable,
 	.disable = bxt_ddi_pll_disable,
 	.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
-	const struct dpll_info *dpll_info;
-
-	bool (*get_dplls)(struct intel_atomic_state *state,
-			  struct intel_crtc *crtc,
-			  struct intel_encoder *encoder);
-	void (*put_dplls)(struct intel_atomic_state *state,
-			  struct intel_crtc *crtc);
-	void (*update_active_dpll)(struct intel_atomic_state *state,
-				   struct intel_crtc *crtc,
-				   struct intel_encoder *encoder);
-	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
-			      const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
-	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
-	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
-	{ },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
-	.dpll_info = pch_plls,
-	.get_dplls = ibx_get_dpll,
-	.put_dplls = intel_put_dpll,
-	.dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
-	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
-	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
-	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
-	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
-	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
-	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
-	{ },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
-	.dpll_info = hsw_plls,
-	.get_dplls = hsw_get_dpll,
-	.put_dplls = intel_put_dpll,
-	.dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
-	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
-	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
-	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
-	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
-	{ },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
-	.dpll_info = skl_plls,
-	.get_dplls = skl_get_dpll,
-	.put_dplls = intel_put_dpll,
-	.dump_hw_state = skl_dump_hw_state,
+	.get_freq = bxt_ddi_pll_get_freq,
 };
 
 static const struct dpll_info bxt_plls[] = {
@@ -2014,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
 	.dpll_info = bxt_plls,
 	.get_dplls = bxt_get_dpll,
 	.put_dplls = intel_put_dpll,
+	.update_ref_clks = bxt_update_dpll_ref_clks,
 	.dump_hw_state = bxt_dump_hw_state,
 };
 
@@ -2024,32 +2297,32 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	/* 1. Enable DPLL power in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
 	val |= PLL_POWER_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(id), val);
+	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
 
 	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
 				  PLL_POWER_STATE, 5))
-		DRM_ERROR("PLL %d Power not enabled\n", id);
+		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
 
 	/*
 	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
 	 * select DP mode, and set DP link rate.
 	 */
 	val = pll->state.hw_state.cfgcr0;
-	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
+	intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
 
 	/* 4. Read back to ensure writes completed */
-	POSTING_READ(CNL_DPLL_CFGCR0(id));
+	intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
 
 	/* 3. Configure DPLL_CFGCR1 */
 	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
 	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
 		val = pll->state.hw_state.cfgcr1;
-		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
+		intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
 		/* 4. Read back to ensure writes completed */
-		POSTING_READ(CNL_DPLL_CFGCR1(id));
+		intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
 	}
 
 	/*
@@ -2062,13 +2335,13 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 6. Enable DPLL in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
 	val |= PLL_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(id), val);
+	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
 	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
-		DRM_ERROR("PLL %d not locked\n", id);
+		drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
 
 	/*
 	 * 8. If the frequency will result in a change to the voltage
@@ -2106,13 +2379,13 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 3. Disable DPLL through DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
 	val &= ~PLL_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(id), val);
+	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
 
 	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
-		DRM_ERROR("PLL %d locked\n", id);
+		drm_err(&dev_priv->drm, "PLL %d locked\n", id);
 
 	/*
 	 * 5. If the frequency will result in a change to the voltage
@@ -2124,14 +2397,14 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	 */
 
 	/* 6. Disable DPLL power in DPLL_ENABLE. */
-	val = I915_READ(CNL_DPLL_ENABLE(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
 	val &= ~PLL_POWER_ENABLE;
-	I915_WRITE(CNL_DPLL_ENABLE(id), val);
+	intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
 
 	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
 	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
 				    PLL_POWER_STATE, 5))
-		DRM_ERROR("PLL %d Power not disabled\n", id);
+		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
 }
 
 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2150,16 +2423,17 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 
 	ret = false;
 
-	val = I915_READ(CNL_DPLL_ENABLE(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	val = I915_READ(CNL_DPLL_CFGCR0(id));
+	val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
 	hw_state->cfgcr0 = val;
 
 	/* avoid reading back stale values if HDMI mode is not enabled */
 	if (val & DPLL_CFGCR0_HDMI_MODE) {
-		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
+		hw_state->cfgcr1 = intel_de_read(dev_priv,
+						 CNL_DPLL_CFGCR1(id));
 	}
 	ret = true;
 
@@ -2256,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
 	params->dco_fraction = dco & 0x7fff;
 }
 
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
-	int ref_clock = dev_priv->cdclk.hw.ref;
-
-	/*
-	 * For ICL+, the spec states: if reference frequency is 38.4,
-	 * use 19.2 because the DPLL automatically divides that by 2.
-	 */
-	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
-		ref_clock = 19200;
-
-	return ref_clock;
-}
-
 static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
-			struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+			  struct skl_wrpll_params *wrpll_params,
+			  int ref_clock)
 {
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	u32 afe_clock = crtc_state->port_clock * 5;
-	u32 ref_clock;
 	u32 dco_min = 7998000;
 	u32 dco_max = 10000000;
 	u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2308,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
 		return false;
 
 	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
-	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
 	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
 				  pdiv, qdiv, kdiv);
 
 	return true;
 }
 
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+			struct skl_wrpll_params *wrpll_params)
+{
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+					 i915->dpll.ref_clks.nssc);
+}
+
 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 {
 	u32 cfgcr0, cfgcr1;
@@ -2344,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+				    const struct intel_shared_dpll *pll,
+				    int ref_clock)
+{
+	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+	u32 p0, p1, p2, dco_freq;
+
+	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+	else
+		p1 = 1;
+
+	switch (p0) {
+	case DPLL_CFGCR1_PDIV_2:
+		p0 = 2;
+		break;
+	case DPLL_CFGCR1_PDIV_3:
+		p0 = 3;
+		break;
+	case DPLL_CFGCR1_PDIV_5:
+		p0 = 5;
+		break;
+	case DPLL_CFGCR1_PDIV_7:
+		p0 = 7;
+		break;
+	}
+
+	switch (p2) {
+	case DPLL_CFGCR1_KDIV_1:
+		p2 = 1;
+		break;
+	case DPLL_CFGCR1_KDIV_2:
+		p2 = 2;
+		break;
+	case DPLL_CFGCR1_KDIV_3:
+		p2 = 3;
+		break;
+	}
+
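+	/*
+	 * DCO frequency = ref_clock * (integer + fraction / 2^15); the
+	 * fraction field is 15 bits wide, hence the division by 0x8000.
+	 */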
+	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+		   ref_clock;
+
+	dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+		      DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+	if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+		return 0;
+
+	return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+				  const struct intel_shared_dpll *pll)
+{
+	return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
 static bool
 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 {
@@ -2389,30 +2717,72 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
 	return true;
 }
 
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+				  const struct intel_shared_dpll *pll)
+{
+	int link_clock = 0;
+
+	switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+	case DPLL_CFGCR0_LINK_RATE_810:
+		link_clock = 81000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_1080:
+		link_clock = 108000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_1350:
+		link_clock = 135000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_1620:
+		link_clock = 162000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_2160:
+		link_clock = 216000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_2700:
+		link_clock = 270000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_3240:
+		link_clock = 324000;
+		break;
+	case DPLL_CFGCR0_LINK_RATE_4050:
+		link_clock = 405000;
+		break;
+	default:
+		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+		break;
+	}
+
+	return link_clock * 2;
+}
+
 static bool cnl_get_dpll(struct intel_atomic_state *state,
 			 struct intel_crtc *crtc,
 			 struct intel_encoder *encoder)
 {
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
 	struct intel_shared_dpll *pll;
 	bool bret;
 
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
 		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
 		if (!bret) {
-			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+			drm_dbg_kms(&i915->drm,
+				    "Could not get HDMI pll dividers.\n");
 			return false;
 		}
 	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
 		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
 		if (!bret) {
-			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+			drm_dbg_kms(&i915->drm,
+				    "Could not set DP dpll HW state.\n");
 			return false;
 		}
 	} else {
-		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
-			      crtc_state->output_types);
+		drm_dbg_kms(&i915->drm,
+			    "Skip DPLL setup for output_types 0x%x\n",
+			    crtc_state->output_types);
 		return false;
 	}
 
@@ -2422,7 +2792,7 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
 				     BIT(DPLL_ID_SKL_DPLL1) |
 				     BIT(DPLL_ID_SKL_DPLL0));
 	if (!pll) {
-		DRM_DEBUG_KMS("No PLL selected\n");
+		drm_dbg_kms(&i915->drm, "No PLL selected\n");
 		return false;
 	}
 
@@ -2434,19 +2804,35 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
 	return true;
 }
 
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+				const struct intel_shared_dpll *pll)
+{
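+	/* HDMI mode uses the WRPLL dividers; DP uses a fixed LCPLL link rate */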
+	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+		return cnl_ddi_wrpll_get_freq(i915, pll);
+	else
+		return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+	/* No SSC reference */
+	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: "
-		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
-		      hw_state->cfgcr0,
-		      hw_state->cfgcr1);
+	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
+		    "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+		    hw_state->cfgcr0,
+		    hw_state->cfgcr1);
 }
 
 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
 	.enable = cnl_ddi_pll_enable,
 	.disable = cnl_ddi_pll_disable,
 	.get_hw_state = cnl_ddi_pll_get_hw_state,
+	.get_freq = cnl_ddi_pll_get_freq,
 };
 
 static const struct dpll_info cnl_plls[] = {
@@ -2460,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
 	.dpll_info = cnl_plls,
 	.get_dplls = cnl_get_dpll,
 	.put_dplls = intel_put_dpll,
+	.update_ref_clks = cnl_update_dpll_ref_clks,
 	.dump_hw_state = cnl_dump_hw_state,
 };
 
@@ -2555,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	const struct icl_combo_pll_params *params =
-		dev_priv->cdclk.hw.ref == 24000 ?
+		dev_priv->dpll.ref_clks.nssc == 24000 ?
 		icl_dp_combo_pll_24MHz_values :
 		icl_dp_combo_pll_19_2MHz_values;
 	int clock = crtc_state->port_clock;
@@ -2578,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		switch (dev_priv->cdclk.hw.ref) {
+		switch (dev_priv->dpll.ref_clks.nssc) {
 		default:
-			MISSING_CASE(dev_priv->cdclk.hw.ref);
+			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
 			/* fall-through */
 		case 19200:
 		case 38400:
@@ -2591,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 			break;
 		}
 	} else {
-		switch (dev_priv->cdclk.hw.ref) {
+		switch (dev_priv->dpll.ref_clks.nssc) {
 		default:
-			MISSING_CASE(dev_priv->cdclk.hw.ref);
+			MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
 			/* fall-through */
 		case 19200:
 		case 38400:
@@ -2608,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
 	return true;
 }
 
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+				    const struct intel_shared_dpll *pll)
+{
+	/*
+	 * The PLL outputs multiple frequencies at the same time; the selection
+	 * is made at the DDI clock mux level.
+	 */
+	drm_WARN_ON(&i915->drm, 1);
+
+	return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+	int ref_clock = i915->dpll.ref_clks.nssc;
+
+	/*
+	 * For ICL+, the spec states: if reference frequency is 38.4,
+	 * use 19.2 because the DPLL automatically divides that by 2.
+	 */
+	if (ref_clock == 38400)
+		ref_clock = 19200;
+
+	return ref_clock;
+}
+
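+/*
+ * ICL+ reuses the CNL WRPLL calculation; only the reference clock needs
+ * the 38.4 MHz -> 19.2 MHz adjustment from icl_wrpll_ref_clock().
+ */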
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+	       struct skl_wrpll_params *wrpll_params)
+{
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+	return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+					 icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+				      const struct intel_shared_dpll *pll)
+{
+	return __cnl_ddi_wrpll_get_freq(i915, pll,
+					icl_wrpll_ref_clock(i915));
+}
+
 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 				struct intel_encoder *encoder,
 				struct intel_dpll_hw_state *pll_state)
@@ -2622,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
 		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
 	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
 		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
-		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+		ret = icl_calc_wrpll(crtc_state, &pll_params);
 	else
 		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
 
@@ -2745,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 				  struct intel_dpll_hw_state *pll_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	int refclk_khz = dev_priv->cdclk.hw.ref;
+	int refclk_khz = dev_priv->dpll.ref_clks.nssc;
 	int clock = crtc_state->port_clock;
 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
 	u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2761,7 +3191,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 
 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
 				      pll_state, is_dkl)) {
-		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to find divisors for clock %d\n", clock);
 		return false;
 	}
 
@@ -2774,8 +3205,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 		}
 
 		if (m2div_int > 255) {
-			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
-				      clock);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Failed to find mdiv for clock %d\n",
+				    clock);
 			return false;
 		}
 	}
@@ -2944,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
 	return true;
 }
 
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+				   const struct intel_shared_dpll *pll)
+{
+	const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+	u64 tmp;
+
+	ref_clock = dev_priv->dpll.ref_clks.nssc;
+
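+	/* Gen12+ uses the Dekel PHY register layout, ICL the MG PHY one */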
+	if (INTEL_GEN(dev_priv) >= 12) {
+		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+			m2_frac = pll_state->mg_pll_bias &
+				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+		} else {
+			m2_frac = 0;
+		}
+	} else {
+		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+			m2_frac = pll_state->mg_pll_div0 &
+				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
+			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+		} else {
+			m2_frac = 0;
+		}
+	}
+
+	switch (pll_state->mg_clktop2_hsclkctl &
+		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+		div1 = 2;
+		break;
+	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+		div1 = 3;
+		break;
+	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+		div1 = 5;
+		break;
+	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+		div1 = 7;
+		break;
+	default:
+		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+		return 0;
+	}
+
+	div2 = (pll_state->mg_clktop2_hsclkctl &
+		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+	/* a div2 value of 0 is the same as 1, i.e. no division */
+	if (div2 == 0)
+		div2 = 1;
+
+	/*
+	 * Adjust the original formula to delay the division by 2^22 in order to
+	 * minimize possible rounding errors.
+	 */
+	tmp = (u64)m1 * m2_int * ref_clock +
+	      (((u64)m1 * m2_frac * ref_clock) >> 22);
+	tmp = div_u64(tmp, 5 * div1 * div2);
+
+	return tmp;
+}
+
 /**
  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
  * @crtc_state: state for the CRTC to select the DPLL for
@@ -2996,7 +3500,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
 	unsigned long dpll_mask;
 
 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
-		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Could not calculate combo PHY PLL state.\n");
 
 		return false;
 	}
@@ -3013,8 +3518,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
 						&port_dpll->hw_state,
 						dpll_mask);
 	if (!port_dpll->pll) {
-		DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
-			      encoder->base.base.id, encoder->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
+			    encoder->base.base.id, encoder->base.name);
 		return false;
 	}
 
@@ -3038,7 +3544,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
 
 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
-		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Could not calculate TBT PLL state.\n");
 		return false;
 	}
 
@@ -3046,7 +3553,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
 						&port_dpll->hw_state,
 						BIT(DPLL_ID_ICL_TBTPLL));
 	if (!port_dpll->pll) {
-		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
 		return false;
 	}
 	intel_reference_shared_dpll(state, crtc,
@@ -3055,7 +3562,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
 
 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
 	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
-		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Could not calculate MG PHY PLL state.\n");
 		goto err_unreference_tbt_pll;
 	}
 
@@ -3065,7 +3573,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
 						&port_dpll->hw_state,
 						BIT(dpll_id));
 	if (!port_dpll->pll) {
-		DRM_DEBUG_KMS("No MG PHY PLL found\n");
+		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
 		goto err_unreference_tbt_pll;
 	}
 	intel_reference_shared_dpll(state, crtc,
@@ -3140,37 +3648,39 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(MG_PLL_ENABLE(tc_port));
+	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
 	if (!(val & PLL_ENABLE))
 		goto out;
 
-	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+						  MG_REFCLKIN_CTL(tc_port));
 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 
 	hw_state->mg_clktop2_coreclkctl1 =
-		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
 	hw_state->mg_clktop2_coreclkctl1 &=
 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 
 	hw_state->mg_clktop2_hsclkctl =
-		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
 	hw_state->mg_clktop2_hsclkctl &=
 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
 		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
 
-	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
-	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
-	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
-	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
-	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
+	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
+	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
+	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
+						   MG_PLL_FRAC_LOCK(tc_port));
+	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
 
-	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
 	hw_state->mg_pll_tdc_coldst_bias =
-		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
 
-	if (dev_priv->cdclk.hw.ref == 38400) {
+	if (dev_priv->dpll.ref_clks.nssc == 38400) {
 		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
 		hw_state->mg_pll_bias_mask = 0;
 	} else {
@@ -3202,7 +3712,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(MG_PLL_ENABLE(tc_port));
+	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
 	if (!(val & PLL_ENABLE))
 		goto out;
 
@@ -3210,13 +3720,15 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	 * All registers read here have the same HIP_INDEX_REG even though
 	 * they are on different building blocks
 	 */
-	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+		       HIP_INDEX_VAL(tc_port, 0x2));
 
-	hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+						  DKL_REFCLKIN_CTL(tc_port));
 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 
 	hw_state->mg_clktop2_hsclkctl =
-		I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
 	hw_state->mg_clktop2_hsclkctl &=
 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3224,32 +3736,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
 
 	hw_state->mg_clktop2_coreclkctl1 =
-		I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
 	hw_state->mg_clktop2_coreclkctl1 &=
 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 
-	hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
 	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
 				  DKL_PLL_DIV0_PROP_COEFF_MASK |
 				  DKL_PLL_DIV0_FBPREDIV_MASK |
 				  DKL_PLL_DIV0_FBDIV_INT_MASK);
 
-	hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
 
-	hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
 				 DKL_PLL_SSC_STEP_LEN_MASK |
 				 DKL_PLL_SSC_STEP_NUM_MASK |
 				 DKL_PLL_SSC_EN);
 
-	hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);
 
 	hw_state->mg_pll_tdc_coldst_bias =
-		I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
 
@@ -3274,20 +3786,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	if (!wakeref)
 		return false;
 
-	val = I915_READ(enable_reg);
+	val = intel_de_read(dev_priv, enable_reg);
 	if (!(val & PLL_ENABLE))
 		goto out;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
-		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
-		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+		hw_state->cfgcr0 = intel_de_read(dev_priv,
+						 TGL_DPLL_CFGCR0(id));
+		hw_state->cfgcr1 = intel_de_read(dev_priv,
+						 TGL_DPLL_CFGCR1(id));
 	} else {
 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
-			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
-			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+			hw_state->cfgcr0 = intel_de_read(dev_priv,
+							 ICL_DPLL_CFGCR0(4));
+			hw_state->cfgcr1 = intel_de_read(dev_priv,
+							 ICL_DPLL_CFGCR1(4));
 		} else {
-			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
-			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+			hw_state->cfgcr0 = intel_de_read(dev_priv,
+							 ICL_DPLL_CFGCR0(id));
+			hw_state->cfgcr1 = intel_de_read(dev_priv,
+							 ICL_DPLL_CFGCR1(id));
 		}
 	}
 
@@ -3338,9 +3856,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
-	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
-	POSTING_READ(cfgcr1_reg);
+	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
+	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+	intel_de_posting_read(dev_priv, cfgcr1_reg);
 }
 
 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3356,41 +3874,42 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
 	 * during the calc/readout phase if the mask depends on some other HW
 	 * state like refclk, see icl_calc_mg_pll_state().
 	 */
-	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
+	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 	val |= hw_state->mg_refclkin_ctl;
-	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
+	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
 
-	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 	val |= hw_state->mg_clktop2_coreclkctl1;
-	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
 
-	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
 	val |= hw_state->mg_clktop2_hsclkctl;
-	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
+	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
 
-	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
-	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
-	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
-	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
-	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
+		       hw_state->mg_pll_frac_lock);
+	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
 
-	val = I915_READ(MG_PLL_BIAS(tc_port));
+	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
 	val &= ~hw_state->mg_pll_bias_mask;
 	val |= hw_state->mg_pll_bias;
-	I915_WRITE(MG_PLL_BIAS(tc_port), val);
+	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
 
-	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
 	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
 	val |= hw_state->mg_pll_tdc_coldst_bias;
-	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
 
-	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
 }
 
 static void dkl_pll_write(struct drm_i915_private *dev_priv,
@@ -3404,62 +3923,63 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
 	 * All registers programmed here have the same HIP_INDEX_REG even
 	 * though they are on different building blocks
 	 */
-	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+		       HIP_INDEX_VAL(tc_port, 0x2));
 
 	/* All the registers are RMW */
-	val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 	val |= hw_state->mg_refclkin_ctl;
-	I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
 
-	val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 	val |= hw_state->mg_clktop2_coreclkctl1;
-	I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
 
-	val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
 	val |= hw_state->mg_clktop2_hsclkctl;
-	I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
 
-	val = I915_READ(DKL_PLL_DIV0(tc_port));
+	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
 	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
 		 DKL_PLL_DIV0_PROP_COEFF_MASK |
 		 DKL_PLL_DIV0_FBPREDIV_MASK |
 		 DKL_PLL_DIV0_FBDIV_INT_MASK);
 	val |= hw_state->mg_pll_div0;
-	I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
 
-	val = I915_READ(DKL_PLL_DIV1(tc_port));
+	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
 	val |= hw_state->mg_pll_div1;
-	I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
 
-	val = I915_READ(DKL_PLL_SSC(tc_port));
+	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
 		 DKL_PLL_SSC_STEP_LEN_MASK |
 		 DKL_PLL_SSC_STEP_NUM_MASK |
 		 DKL_PLL_SSC_EN);
 	val |= hw_state->mg_pll_ssc;
-	I915_WRITE(DKL_PLL_SSC(tc_port), val);
+	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
 
-	val = I915_READ(DKL_PLL_BIAS(tc_port));
+	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
 	val |= hw_state->mg_pll_bias;
-	I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
 
-	val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
 	val |= hw_state->mg_pll_tdc_coldst_bias;
-	I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
 
-	POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
 }
 
 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
@@ -3468,16 +3988,17 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(enable_reg);
+	val = intel_de_read(dev_priv, enable_reg);
 	val |= PLL_POWER_ENABLE;
-	I915_WRITE(enable_reg, val);
+	intel_de_write(dev_priv, enable_reg, val);
 
 	/*
 	 * The spec says we need to "wait" but it also says it should be
 	 * immediate.
 	 */
 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
-		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
+			pll->info->id);
 }
 
 static void icl_pll_enable(struct drm_i915_private *dev_priv,
@@ -3486,13 +4007,13 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(enable_reg);
+	val = intel_de_read(dev_priv, enable_reg);
 	val |= PLL_ENABLE;
-	I915_WRITE(enable_reg, val);
+	intel_de_write(dev_priv, enable_reg, val);
 
 	/* Timeout is actually 600us. */
 	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
-		DRM_ERROR("PLL %d not locked\n", pll->info->id);
+		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
 }
 
 static void combo_pll_enable(struct drm_i915_private *dev_priv,
@@ -3584,26 +4105,27 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
 	 * nothing here.
 	 */
 
-	val = I915_READ(enable_reg);
+	val = intel_de_read(dev_priv, enable_reg);
 	val &= ~PLL_ENABLE;
-	I915_WRITE(enable_reg, val);
+	intel_de_write(dev_priv, enable_reg, val);
 
 	/* Timeout is actually 1us. */
 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
-		DRM_ERROR("PLL %d locked\n", pll->info->id);
+		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
 
 	/* DVFS post sequence would be here. See the comment above. */
 
-	val = I915_READ(enable_reg);
+	val = intel_de_read(dev_priv, enable_reg);
 	val &= ~PLL_POWER_ENABLE;
-	I915_WRITE(enable_reg, val);
+	intel_de_write(dev_priv, enable_reg, val);
 
 	/*
 	 * The spec says we need to "wait" but it also says it should be
 	 * immediate.
 	 */
 	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
-		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
+			pll->info->id);
 }
 
 static void combo_pll_disable(struct drm_i915_private *dev_priv,
@@ -3639,44 +4161,54 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
 	icl_pll_disable(dev_priv, pll, enable_reg);
 }
 
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+	/* No SSC ref */
+	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
-		      "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
-		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
-		      "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
-		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
-		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
-		      hw_state->cfgcr0, hw_state->cfgcr1,
-		      hw_state->mg_refclkin_ctl,
-		      hw_state->mg_clktop2_coreclkctl1,
-		      hw_state->mg_clktop2_hsclkctl,
-		      hw_state->mg_pll_div0,
-		      hw_state->mg_pll_div1,
-		      hw_state->mg_pll_lf,
-		      hw_state->mg_pll_frac_lock,
-		      hw_state->mg_pll_ssc,
-		      hw_state->mg_pll_bias,
-		      hw_state->mg_pll_tdc_coldst_bias);
+	drm_dbg_kms(&dev_priv->drm,
+		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
+		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
+		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+		    hw_state->cfgcr0, hw_state->cfgcr1,
+		    hw_state->mg_refclkin_ctl,
+		    hw_state->mg_clktop2_coreclkctl1,
+		    hw_state->mg_clktop2_hsclkctl,
+		    hw_state->mg_pll_div0,
+		    hw_state->mg_pll_div1,
+		    hw_state->mg_pll_lf,
+		    hw_state->mg_pll_frac_lock,
+		    hw_state->mg_pll_ssc,
+		    hw_state->mg_pll_bias,
+		    hw_state->mg_pll_tdc_coldst_bias);
 }
 
 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
 	.enable = combo_pll_enable,
 	.disable = combo_pll_disable,
 	.get_hw_state = combo_pll_get_hw_state,
+	.get_freq = icl_ddi_combo_pll_get_freq,
 };
 
 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
 	.enable = tbt_pll_enable,
 	.disable = tbt_pll_disable,
 	.get_hw_state = tbt_pll_get_hw_state,
+	.get_freq = icl_ddi_tbt_pll_get_freq,
 };
 
 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
 	.enable = mg_pll_enable,
 	.disable = mg_pll_disable,
 	.get_hw_state = mg_pll_get_hw_state,
+	.get_freq = icl_ddi_mg_pll_get_freq,
 };
 
 static const struct dpll_info icl_plls[] = {
@@ -3695,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
 	.get_dplls = icl_get_dplls,
 	.put_dplls = icl_put_dplls,
 	.update_active_dpll = icl_update_active_dpll,
+	.update_ref_clks = icl_update_dpll_ref_clks,
 	.dump_hw_state = icl_dump_hw_state,
 };
 
@@ -3709,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
 	.dpll_info = ehl_plls,
 	.get_dplls = icl_get_dplls,
 	.put_dplls = icl_put_dplls,
+	.update_ref_clks = icl_update_dpll_ref_clks,
 	.dump_hw_state = icl_dump_hw_state,
 };
 
@@ -3716,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
 	.enable = mg_pll_enable,
 	.disable = mg_pll_disable,
 	.get_hw_state = dkl_pll_get_hw_state,
+	.get_freq = icl_ddi_mg_pll_get_freq,
 };
 
 static const struct dpll_info tgl_plls[] = {
@@ -3736,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
 	.get_dplls = icl_get_dplls,
 	.put_dplls = icl_put_dplls,
 	.update_active_dpll = icl_update_active_dpll,
+	.update_ref_clks = icl_update_dpll_ref_clks,
 	.dump_hw_state = icl_dump_hw_state,
 };
 
@@ -3770,22 +4306,22 @@ void intel_shared_dpll_init(struct drm_device *dev)
 		dpll_mgr = &pch_pll_mgr;
 
 	if (!dpll_mgr) {
-		dev_priv->num_shared_dpll = 0;
+		dev_priv->dpll.num_shared_dpll = 0;
 		return;
 	}
 
 	dpll_info = dpll_mgr->dpll_info;
 
 	for (i = 0; dpll_info[i].name; i++) {
-		WARN_ON(i != dpll_info[i].id);
-		dev_priv->shared_dplls[i].info = &dpll_info[i];
+		drm_WARN_ON(dev, i != dpll_info[i].id);
+		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
 	}
 
-	dev_priv->dpll_mgr = dpll_mgr;
-	dev_priv->num_shared_dpll = i;
-	mutex_init(&dev_priv->dpll_lock);
+	dev_priv->dpll.mgr = dpll_mgr;
+	dev_priv->dpll.num_shared_dpll = i;
+	mutex_init(&dev_priv->dpll.lock);
 
-	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
 }
 
 /**
@@ -3812,9 +4348,9 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
 				struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
 
-	if (WARN_ON(!dpll_mgr))
+	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
 		return false;
 
 	return dpll_mgr->get_dplls(state, crtc, encoder);
@@ -3835,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
 				struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
 
 	/*
 	 * FIXME: this function is called for every platform having a
@@ -3864,35 +4400,114 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
 			      struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
 
-	if (WARN_ON(!dpll_mgr))
+	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
 		return;
 
 	dpll_mgr->update_active_dpll(state, crtc, encoder);
 }
 
 /**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's current state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+			const struct intel_shared_dpll *pll)
+{
+	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+		return 0;
+
+	return pll->info->funcs->get_freq(i915, pll);
+}
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+				  struct intel_shared_dpll *pll)
+{
+	struct intel_crtc *crtc;
+
+	pll->on = pll->info->funcs->get_hw_state(i915, pll,
+						 &pll->state.hw_state);
+
+	if (IS_ELKHARTLAKE(i915) && pll->on &&
+	    pll->info->id == DPLL_ID_EHL_DPLL4) {
+		pll->wakeref = intel_display_power_get(i915,
+						       POWER_DOMAIN_DPLL_DC_OFF);
+	}
+
+	pll->state.crtc_mask = 0;
+	for_each_intel_crtc(&i915->drm, crtc) {
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
+		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+			pll->state.crtc_mask |= 1 << crtc->pipe;
+	}
+	pll->active_mask = pll->state.crtc_mask;
+
+	drm_dbg_kms(&i915->drm,
+		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+		    pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+	int i;
+
+	if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+		i915->dpll.mgr->update_ref_clks(i915);
+
+	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+		readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+				struct intel_shared_dpll *pll)
+{
+	if (!pll->on || pll->active_mask)
+		return;
+
+	drm_dbg_kms(&i915->drm,
+		    "%s enabled but not in use, disabling\n",
+		    pll->info->name);
+
+	pll->info->funcs->disable(i915, pll);
+	pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+	int i;
+
+	for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+		sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+/**
  * intel_dpll_dump_hw_state - write hw_state to dmesg
  * @dev_priv: i915 drm device
  * @hw_state: hw state to be written to the log
  *
- * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
  */
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state)
 {
-	if (dev_priv->dpll_mgr) {
-		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+	if (dev_priv->dpll.mgr) {
+		dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
 	} else {
 		/* fallback for platforms that don't use the shared dpll
 		 * infrastructure
 		 */
-		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
-			      "fp0: 0x%x, fp1: 0x%x\n",
-			      hw_state->dpll,
-			      hw_state->dpll_md,
-			      hw_state->fp0,
-			      hw_state->fp1);
+		drm_dbg_kms(&dev_priv->drm,
+			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+			    "fp0: 0x%x, fp1: 0x%x\n",
+			    hw_state->dpll,
+			    hw_state->dpll_md,
+			    hw_state->fp0,
+			    hw_state->fp1);
 	}
 }
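
The readout/sanitize split added above follows a common hardware take-over pattern: first read back what the hardware is actually doing (intel_dpll_readout_hw_state()), then disable any PLL that is on but has no users (intel_dpll_sanitize_state()). A compilable toy sketch of that pattern; all toy_* names are invented for illustration and are not i915 API:

#include <stdbool.h>
#include <stdio.h>

/* Invented names; this only mirrors the pattern in the hunk above. */
struct toy_pll {
	const char *name;
	bool on;                  /* what the hardware reports */
	unsigned int active_mask; /* CRTCs referencing it in sw state */
};

/* Readout: trust the hardware for 'on', derive users from sw state. */
static void toy_readout(struct toy_pll *pll, bool hw_on, unsigned int users)
{
	pll->on = hw_on;
	pll->active_mask = users;
}

/* Sanitize: a PLL that is on but unused was left enabled by firmware;
 * disable it so software and hardware state agree. */
static void toy_sanitize(struct toy_pll *pll)
{
	if (!pll->on || pll->active_mask)
		return;
	printf("%s enabled but not in use, disabling\n", pll->name);
	pll->on = false;
}

int main(void)
{
	struct toy_pll pll = { .name = "DPLL0", .on = false, .active_mask = 0 };

	toy_readout(&pll, true, 0); /* BIOS left it on, nobody uses it */
	toy_sanitize(&pll);         /* reports it and turns it off */
	return 0;
}
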
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c6..5d9a2bc 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
 	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
 			     struct intel_shared_dpll *pll,
 			     struct intel_dpll_hw_state *hw_state);
+
+	/**
+	 * @get_freq:
+	 *
+	 * Hook for calculating the pll's output frequency based on its
+	 * current state.
+	 */
+	int (*get_freq)(struct drm_i915_private *i915,
+			const struct intel_shared_dpll *pll);
 };
 
 /**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
 void intel_update_active_dpll(struct intel_atomic_state *state,
 			      struct intel_crtc *crtc,
 			      struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+			const struct intel_shared_dpll *pll);
 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
 			      const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
 bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
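
The header hunk above wires the new ->get_freq() hook into the per-PLL-type function table; intel_dpll_get_freq() is then a guarded dispatch through that table, warning and returning 0 when a PLL type lacks the hook. A standalone toy version of the same dispatch; the toy_* names and the frequency formula are invented, not i915's:

#include <stdio.h>

struct toy_pll;

struct toy_pll_funcs {
	/* Output frequency in kHz for the PLL's current state. */
	int (*get_freq)(const struct toy_pll *pll);
};

struct toy_pll {
	const struct toy_pll_funcs *funcs;
	int refclk_khz, fbdiv;
};

static int toy_combo_get_freq(const struct toy_pll *pll)
{
	return pll->refclk_khz * pll->fbdiv; /* toy formula only */
}

static const struct toy_pll_funcs toy_combo_funcs = {
	.get_freq = toy_combo_get_freq,
};

/* Guarded dispatch: PLL types without the hook report 0 instead of
 * dereferencing a NULL function pointer. */
static int toy_pll_get_freq(const struct toy_pll *pll)
{
	if (!pll->funcs->get_freq) {
		fprintf(stderr, "get_freq not implemented\n");
		return 0;
	}
	return pll->funcs->get_freq(pll);
}

int main(void)
{
	struct toy_pll pll = { .funcs = &toy_combo_funcs,
			       .refclk_khz = 38400, .fbdiv = 35 };

	printf("%d kHz\n", toy_pll_get_freq(&pll));
	return 0;
}
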
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index ada006a..d7a6bf2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -40,7 +40,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 
-	return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+	return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 }
 
 static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
@@ -50,16 +50,16 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
 	enum pipe pipe = crtc->pipe;
 	u32 dsb_ctrl;
 
-	dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+	dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 	if (DSB_STATUS & dsb_ctrl) {
-		DRM_DEBUG_KMS("DSB engine is busy.\n");
+		drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
 		return false;
 	}
 
 	dsb_ctrl |= DSB_ENABLE;
-	I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+	intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
 
-	POSTING_READ(DSB_CTRL(pipe, dsb->id));
+	intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 	return true;
 }
 
@@ -70,16 +70,16 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
 	enum pipe pipe = crtc->pipe;
 	u32 dsb_ctrl;
 
-	dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+	dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 	if (DSB_STATUS & dsb_ctrl) {
-		DRM_DEBUG_KMS("DSB engine is busy.\n");
+		drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
 		return false;
 	}
 
 	dsb_ctrl &= ~DSB_ENABLE;
-	I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+	intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
 
-	POSTING_READ(DSB_CTRL(pipe, dsb->id));
+	intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
 	return true;
 }
 
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
 
 	obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
 	if (IS_ERR(obj)) {
-		DRM_ERROR("Gem object creation failed\n");
+		drm_err(&i915->drm, "Gem object creation failed\n");
 		goto out;
 	}
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 	if (IS_ERR(vma)) {
-		DRM_ERROR("Vma creation failed\n");
+		drm_err(&i915->drm, "Vma creation failed\n");
 		i915_gem_object_put(obj);
 		goto out;
 	}
 
 	buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
 	if (IS_ERR(buf)) {
-		DRM_ERROR("Command buffer creation failed\n");
+		drm_err(&i915->drm, "Command buffer creation failed\n");
 		goto out;
 	}
 
@@ -165,7 +165,7 @@ void intel_dsb_put(struct intel_dsb *dsb)
 	if (!HAS_DSB(i915))
 		return;
 
-	if (WARN_ON(dsb->refcount == 0))
+	if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
 		return;
 
 	if (--dsb->refcount == 0) {
@@ -198,12 +198,12 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
 	u32 reg_val;
 
 	if (!buf) {
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 		return;
 	}
 
-	if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
-		DRM_DEBUG_KMS("DSB buffer overflow\n");
+	if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+		drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
 		return;
 	}
 
@@ -272,12 +272,12 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
 	u32 *buf = dsb->cmd_buf;
 
 	if (!buf) {
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 		return;
 	}
 
-	if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
-		DRM_DEBUG_KMS("DSB buffer overflow\n");
+	if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+		drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
 		return;
 	}
 
@@ -310,10 +310,12 @@ void intel_dsb_commit(struct intel_dsb *dsb)
 		goto reset;
 
 	if (is_dsb_busy(dsb)) {
-		DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+		drm_err(&dev_priv->drm,
+			"HEAD_PTR write failed - dsb engine is busy.\n");
 		goto reset;
 	}
-	I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+	intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
+		       i915_ggtt_offset(dsb->vma));
 
 	tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
 	if (tail > dsb->free_pos * 4)
@@ -321,14 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
 		       (tail - dsb->free_pos * 4));
 
 	if (is_dsb_busy(dsb)) {
-		DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+		drm_err(&dev_priv->drm,
+			"TAIL_PTR write failed - dsb engine is busy.\n");
 		goto reset;
 	}
-	DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
-		      i915_ggtt_offset(dsb->vma), tail);
-	I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+	drm_dbg_kms(&dev_priv->drm,
+		    "DSB execution started - head 0x%x, tail 0x%x\n",
+		    i915_ggtt_offset(dsb->vma), tail);
+	intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+		       i915_ggtt_offset(dsb->vma) + tail);
 	if (wait_for(!is_dsb_busy(dsb), 1)) {
-		DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for DSB workload completion.\n");
 		goto reset;
 	}
 
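
Most hunks in these files are the same mechanical conversion: the I915_READ()/I915_WRITE()/POSTING_READ() macros, which expand against an implicitly named dev_priv local, become intel_de_read()/intel_de_write()/intel_de_posting_read() calls that take the device explicitly, and DRM_DEBUG_KMS()/DRM_ERROR() become drm_dbg_kms()/drm_err(), which tag each message with the device it came from. A toy illustration of why the explicit form is preferred; all toy_* names are invented:

#include <stdint.h>
#include <stdio.h>

struct toy_dev { uint32_t regs[16]; };

/* Old style: the macro silently requires a local named 'dev'. */
#define TOY_WRITE(reg, val) (dev->regs[(reg)] = (val))

/* New style: the device is an explicit parameter, so the helper works
 * from any function and the callsite names its device. */
static inline void toy_write(struct toy_dev *dev, unsigned int reg,
			     uint32_t val)
{
	dev->regs[reg] = val;
}

static void toy_program(struct toy_dev *dev)
{
	TOY_WRITE(3, 0xdead);      /* only compiles because 'dev' exists here */
	toy_write(dev, 4, 0xbeef); /* works regardless of local naming */
}

int main(void)
{
	struct toy_dev d = { {0} };

	toy_program(&d);
	printf("0x%x 0x%x\n", (unsigned int)d.regs[3], (unsigned int)d.regs[4]);
	return 0;
}
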
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index c878388..b53c503 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -45,7 +45,7 @@
 
 static u32 dcs_get_backlight(struct intel_connector *connector)
 {
-	struct intel_encoder *encoder = connector->encoder;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct mipi_dsi_device *dsi_device;
 	u8 data = 0;
@@ -160,13 +160,13 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
 {
 	struct drm_device *dev = intel_connector->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_encoder *encoder = intel_connector->encoder;
+	struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
 	struct intel_panel *panel = &intel_connector->panel;
 
 	if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
 		return -ENODEV;
 
-	if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+	if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
 		return -EINVAL;
 
 	panel->backlight.setup = dcs_setup_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 04f953b..574dcfe 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
 
 #include <video/mipi_display.h>
 
@@ -136,7 +135,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 	u16 len;
 	enum port port;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	flags = *data++;
 	type = *data++;
@@ -158,7 +157,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 
 	dsi_device = intel_dsi->dsi_hosts[port]->device;
 	if (!dsi_device) {
-		DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+		drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+			    port_name(port));
 		goto out;
 	}
 
@@ -182,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
 	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
 	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
-		DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+		drm_dbg(&dev_priv->drm,
+			"Generic Read not yet implemented or used\n");
 		break;
 	case MIPI_DSI_GENERIC_LONG_WRITE:
 		mipi_dsi_generic_write(dsi_device, data, len);
@@ -194,7 +195,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 		mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
 		break;
 	case MIPI_DSI_DCS_READ:
-		DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+		drm_dbg(&dev_priv->drm,
+			"DCS Read not yet implemented or used\n");
 		break;
 	case MIPI_DSI_DCS_LONG_WRITE:
 		mipi_dsi_dcs_write_buffer(dsi_device, data, len);
@@ -212,9 +214,10 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
 
 static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
 	u32 delay = *((const u32 *) data);
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&i915->drm, "\n");
 
 	usleep_range(delay, delay + 10);
 	data += 4;
@@ -231,7 +234,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 	u8 port;
 
 	if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
-		DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+		drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
+			    gpio_index);
 		return;
 	}
 
@@ -244,10 +248,11 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
 		if (gpio_source == 0) {
 			port = IOSF_PORT_GPIO_NC;
 		} else if (gpio_source == 1) {
-			DRM_DEBUG_KMS("SC gpio not supported\n");
+			drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
 			return;
 		} else {
-			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+			drm_dbg_kms(&dev_priv->drm,
+				    "unknown gpio source %u\n", gpio_source);
 			return;
 		}
 	}
@@ -291,13 +296,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
 	} else {
 		/* XXX: The spec is unclear about CHV GPIO on seq v2 */
 		if (gpio_source != 0) {
-			DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+			drm_dbg_kms(&dev_priv->drm,
+				    "unknown gpio source %u\n", gpio_source);
 			return;
 		}
 
 		if (gpio_index >= CHV_GPIO_IDX_START_E) {
-			DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
-				      gpio_index);
+			drm_dbg_kms(&dev_priv->drm,
+				    "invalid gpio index %u for GPIO N\n",
+				    gpio_index);
 			return;
 		}
 
@@ -332,8 +339,9 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 						 GPIOD_OUT_HIGH);
 
 		if (IS_ERR_OR_NULL(gpio_desc)) {
-			DRM_ERROR("GPIO index %u request failed (%ld)\n",
-				  gpio_index, PTR_ERR(gpio_desc));
+			drm_err(&dev_priv->drm,
+				"GPIO index %u request failed (%ld)\n",
+				gpio_index, PTR_ERR(gpio_desc));
 			return;
 		}
 
@@ -346,7 +354,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 static void icl_exec_gpio(struct drm_i915_private *dev_priv,
 			  u8 gpio_source, u8 gpio_index, bool value)
 {
-	DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+	drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
 }
 
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
@@ -356,7 +364,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	u8 gpio_source, gpio_index = 0, gpio_number;
 	bool value;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	if (dev_priv->vbt.dsi.seq_version >= 3)
 		gpio_index = *data++;
@@ -494,13 +502,16 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 
 static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
 {
-	DRM_DEBUG_KMS("Skipping SPI element execution\n");
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+	drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
 
 	return data + *(data + 5) + 6;
 }
 
 static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
 #ifdef CONFIG_PMIC_OPREGION
 	u32 value, mask, reg_address;
 	u16 i2c_address;
@@ -516,9 +527,10 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
 							reg_address,
 							value, mask);
 	if (ret)
-		DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+		drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
 #else
-	DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+	drm_err(&i915->drm,
+		"Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
 #endif
 
 	return data + 15;
@@ -570,17 +582,18 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
 	const u8 *data;
 	fn_mipi_elem_exec mipi_elem_exec;
 
-	if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
 		return;
 
 	data = dev_priv->vbt.dsi.sequence[seq_id];
 	if (!data)
 		return;
 
-	WARN_ON(*data != seq_id);
+	drm_WARN_ON(&dev_priv->drm, *data != seq_id);
 
-	DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
-		      seq_id, sequence_name(seq_id));
+	drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+		    seq_id, sequence_name(seq_id));
 
 	/* Skip Sequence Byte. */
 	data++;
@@ -612,18 +625,21 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
 
 			/* Consistency check if we have size. */
 			if (operation_size && data != next) {
-				DRM_ERROR("Inconsistent operation size\n");
+				drm_err(&dev_priv->drm,
+					"Inconsistent operation size\n");
 				return;
 			}
 		} else if (operation_size) {
 			/* We have size, skip. */
-			DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
-				      operation_byte);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unsupported MIPI operation byte %u\n",
+				    operation_byte);
 			data += operation_size;
 		} else {
 			/* No size, can't skip without parsing. */
-			DRM_ERROR("Unsupported MIPI operation byte %u\n",
-				  operation_byte);
+			drm_err(&dev_priv->drm,
+				"Unsupported MIPI operation byte %u\n",
+				operation_byte);
 			return;
 		}
 	}
@@ -658,40 +674,54 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
 
 void intel_dsi_log_params(struct intel_dsi *intel_dsi)
 {
-	DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
-	DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
-	DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
-	DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
-	DRM_DEBUG_KMS("Video mode format %s\n",
-		      intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
-		      "non-burst with sync pulse" :
-		      intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
-		      "non-burst with sync events" :
-		      intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
-		      "burst" : "<unknown>");
-	DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
-	DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
-	DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
-	DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
-	DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+	drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
+	drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
+		    intel_dsi->pixel_overlap);
+	drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
+	drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+	drm_dbg_kms(&i915->drm, "Video mode format %s\n",
+		    intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+		    "non-burst with sync pulse" :
+		    intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+		    "non-burst with sync events" :
+		    intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+		    "burst" : "<unknown>");
+	drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
+		    intel_dsi->burst_mode_ratio);
+	drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
+	drm_dbg_kms(&i915->drm, "Eot %s\n",
+		    enableddisabled(intel_dsi->eotp_pkt));
+	drm_dbg_kms(&i915->drm, "Clockstop %s\n",
+		    enableddisabled(!intel_dsi->clock_stop));
+	drm_dbg_kms(&i915->drm, "Mode %s\n",
+		    intel_dsi->operation_mode ? "command" : "video");
 	if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
-		DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+		drm_dbg_kms(&i915->drm,
+			    "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
 	else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
-		DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+		drm_dbg_kms(&i915->drm,
+			    "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
 	else
-		DRM_DEBUG_KMS("Dual link: NONE\n");
-	DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
-	DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
-	DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
-	DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
-	DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
-	DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
-	DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
-	DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
-	DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
-	DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
-	DRM_DEBUG_KMS("BTA %s\n",
-			enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+		drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
+	drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
+	drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
+	drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
+		    intel_dsi->lp_rx_timeout);
+	drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
+		    intel_dsi->turn_arnd_val);
+	drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
+	drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
+		    intel_dsi->hs_to_lp_count);
+	drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+	drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+	drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
+		    intel_dsi->clk_lp_to_hs_count);
+	drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
+		    intel_dsi->clk_hs_to_lp_count);
+	drm_dbg_kms(&i915->drm, "BTA %s\n",
+		    enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
 }
 
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -704,7 +734,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	u16 burst_mode_ratio;
 	enum port port;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
 	intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -763,7 +793,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 				mipi_config->target_burst_mode_freq = bitrate;
 
 			if (mipi_config->target_burst_mode_freq < bitrate) {
-				DRM_ERROR("Burst mode freq is less than computed\n");
+				drm_err(&dev_priv->drm,
+					"Burst mode freq is less than computed\n");
 				return false;
 			}
 
@@ -773,7 +804,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
 			intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
 		} else {
-			DRM_ERROR("Burst mode target is not set\n");
+			drm_err(&dev_priv->drm,
+				"Burst mode target is not set\n");
 			return false;
 		}
 	} else
@@ -856,17 +888,20 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
 		ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
 					     ARRAY_SIZE(soc_pwm_pinctrl_map));
 		if (ret)
-			DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+			drm_err(&dev_priv->drm,
+				"Failed to register pwm0 pinmux mapping\n");
 
 		pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
 		if (IS_ERR(pinctrl))
-			DRM_ERROR("Failed to set pinmux to PWM\n");
+			drm_err(&dev_priv->drm,
+				"Failed to set pinmux to PWM\n");
 	}
 
 	if (want_panel_gpio) {
 		intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
 		if (IS_ERR(intel_dsi->gpio_panel)) {
-			DRM_ERROR("Failed to own gpio for panel control\n");
+			drm_err(&dev_priv->drm,
+				"Failed to own gpio for panel control\n");
 			intel_dsi->gpio_panel = NULL;
 		}
 	}
@@ -875,7 +910,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
 		intel_dsi->gpio_backlight =
 			gpiod_get(dev->dev, "backlight", flags);
 		if (IS_ERR(intel_dsi->gpio_backlight)) {
-			DRM_ERROR("Failed to own gpio for backlight control\n");
+			drm_err(&dev_priv->drm,
+				"Failed to own gpio for backlight control\n");
 			intel_dsi->gpio_backlight = NULL;
 		}
 	}
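
The sequence-execution loop above also shows why VBT sequence v3+ carries a per-operation size byte: the parser can skip operations it does not implement, while sizeless operations force it to abort the whole sequence. A standalone sketch of that skip-or-bail idea; the element layout here is invented for illustration, not the real VBT format:

#include <stdint.h>
#include <stdio.h>

/* Toy element stream: 1 opcode byte, 1 size byte, 'size' payload bytes;
 * opcode 0 terminates. */
static void toy_exec_sequence(const uint8_t *data)
{
	for (;;) {
		uint8_t op = *data++;
		uint8_t size;

		if (op == 0)
			return;
		size = *data++;

		if (op == 1)
			printf("delay %d us\n", data[0]);
		else
			printf("skipping unsupported op %d (%d bytes)\n",
			       op, size);

		data += size; /* the size byte makes unknown ops skippable */
	}
}

int main(void)
{
	static const uint8_t seq[] = {
		1, 1, 50,         /* delay, 1 payload byte */
		9, 2, 0xaa, 0xbb, /* unknown op, skipped via its size */
		0,                /* terminator */
	};

	toy_exec_sequence(seq);
	return 0;
}
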
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 86a337c..341d5ce 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_connector.h"
@@ -44,6 +43,7 @@
 #define INTEL_DVO_CHIP_LVDS	1
 #define INTEL_DVO_CHIP_TMDS	2
 #define INTEL_DVO_CHIP_TVOUT	4
+#define INTEL_DVO_CHIP_LVDS_NO_FIXED	5
 
 #define SIL164_ADDR	0x38
 #define CH7xxx_ADDR	0x76
@@ -101,13 +101,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
 		.dev_ops = &ch7017_ops,
 	},
 	{
-	        .type = INTEL_DVO_CHIP_TMDS,
+		.type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
 		.name = "ns2501",
 		.dvo_reg = DVOB,
 		.dvo_srcdim_reg = DVOB_SRCDIM,
 		.slave_addr = NS2501_ADDR,
 		.dev_ops = &ns2501_ops,
-       }
+	},
 };
 
 struct intel_dvo {
@@ -137,7 +137,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	u32 tmp;
 
-	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+	tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
 
 	if (!(tmp & DVO_ENABLE))
 		return false;
@@ -152,7 +152,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	u32 tmp;
 
-	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+	tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
 
 	*pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
 
@@ -168,7 +168,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
 
-	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+	tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
 	if (tmp & DVO_HSYNC_ACTIVE_HIGH)
 		flags |= DRM_MODE_FLAG_PHSYNC;
 	else
@@ -190,11 +190,11 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
-	u32 temp = I915_READ(dvo_reg);
+	u32 temp = intel_de_read(dev_priv, dvo_reg);
 
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-	I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
-	I915_READ(dvo_reg);
+	intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
+	intel_de_read(dev_priv, dvo_reg);
 }
 
 static void intel_enable_dvo(struct intel_encoder *encoder,
@@ -204,14 +204,14 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
 	i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
-	u32 temp = I915_READ(dvo_reg);
+	u32 temp = intel_de_read(dev_priv, dvo_reg);
 
 	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
 					 &pipe_config->hw.mode,
 					 &pipe_config->hw.adjusted_mode);
 
-	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
-	I915_READ(dvo_reg);
+	intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
+	intel_de_read(dev_priv, dvo_reg);
 
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
@@ -286,7 +286,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
 	i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
 
 	/* Save the data order, since I don't know what it should be set to. */
-	dvo_val = I915_READ(dvo_reg) &
+	dvo_val = intel_de_read(dev_priv, dvo_reg) &
 		  (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
 	dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
 		   DVO_BLANK_ACTIVE_HIGH;
@@ -301,11 +301,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
 	/*I915_WRITE(DVOB_SRCDIM,
 	  (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
 	  (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
-	I915_WRITE(dvo_srcdim_reg,
-		   (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
-		   (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+	intel_de_write(dev_priv, dvo_srcdim_reg,
+		       (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
 	/*I915_WRITE(DVOB, dvo_val);*/
-	I915_WRITE(dvo_reg, dvo_val);
+	intel_de_write(dev_priv, dvo_reg, dvo_val);
 }
 
 static enum drm_connector_status
@@ -481,15 +480,16 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
 		 * initialize the device.
 		 */
 		for_each_pipe(dev_priv, pipe) {
-			dpll[pipe] = I915_READ(DPLL(pipe));
-			I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+			dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+			intel_de_write(dev_priv, DPLL(pipe),
+				       dpll[pipe] | DPLL_DVO_2X_MODE);
 		}
 
 		dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
 
 		/* restore the DVO 2x clock state to original */
 		for_each_pipe(dev_priv, pipe) {
-			I915_WRITE(DPLL(pipe), dpll[pipe]);
+			intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
 		}
 
 		intel_gmbus_force_bit(i2c, false);
@@ -507,17 +507,21 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
 		intel_encoder->port = port;
 		intel_encoder->pipe_mask = ~0;
 
-		switch (dvo->type) {
-		case INTEL_DVO_CHIP_TMDS:
+		if (dvo->type != INTEL_DVO_CHIP_LVDS)
 			intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
 				(1 << INTEL_OUTPUT_DVO);
+
+		switch (dvo->type) {
+		case INTEL_DVO_CHIP_TMDS:
+			intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT;
 			drm_connector_init(&dev_priv->drm, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_DVII);
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
+		case INTEL_DVO_CHIP_LVDS_NO_FIXED:
 		case INTEL_DVO_CHIP_LVDS:
-			intel_encoder->cloneable = 0;
 			drm_connector_init(&dev_priv->drm, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_LVDS);
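
The intel_dvo.c hunk above changes the cloneable logic: instead of marking only TMDS encoders cloneable, every DVO chip type except fixed-mode LVDS now allows cloning with analog and DVO outputs, and the new INTEL_DVO_CHIP_LVDS_NO_FIXED type lets the ns2501 register as an LVDS connector while staying cloneable. A minimal sketch of the cloneable-mask idea; the toy_* names are invented:

#include <stdbool.h>
#include <stdio.h>

enum toy_output { TOY_OUTPUT_ANALOG, TOY_OUTPUT_DVO, TOY_OUTPUT_LVDS };

/* The cloneable mask lists output types allowed to share a pipe with
 * this encoder; a fixed-mode LVDS panel can't share timings, so it
 * stays non-cloneable. */
static unsigned int toy_cloneable(bool fixed_mode_lvds)
{
	if (fixed_mode_lvds)
		return 0;

	return (1 << TOY_OUTPUT_ANALOG) | (1 << TOY_OUTPUT_DVO);
}

int main(void)
{
	printf("ns2501-style: 0x%x, fixed LVDS: 0x%x\n",
	       toy_cloneable(false), toy_cloneable(true));
	return 0;
}
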
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a1048ec..2e5d835 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,15 +41,12 @@
 #include <drm/drm_fourcc.h>
 
 #include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
 #include "intel_display_types.h"
 #include "intel_fbc.h"
 #include "intel_frontbuffer.h"
 
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
-{
-	return HAS_FBC(dev_priv);
-}
-
 /*
  * On some platforms where the CRTC's x:0/y:0 coordinates don't match the
  * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
@@ -97,12 +94,12 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 	u32 fbc_ctl;
 
 	/* Disable compression */
-	fbc_ctl = I915_READ(FBC_CONTROL);
+	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
 	if ((fbc_ctl & FBC_CTL_EN) == 0)
 		return;
 
 	fbc_ctl &= ~FBC_CTL_EN;
-	I915_WRITE(FBC_CONTROL, fbc_ctl);
+	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
 	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
@@ -132,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 
 	/* Clear old tags */
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
-		I915_WRITE(FBC_TAG(i), 0);
+		intel_de_write(dev_priv, FBC_TAG(i), 0);
 
 	if (IS_GEN(dev_priv, 4)) {
 		u32 fbc_ctl2;
@@ -142,12 +139,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
 		if (params->fence_id >= 0)
 			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
-		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
+		intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
+		intel_de_write(dev_priv, FBC_FENCE_OFF,
+			       params->crtc.fence_y_offset);
 	}
 
 	/* enable it... */
-	fbc_ctl = I915_READ(FBC_CONTROL);
+	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
 	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
 	if (IS_I945GM(dev_priv))
@@ -155,12 +153,12 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	if (params->fence_id >= 0)
 		fbc_ctl |= params->fence_id;
-	I915_WRITE(FBC_CONTROL, fbc_ctl);
+	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
 }
 
 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+	return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
 }
 
 static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
@@ -176,13 +174,14 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 
 	if (params->fence_id >= 0) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
-		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+		intel_de_write(dev_priv, DPFC_FENCE_YOFF,
+			       params->crtc.fence_y_offset);
 	} else {
-		I915_WRITE(DPFC_FENCE_YOFF, 0);
+		intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
 	}
 
 	/* enable it... */
-	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+	intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 }
 
 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -190,23 +189,27 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
 	u32 dpfc_ctl;
 
 	/* Disable compression */
-	dpfc_ctl = I915_READ(DPFC_CONTROL);
+	dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
-		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+		intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
 	}
 }
 
 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+	return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
 /* This function forces a CFB recompression through the nuke operation. */
 static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
-	POSTING_READ(MSG_FBC_REND_STATE);
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	trace_intel_fbc_nuke(fbc->crtc);
+
+	intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+	intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
 }
 
 static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
@@ -237,22 +240,22 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 		if (IS_GEN(dev_priv, 5))
 			dpfc_ctl |= params->fence_id;
 		if (IS_GEN(dev_priv, 6)) {
-			I915_WRITE(SNB_DPFC_CTL_SA,
-				   SNB_CPU_FENCE_ENABLE |
-				   params->fence_id);
-			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
-				   params->crtc.fence_y_offset);
+			intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+				       SNB_CPU_FENCE_ENABLE | params->fence_id);
+			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+				       params->crtc.fence_y_offset);
 		}
 	} else {
 		if (IS_GEN(dev_priv, 6)) {
-			I915_WRITE(SNB_DPFC_CTL_SA, 0);
-			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+			intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
 		}
 	}
 
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+	intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
+		       params->crtc.fence_y_offset);
 	/* enable it... */
-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
 	intel_fbc_recompress(dev_priv);
 }
@@ -262,16 +265,16 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
 	u32 dpfc_ctl;
 
 	/* Disable compression */
-	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+	dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		dpfc_ctl &= ~DPFC_CTL_EN;
-		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+		intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
 	}
 }
 
 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
 {
-	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+	return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
 static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
@@ -282,14 +285,14 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 
 	/* Display WA #0529: skl, kbl, bxt. */
 	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
-		u32 val = I915_READ(CHICKEN_MISC_4);
+		u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
 
 		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
 
 		if (params->gen9_wa_cfb_stride)
 			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
 
-		I915_WRITE(CHICKEN_MISC_4, val);
+		intel_de_write(dev_priv, CHICKEN_MISC_4, val);
 	}
 
 	dpfc_ctl = 0;
@@ -314,13 +317,13 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 
 	if (params->fence_id >= 0) {
 		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-		I915_WRITE(SNB_DPFC_CTL_SA,
-			   SNB_CPU_FENCE_ENABLE |
-			   params->fence_id);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
-	} else {
-		I915_WRITE(SNB_DPFC_CTL_SA,0);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+		intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+			       SNB_CPU_FENCE_ENABLE | params->fence_id);
+		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+			       params->crtc.fence_y_offset);
+	} else if (dev_priv->ggtt.num_fences) {
+		intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
 	}
 
 	if (dev_priv->fbc.false_color)
@@ -328,21 +331,20 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 
 	if (IS_IVYBRIDGE(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
-		I915_WRITE(ILK_DISPLAY_CHICKEN1,
-			   I915_READ(ILK_DISPLAY_CHICKEN1) |
-			   ILK_FBCQ_DIS);
+		intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
+			       intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
-			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
-			   HSW_FBCQ_DIS);
+		intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
+			       intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		/* Wa_1409120013:icl,ehl,tgl */
-		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+		intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
+			       ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
-	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
 	intel_fbc_recompress(dev_priv);
 }
@@ -361,6 +363,8 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
+	trace_intel_fbc_activate(fbc->crtc);
+
 	fbc->active = true;
 	fbc->activated = true;
 
@@ -378,6 +382,8 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
+	trace_intel_fbc_deactivate(fbc->crtc);
+
 	fbc->active = false;
 
 	if (INTEL_GEN(dev_priv) >= 5)
@@ -407,7 +413,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	WARN_ON(!mutex_is_locked(&fbc->lock));
+	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
 
 	if (fbc->active)
 		intel_fbc_hw_deactivate(dev_priv);
@@ -471,7 +477,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
 	struct drm_mm_node *uninitialized_var(compressed_llb);
 	int ret;
 
-	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
+	drm_WARN_ON(&dev_priv->drm,
+		    drm_mm_node_allocated(&fbc->compressed_fb));
 
 	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
 					 size, fb_cpp);
@@ -485,9 +492,11 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
 	fbc->threshold = ret;
 
 	if (INTEL_GEN(dev_priv) >= 5)
-		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
+		intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
+			       fbc->compressed_fb.start);
 	else if (IS_GM45(dev_priv)) {
-		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
+		intel_de_write(dev_priv, DPFC_CB_BASE,
+			       fbc->compressed_fb.start);
 	} else {
 		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
@@ -500,16 +509,16 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
 
 		fbc->compressed_llb = compressed_llb;
 
-		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
-					     fbc->compressed_fb.start,
-					     U32_MAX));
-		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
-					     fbc->compressed_llb->start,
-					     U32_MAX));
-		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->dsm.start + fbc->compressed_fb.start);
-		I915_WRITE(FBC_LL_BASE,
-			   dev_priv->dsm.start + compressed_llb->start);
+		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+						 fbc->compressed_fb.start,
+						 U32_MAX));
+		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+						 fbc->compressed_llb->start,
+						 U32_MAX));
+		intel_de_write(dev_priv, FBC_CFB_BASE,
+			       dev_priv->dsm.start + fbc->compressed_fb.start);
+		intel_de_write(dev_priv, FBC_LL_BASE,
+			       dev_priv->dsm.start + compressed_llb->start);
 	}
 
 	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -530,20 +539,22 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (drm_mm_node_allocated(&fbc->compressed_fb))
-		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+	if (!drm_mm_node_allocated(&fbc->compressed_fb))
+		return;
 
 	if (fbc->compressed_llb) {
 		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
 		kfree(fbc->compressed_llb);
 	}
+
+	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 }
 
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!HAS_FBC(dev_priv))
 		return;
 
 	mutex_lock(&fbc->lock);
@@ -555,7 +566,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
 			    unsigned int stride)
 {
 	/* This should have been caught earlier. */
-	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
 		return false;
 
 	/* Below are the additional FBC restrictions. */
@@ -663,8 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
 	cache->fb.format = fb->format;
 	cache->fb.stride = fb->pitches[0];
 
-	WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
-		!plane_state->vma->fence);
+	drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+		    !plane_state->vma->fence);
 
 	if (plane_state->flags & PLANE_HAS_FENCE &&
 	    plane_state->vma->fence)
@@ -681,12 +692,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
 		fbc->compressed_fb.size * fbc->threshold;
 }
 
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+	struct intel_fbc *fbc = &dev_priv->fbc;
+
+	if (intel_vgpu_active(dev_priv)) {
+		fbc->no_fbc_reason = "VGPU is active";
+		return false;
+	}
+
+	if (!i915_modparams.enable_fbc) {
+		fbc->no_fbc_reason = "disabled per module param or by default";
+		return false;
+	}
+
+	if (fbc->underrun_detected) {
+		fbc->no_fbc_reason = "underrun detected";
+		return false;
+	}
+
+	return true;
+}
+
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
+	if (!intel_fbc_can_enable(dev_priv))
+		return false;
+
 	if (!cache->plane.visible) {
 		fbc->no_fbc_reason = "primary plane not visible";
 		return false;
@@ -785,28 +821,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 	return true;
 }
 
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
-	struct intel_fbc *fbc = &dev_priv->fbc;
-
-	if (intel_vgpu_active(dev_priv)) {
-		fbc->no_fbc_reason = "VGPU is active";
-		return false;
-	}
-
-	if (!i915_modparams.enable_fbc) {
-		fbc->no_fbc_reason = "disabled per module param or by default";
-		return false;
-	}
-
-	if (fbc->underrun_detected) {
-		fbc->no_fbc_reason = "underrun detected";
-		return false;
-	}
-
-	return true;
-}
-
 static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
 				     struct intel_fbc_reg_params *params)
 {
@@ -867,16 +881,20 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
 	return true;
 }
 
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
-			  const struct intel_crtc_state *crtc_state,
-			  const struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc)
 {
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *plane_state =
+		intel_atomic_get_new_plane_state(state, plane);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	const char *reason = "update pending";
 	bool need_vblank_wait = false;
 
-	if (!fbc_supported(dev_priv))
+	if (!plane->has_fbc || !plane_state)
 		return need_vblank_wait;
 
 	mutex_lock(&fbc->lock);
@@ -926,9 +944,9 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_crtc *crtc = fbc->crtc;
 
-	WARN_ON(!mutex_is_locked(&fbc->lock));
-	WARN_ON(!fbc->crtc);
-	WARN_ON(fbc->active);
+	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+	drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
+	drm_WARN_ON(&dev_priv->drm, fbc->active);
 
 	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
@@ -942,7 +960,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	WARN_ON(!mutex_is_locked(&fbc->lock));
+	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
 
 	if (fbc->crtc != crtc)
 		return;
@@ -967,12 +985,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 		intel_fbc_deactivate(dev_priv, "frontbuffer write");
 }
 
-void intel_fbc_post_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_atomic_state *state,
+			   struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	const struct intel_plane_state *plane_state =
+		intel_atomic_get_new_plane_state(state, plane);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!plane->has_fbc || !plane_state)
 		return;
 
 	mutex_lock(&fbc->lock);
@@ -994,7 +1016,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!HAS_FBC(dev_priv))
 		return;
 
 	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
@@ -1015,7 +1037,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!HAS_FBC(dev_priv))
 		return;
 
 	mutex_lock(&fbc->lock);
@@ -1099,24 +1121,26 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ * @state: corresponding &intel_atomic_state for @crtc
  *
  * This function checks if the given CRTC was chosen for FBC, then enables it if
  * possible. Notice that it doesn't activate FBC. It is valid to call
  * intel_fbc_enable multiple times for the same pipe without an
  * intel_fbc_disable in the middle, as long as it is deactivated.
  */
-void intel_fbc_enable(struct intel_crtc *crtc,
-		      const struct intel_crtc_state *crtc_state,
-		      const struct intel_plane_state *plane_state)
+void intel_fbc_enable(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+	const struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct intel_plane_state *plane_state =
+		intel_atomic_get_new_plane_state(state, plane);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 	struct intel_fbc_state_cache *cache = &fbc->state_cache;
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
 
-	if (!fbc_supported(dev_priv))
+	if (!plane->has_fbc || !plane_state)
 		return;
 
 	mutex_lock(&fbc->lock);
@@ -1129,7 +1153,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
 		__intel_fbc_disable(dev_priv);
 	}
 
-	WARN_ON(fbc->active);
+	drm_WARN_ON(&dev_priv->drm, fbc->active);
 
 	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
 
@@ -1139,14 +1163,14 @@ void intel_fbc_enable(struct intel_crtc *crtc,
 
 	if (intel_fbc_alloc_cfb(dev_priv,
 				intel_fbc_calculate_cfb_size(dev_priv, cache),
-				fb->format->cpp[0])) {
+				plane_state->hw.fb->format->cpp[0])) {
 		cache->plane.visible = false;
 		fbc->no_fbc_reason = "not enough stolen memory";
 		goto out;
 	}
 
 	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
-	    fb->modifier != I915_FORMAT_MOD_X_TILED)
+	    plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
 		cache->gen9_wa_cfb_stride =
 			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
 	else
@@ -1169,9 +1193,10 @@ void intel_fbc_enable(struct intel_crtc *crtc,
 void intel_fbc_disable(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!plane->has_fbc)
 		return;
 
 	mutex_lock(&fbc->lock);
@@ -1190,12 +1215,12 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!HAS_FBC(dev_priv))
 		return;
 
 	mutex_lock(&fbc->lock);
 	if (fbc->crtc) {
-		WARN_ON(fbc->crtc->active);
+		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
 		__intel_fbc_disable(dev_priv);
 	}
 	mutex_unlock(&fbc->lock);
@@ -1267,7 +1292,7 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
 {
 	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!fbc_supported(dev_priv))
+	if (!HAS_FBC(dev_priv))
 		return;
 
 	/* There's no guarantee that underrun_detected won't be set to true
@@ -1348,7 +1373,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 
 	/* This value was pulled out of someone's hat */
 	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
-		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+		intel_de_write(dev_priv, FBC_CONTROL,
+		               500 << FBC_CTL_INTERVAL_SHIFT);
 
 	/* We still don't have any sort of hardware state readout for FBC, so
 	 * deactivate it in case the BIOS activated it to make sure software
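
[ A minimal sketch of the conversion pattern applied throughout this
  file, assuming the 5.7-era helpers behave as the hunks above show:
  drm_WARN_ON() and drm_err() tag their output with the owning drm
  device, and intel_de_write() replaces the implicit-dev_priv
  I915_WRITE() macro. The function name is illustrative, not part of
  the patch. ]

static void example_fbc_sanity_check(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	/* device-tagged warning instead of a bare WARN_ON() */
	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));

	/* display engine write with an explicit device argument */
	intel_de_write(dev_priv, FBC_CONTROL,
		       500 << FBC_CTL_INTERVAL_SHIFT);
}
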
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index c8a5e50..6dc1ede 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,14 +19,13 @@ struct intel_plane_state;
 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 			   struct intel_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
-			  const struct intel_crtc_state *crtc_state,
-			  const struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+			  struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_atomic_state *state,
+			   struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
-		      const struct intel_crtc_state *crtc_state,
-		      const struct intel_plane_state *plane_state);
+void intel_fbc_enable(struct intel_atomic_state *state,
+		      struct intel_crtc *crtc);
 void intel_fbc_disable(struct intel_crtc *crtc);
 void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
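
[ Hypothetical caller sketch for the new entry points above: the
  commit path now hands the whole atomic state plus the crtc to the
  FBC code, which looks up the primary plane's new state itself via
  intel_atomic_get_new_plane_state(). example_update_crtc() is an
  illustration, not a function from the patch. ]

static void example_update_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	/* pre_update may ask for a vblank wait before plane updates */
	bool need_vblank_wait = intel_fbc_pre_update(state, crtc);

	if (need_vblank_wait)
		intel_wait_for_vblank(to_i915(crtc->base.dev), crtc->pipe);

	/* ... commit the plane updates here ... */

	intel_fbc_post_update(state, crtc);
}
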
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1e98e43..3bc8042 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_display_types.h"
@@ -191,7 +190,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		drm_framebuffer_put(&intel_fb->base);
 		intel_fb = ifbdev->fb = NULL;
 	}
-	if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
+	if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
 		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
 		ret = intelfb_alloc(helper, sizes);
 		if (ret)
@@ -410,9 +409,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 		if (!crtc->state->active)
 			continue;
 
-		WARN(!crtc->primary->state->fb,
-		     "re-used BIOS config but lost an fb on crtc %d\n",
-		     crtc->base.id);
+		drm_WARN(dev, !crtc->primary->state->fb,
+			 "re-used BIOS config but lost an fb on crtc %d\n",
+			 crtc->base.id);
 	}
 
 
@@ -439,7 +438,8 @@ int intel_fbdev_init(struct drm_device *dev)
 	struct intel_fbdev *ifbdev;
 	int ret;
 
-	if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
+	if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
+			!INTEL_DISPLAY_ENABLED(dev_priv)))
 		return -ENODEV;
 
 	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -452,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	if (!intel_fbdev_init_bios(dev, ifbdev))
 		ifbdev->preferred_bpp = 32;
 
-	ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+	ret = drm_fb_helper_init(dev, &ifbdev->helper);
 	if (ret) {
 		kfree(ifbdev);
 		return ret;
@@ -461,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev)
 	dev_priv->fbdev = ifbdev;
 	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
 
-	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
 	return 0;
 }
 
@@ -569,7 +567,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 		 * to all the printk activity.  Try to keep it out of the hot
 		 * path of resume if possible.
 		 */
-		WARN_ON(state != FBINFO_STATE_RUNNING);
+		drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
 		if (!console_trylock()) {
 			/* Don't block our own workqueue as this can
 			 * be run in parallel with other i915.ko tasks.
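
[ Sketch of the slimmed-down fbdev bring-up implied by the hunks
  above: drm_fb_helper_init() loses its max_conn_count argument and
  the explicit drm_fb_helper_single_add_all_connectors() call goes
  away because the helper now tracks connectors internally. The
  function name is illustrative. ]

static int example_fbdev_setup(struct drm_device *dev,
			       struct drm_fb_helper *helper,
			       const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	/* no connector count and no add-all-connectors step anymore */
	ret = drm_fb_helper_init(dev, helper);
	if (ret)
		return ret;

	return drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);
}
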
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 6c83b35..813a4f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -95,15 +95,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+	if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
 		return;
 
 	enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
-	I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+	intel_de_posting_read(dev_priv, reg);
 
 	trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
-	DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+	drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
 }
 
 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -118,11 +118,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 	if (enable) {
 		u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
 
-		I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg,
+			       enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+		intel_de_posting_read(dev_priv, reg);
 	} else {
-		if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
-			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+		if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+			drm_err(&dev_priv->drm, "pipe %c underrun\n",
+				pipe_name(pipe));
 	}
 }
 
@@ -143,18 +145,18 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
-	u32 err_int = I915_READ(GEN7_ERR_INT);
+	u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
 	if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
 		return;
 
-	I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
-	POSTING_READ(GEN7_ERR_INT);
+	intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+	intel_de_posting_read(dev_priv, GEN7_ERR_INT);
 
 	trace_intel_cpu_fifo_underrun(dev_priv, pipe);
-	DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+	drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
 }
 
 static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -163,7 +165,8 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	if (enable) {
-		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+		intel_de_write(dev_priv, GEN7_ERR_INT,
+			       ERR_INT_FIFO_UNDERRUN(pipe));
 
 		if (!ivb_can_enable_err_int(dev))
 			return;
@@ -173,9 +176,10 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
 		ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
 		if (old &&
-		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
-			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
-				  pipe_name(pipe));
+		    intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			drm_err(&dev_priv->drm,
+				"uncleared fifo underrun on pipe %c\n",
+				pipe_name(pipe));
 		}
 	}
 }
@@ -209,19 +213,20 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pch_transcoder = crtc->pipe;
-	u32 serr_int = I915_READ(SERR_INT);
+	u32 serr_int = intel_de_read(dev_priv, SERR_INT);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
 	if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
 		return;
 
-	I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
-	POSTING_READ(SERR_INT);
+	intel_de_write(dev_priv, SERR_INT,
+		       SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+	intel_de_posting_read(dev_priv, SERR_INT);
 
 	trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-	DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
-		  pipe_name(pch_transcoder));
+	drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+		pipe_name(pch_transcoder));
 }
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -231,8 +236,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (enable) {
-		I915_WRITE(SERR_INT,
-			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+		intel_de_write(dev_priv, SERR_INT,
+			       SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
 
 		if (!cpt_can_enable_serr_int(dev))
 			return;
@@ -241,10 +246,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 	} else {
 		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
-		if (old && I915_READ(SERR_INT) &
+		if (old && intel_de_read(dev_priv, SERR_INT) &
 		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
-				  pipe_name(pch_transcoder));
+			drm_err(&dev_priv->drm,
+				"uncleared pch fifo underrun on pch transcoder %c\n",
+				pipe_name(pch_transcoder));
 		}
 	}
 }
@@ -378,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 
 	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
 		trace_intel_cpu_fifo_underrun(dev_priv, pipe);
-		DRM_ERROR("CPU pipe %c FIFO underrun\n",
-			  pipe_name(pipe));
+		drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+			pipe_name(pipe));
 	}
 
 	intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -400,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
 						  false)) {
 		trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
-			  pipe_name(pch_transcoder));
+		drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+			pipe_name(pch_transcoder));
 	}
 }
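
[ Minimal sketch of the mmio accessor conversion used in this file,
  assuming the intel_de_*() helpers wrap the uncore accessors as the
  hunks show: reads and writes take an explicit device argument, and
  POSTING_READ() becomes intel_de_posting_read(), a read-back that
  flushes the preceding write. example_ack_underrun() is
  illustrative only. ]

static void example_ack_underrun(struct drm_i915_private *dev_priv,
				 i915_reg_t reg, u32 status_bit)
{
	u32 val = intel_de_read(dev_priv, reg);

	if (!(val & status_bit))
		return;

	/* write the sticky status bit back to clear it, then flush */
	intel_de_write(dev_priv, reg, val | status_bit);
	intel_de_posting_read(dev_priv, reg);
}
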
 
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
new file mode 100644
index 0000000..a0cc894
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+				  struct intel_global_obj *obj,
+				  struct intel_global_state *state,
+				  const struct intel_global_state_funcs *funcs)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	obj->state = state;
+	obj->funcs = funcs;
+	list_add_tail(&obj->head, &dev_priv->global_obj_list);
+}
+
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+{
+	struct intel_global_obj *obj, *next;
+
+	list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
+		list_del(&obj->head);
+		obj->funcs->atomic_destroy_state(obj, obj->state);
+	}
+}
+
+static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+{
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc)
+		drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
+static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
+				 struct drm_modeset_lock *lock)
+{
+	struct drm_modeset_lock *l;
+
+	list_for_each_entry(l, &ctx->locked, head) {
+		if (lock == l)
+			return true;
+	}
+
+	return false;
+}
+
+static void assert_global_state_read_locked(struct intel_atomic_state *state)
+{
+	struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		if (modeset_lock_is_held(ctx, &crtc->base.mutex))
+			return;
+	}
+
+	WARN(1, "Global state not read locked\n");
+}
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+				  struct intel_global_obj *obj)
+{
+	int index, num_objs, i;
+	size_t size;
+	struct __intel_global_objs_state *arr;
+	struct intel_global_state *obj_state;
+
+	for (i = 0; i < state->num_global_objs; i++)
+		if (obj == state->global_objs[i].ptr)
+			return state->global_objs[i].state;
+
+	assert_global_state_read_locked(state);
+
+	num_objs = state->num_global_objs + 1;
+	size = sizeof(*state->global_objs) * num_objs;
+	arr = krealloc(state->global_objs, size, GFP_KERNEL);
+	if (!arr)
+		return ERR_PTR(-ENOMEM);
+
+	state->global_objs = arr;
+	index = state->num_global_objs;
+	memset(&state->global_objs[index], 0, sizeof(*state->global_objs));
+
+	obj_state = obj->funcs->atomic_duplicate_state(obj);
+	if (!obj_state)
+		return ERR_PTR(-ENOMEM);
+
+	obj_state->changed = false;
+
+	state->global_objs[index].state = obj_state;
+	state->global_objs[index].old_state = obj->state;
+	state->global_objs[index].new_state = obj_state;
+	state->global_objs[index].ptr = obj;
+	obj_state->state = state;
+
+	state->num_global_objs = num_objs;
+
+	DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
+			 obj, obj_state, state);
+
+	return obj_state;
+}
+
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+				      struct intel_global_obj *obj)
+{
+	int i;
+
+	for (i = 0; i < state->num_global_objs; i++)
+		if (obj == state->global_objs[i].ptr)
+			return state->global_objs[i].old_state;
+
+	return NULL;
+}
+
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+				      struct intel_global_obj *obj)
+{
+	int i;
+
+	for (i = 0; i < state->num_global_objs; i++)
+		if (obj == state->global_objs[i].ptr)
+			return state->global_objs[i].new_state;
+
+	return NULL;
+}
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_global_state *old_obj_state, *new_obj_state;
+	struct intel_global_obj *obj;
+	int i;
+
+	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+					    new_obj_state, i) {
+		WARN_ON(obj->state != old_obj_state);
+
+		/*
+		 * If the new state wasn't modified (and properly
+		 * locked for write access) we throw it away.
+		 */
+		if (!new_obj_state->changed)
+			continue;
+
+		assert_global_state_write_locked(dev_priv);
+
+		old_obj_state->state = state;
+		new_obj_state->state = NULL;
+
+		state->global_objs[i].state = old_obj_state;
+		obj->state = new_obj_state;
+	}
+}
+
+void intel_atomic_clear_global_state(struct intel_atomic_state *state)
+{
+	int i;
+
+	for (i = 0; i < state->num_global_objs; i++) {
+		struct intel_global_obj *obj = state->global_objs[i].ptr;
+
+		obj->funcs->atomic_destroy_state(obj,
+						 state->global_objs[i].state);
+		state->global_objs[i].ptr = NULL;
+		state->global_objs[i].state = NULL;
+		state->global_objs[i].old_state = NULL;
+		state->global_objs[i].new_state = NULL;
+	}
+	state->num_global_objs = 0;
+}
+
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
+{
+	struct intel_atomic_state *state = obj_state->state;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		int ret;
+
+		ret = drm_modeset_lock(&crtc->base.mutex,
+				       state->base.acquire_ctx);
+		if (ret)
+			return ret;
+	}
+
+	obj_state->changed = true;
+
+	return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
+{
+	struct intel_atomic_state *state = obj_state->state;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&dev_priv->drm, crtc) {
+		struct intel_crtc_state *crtc_state;
+
+		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+	}
+
+	obj_state->changed = true;
+
+	return 0;
+}
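
[ Illustrative sketch of how a driver-private global object plugs
  into the new API above; the example_* names are hypothetical. The
  object embeds struct intel_global_state first, supplies
  duplicate/destroy hooks, and registers once via
  intel_atomic_global_obj_init(). ]

struct example_global_state {
	struct intel_global_state base;
	unsigned int value;
};

static struct intel_global_state *
example_duplicate_state(struct intel_global_obj *obj)
{
	struct example_global_state *state;

	/* deep-copy the current state; the core fixes up ->state/->changed */
	state = kmemdup(container_of(obj->state,
				     struct example_global_state, base),
			sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void example_destroy_state(struct intel_global_obj *obj,
				  struct intel_global_state *state)
{
	kfree(container_of(state, struct example_global_state, base));
}

static const struct intel_global_state_funcs example_funcs = {
	.atomic_duplicate_state = example_duplicate_state,
	.atomic_destroy_state = example_destroy_state,
};
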
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
new file mode 100644
index 0000000..e6163a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GLOBAL_STATE_H__
+#define __INTEL_GLOBAL_STATE_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_global_obj;
+struct intel_global_state;
+
+struct intel_global_state_funcs {
+	struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj);
+	void (*atomic_destroy_state)(struct intel_global_obj *obj,
+				     struct intel_global_state *state);
+};
+
+struct intel_global_obj {
+	struct list_head head;
+	struct intel_global_state *state;
+	const struct intel_global_state_funcs *funcs;
+};
+
+#define intel_for_each_global_obj(obj, dev_priv) \
+	list_for_each_entry(obj, &(dev_priv)->global_obj_list, head)
+
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->num_global_objs && \
+		     ((obj) = (__state)->global_objs[__i].ptr, \
+		      (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+	     (__i)++) \
+		for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->num_global_objs && \
+		     ((obj) = (__state)->global_objs[__i].ptr, \
+		      (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+	     (__i)++) \
+		for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->num_global_objs && \
+		     ((obj) = (__state)->global_objs[__i].ptr, \
+		      (old_obj_state) = (__state)->global_objs[__i].old_state, \
+		      (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+	     (__i)++) \
+		for_each_if(obj)
+
+struct intel_global_state {
+	struct intel_atomic_state *state;
+	bool changed;
+};
+
+struct __intel_global_objs_state {
+	struct intel_global_obj *ptr;
+	struct intel_global_state *state, *old_state, *new_state;
+};
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+				  struct intel_global_obj *obj,
+				  struct intel_global_state *state,
+				  const struct intel_global_state_funcs *funcs);
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+				  struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+				      struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+				      struct intel_global_obj *obj);
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state);
+void intel_atomic_clear_global_state(struct intel_atomic_state *state);
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+
+#endif
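
[ Usage sketch for the check phase, reusing the hypothetical
  example_global_state above: a writer fetches its global state and
  then calls intel_atomic_lock_global_state() (take every crtc lock
  for write access) or intel_atomic_serialize_global_state() (pull
  every crtc state into the commit) before relying on the swap done
  by intel_atomic_swap_global_state(). ]

static struct example_global_state *
example_get_global_state(struct intel_atomic_state *state,
			 struct intel_global_obj *obj)
{
	struct intel_global_state *obj_state;
	int ret;

	obj_state = intel_atomic_get_global_obj_state(state, obj);
	if (IS_ERR(obj_state))
		return ERR_CAST(obj_state);

	/* writing: grab all crtc locks so the commit-time swap is safe */
	ret = intel_atomic_lock_global_state(obj_state);
	if (ret)
		return ERR_PTR(ret);

	return container_of(obj_state, struct example_global_state, base);
}
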
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 3d4d19a..1fd3a5a 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
 #include <linux/i2c.h>
 
 #include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_display_types.h"
@@ -143,8 +142,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 void
 intel_gmbus_reset(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(GMBUS0, 0);
-	I915_WRITE(GMBUS4, 0);
+	intel_de_write(dev_priv, GMBUS0, 0);
+	intel_de_write(dev_priv, GMBUS4, 0);
 }
 
 static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -153,12 +152,12 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	val = I915_READ(DSPCLK_GATE_D);
+	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
 	if (!enable)
 		val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
 	else
 		val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(DSPCLK_GATE_D, val);
+	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
 }
 
 static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -166,12 +165,12 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(SOUTH_DSPCLK_GATE_D);
+	val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
 	if (!enable)
 		val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
 	else
 		val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
-	I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
 }
 
 static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -179,12 +178,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(GEN9_CLKGATE_DIS_4);
+	val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
 	if (!enable)
 		val |= BXT_GMBUS_GATING_DIS;
 	else
 		val &= ~BXT_GMBUS_GATING_DIS;
-	I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+	intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
 }
 
 static u32 get_reserved(struct intel_gmbus *bus)
@@ -337,14 +336,16 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
 		irq_en = 0;
 
 	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
-	I915_WRITE_FW(GMBUS4, irq_en);
+	intel_de_write_fw(dev_priv, GMBUS4, irq_en);
 
 	status |= GMBUS_SATOER;
-	ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+	ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+			  2);
 	if (ret)
-		ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
+		ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+			       50);
 
-	I915_WRITE_FW(GMBUS4, 0);
+	intel_de_write_fw(dev_priv, GMBUS4, 0);
 	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
 	if (gmbus2 & GMBUS_SATOER)
@@ -366,13 +367,13 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 		irq_enable = GMBUS_IDLE_EN;
 
 	add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
-	I915_WRITE_FW(GMBUS4, irq_enable);
+	intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
 
 	ret = intel_wait_for_register_fw(&dev_priv->uncore,
 					 GMBUS2, GMBUS_ACTIVE, 0,
 					 10);
 
-	I915_WRITE_FW(GMBUS4, 0);
+	intel_de_write_fw(dev_priv, GMBUS4, 0);
 	remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
 
 	return ret;
@@ -404,15 +405,12 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 			len++;
 		}
 		size = len % 256 + 256;
-		I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+		intel_de_write_fw(dev_priv, GMBUS0,
+				  gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
 	}
 
-	I915_WRITE_FW(GMBUS1,
-		      gmbus1_index |
-		      GMBUS_CYCLE_WAIT |
-		      (size << GMBUS_BYTE_COUNT_SHIFT) |
-		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		      GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	intel_de_write_fw(dev_priv, GMBUS1,
+			  gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 		u32 val, loop = 0;
@@ -421,7 +419,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 		if (ret)
 			return ret;
 
-		val = I915_READ_FW(GMBUS3);
+		val = intel_de_read_fw(dev_priv, GMBUS3);
 		do {
 			if (extra_byte_added && len == 1)
 				break;
@@ -432,7 +430,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 
 		if (burst_read && len == size - 4)
 			/* Reset the override bit */
-			I915_WRITE_FW(GMBUS0, gmbus0_reg);
+			intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
 	}
 
 	return 0;
@@ -489,12 +487,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
 		len -= 1;
 	}
 
-	I915_WRITE_FW(GMBUS3, val);
-	I915_WRITE_FW(GMBUS1,
-		      gmbus1_index | GMBUS_CYCLE_WAIT |
-		      (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
-		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
-		      GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	intel_de_write_fw(dev_priv, GMBUS3, val);
+	intel_de_write_fw(dev_priv, GMBUS1,
+			  gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
 
@@ -503,7 +498,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
 			val |= *buf++ << (8 * loop);
 		} while (--len && ++loop < 4);
 
-		I915_WRITE_FW(GMBUS3, val);
+		intel_de_write_fw(dev_priv, GMBUS3, val);
 
 		ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 		if (ret)
@@ -568,7 +563,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
 
 	/* GMBUS5 holds 16-bit index */
 	if (gmbus5)
-		I915_WRITE_FW(GMBUS5, gmbus5);
+		intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
 
 	if (msgs[1].flags & I2C_M_RD)
 		ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
@@ -578,7 +573,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
 
 	/* Clear GMBUS5 after each index transfer */
 	if (gmbus5)
-		I915_WRITE_FW(GMBUS5, 0);
+		intel_de_write_fw(dev_priv, GMBUS5, 0);
 
 	return ret;
 }
@@ -601,7 +596,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 		pch_gmbus_clock_gating(dev_priv, false);
 
 retry:
-	I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
+	intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
 
 	for (; i < num; i += inc) {
 		inc = 1;
@@ -629,18 +624,19 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 	 * a STOP on the very first cycle. To simplify the code we
 	 * unconditionally generate the STOP condition with an additional gmbus
 	 * cycle. */
-	I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+	intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
 	/* Mark the GMBUS interface as disabled after waiting for idle.
 	 * We will re-enable it at the start of the next xfer,
 	 * till then let it sleep.
 	 */
 	if (gmbus_wait_idle(dev_priv)) {
-		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
-			 adapter->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "GMBUS [%s] timed out waiting for idle\n",
+			    adapter->name);
 		ret = -ETIMEDOUT;
 	}
-	I915_WRITE_FW(GMBUS0, 0);
+	intel_de_write_fw(dev_priv, GMBUS0, 0);
 	ret = ret ?: i;
 	goto out;
 
@@ -660,8 +656,9 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 	 */
 	ret = -ENXIO;
 	if (gmbus_wait_idle(dev_priv)) {
-		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
-			      adapter->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "GMBUS [%s] timed out after NAK\n",
+			    adapter->name);
 		ret = -ETIMEDOUT;
 	}
 
@@ -669,13 +666,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
-	I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
-	I915_WRITE_FW(GMBUS1, 0);
-	I915_WRITE_FW(GMBUS0, 0);
+	intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
+	intel_de_write_fw(dev_priv, GMBUS1, 0);
+	intel_de_write_fw(dev_priv, GMBUS0, 0);
 
-	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
-			 adapter->name, msgs[i].addr,
-			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+	drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+		    adapter->name, msgs[i].addr,
+		    (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
 	/*
 	 * Passive adapters sometimes NAK the first probe. Retry the first
@@ -684,17 +681,19 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
 	 */
 	if (ret == -ENXIO && i == 0 && try++ == 0) {
-		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
-			      adapter->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "GMBUS [%s] NAK on first message, retry\n",
+			    adapter->name);
 		goto retry;
 	}
 
 	goto out;
 
 timeout:
-	DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
-		      bus->adapter.name, bus->reg0 & 0xff);
-	I915_WRITE_FW(GMBUS0, 0);
+	drm_dbg_kms(&dev_priv->drm,
+		    "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+		    bus->adapter.name, bus->reg0 & 0xff);
+	intel_de_write_fw(dev_priv, GMBUS0, 0);
 
 	/*
 	 * Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -908,7 +907,8 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
 struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
 					    unsigned int pin)
 {
-	if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+	if (drm_WARN_ON(&dev_priv->drm,
+			!intel_gmbus_is_valid_pin(dev_priv, pin)))
 		return NULL;
 
 	return &dev_priv->gmbus[pin].adapter;
@@ -929,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 	mutex_lock(&dev_priv->gmbus_mutex);
 
 	bus->force_bit += force_bit ? 1 : -1;
-	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
-		      force_bit ? "en" : "dis", adapter->name,
-		      bus->force_bit);
+	drm_dbg_kms(&dev_priv->drm,
+		    "%sabling bit-banging on %s. force bit now %d\n",
+		    force_bit ? "en" : "dis", adapter->name,
+		    bus->force_bit);
 
 	mutex_unlock(&dev_priv->gmbus_mutex);
 }
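
[ Consumer-side sketch, assuming standard i2c core semantics:
  callers never poke GMBUS registers directly; they fetch the
  adapter for a pin and issue ordinary transfers, with the NAK retry
  and bit-banging fallback above handled underneath. The pin and
  addressing below are illustrative. ]

static int example_read_byte(struct drm_i915_private *dev_priv,
			     unsigned int pin, u16 addr, u8 offset, u8 *val)
{
	struct i2c_adapter *adapter =
		intel_gmbus_get_adapter(dev_priv, pin);
	struct i2c_msg msgs[] = {
		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	if (!adapter)
		return -ENXIO;

	/* i2c_transfer() returns the number of messages on success */
	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) ==
		ARRAY_SIZE(msgs) ? 0 : -EIO;
}
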
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 0fdbd39..ee0f27e 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
 			       const struct intel_hdcp_shim *shim, u8 *bksv)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
 	int ret, i, tries = 2;
 
 	/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
 			break;
 	}
 	if (i == tries) {
-		DRM_DEBUG_KMS("Bksv is invalid\n");
+		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
 		return -ENODEV;
 	}
 
@@ -64,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
 /* Is HDCP1.4 capable on Platform and Sink */
 bool intel_hdcp_capable(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
 	bool capable = false;
 	u8 bksv[5];
@@ -85,8 +86,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
 /* Is HDCP2.2 capable on Platform and Sink */
 bool intel_hdcp2_capable(struct intel_connector *connector)
 {
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	bool capable = false;
 
@@ -112,7 +113,8 @@ static inline
 bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
 		       enum transcoder cpu_transcoder, enum port port)
 {
-	return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+	return intel_de_read(dev_priv,
+	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
 	       HDCP_STATUS_ENC;
 }
 
@@ -120,7 +122,8 @@ static inline
 bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
 			enum transcoder cpu_transcoder, enum port port)
 {
-	return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+	return intel_de_read(dev_priv,
+	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
 	       LINK_ENCRYPTION_STATUS;
 }
 
@@ -184,9 +187,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
 
 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
-	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
-		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+	intel_de_write(dev_priv, HDCP_KEY_STATUS,
+		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
 }
 
 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
@@ -194,7 +197,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	int ret;
 	u32 val;
 
-	val = I915_READ(HDCP_KEY_STATUS);
+	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
 	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
 		return 0;
 
@@ -203,7 +206,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	 * out of reset. So if Key is not already loaded, its an error state.
 	 */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
 			return -ENXIO;
 
 	/*
@@ -217,12 +220,13 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 		ret = sandybridge_pcode_write(dev_priv,
 					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
 		if (ret) {
-			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
-			          ret);
+			drm_err(&dev_priv->drm,
+				"Failed to initiate HDCP key load (%d)\n",
+				ret);
 			return ret;
 		}
 	} else {
-		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
 	}
 
 	/* Wait for the keys to load (500us) */
@@ -235,7 +239,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 		return -ENXIO;
 
 	/* Send Aksv over to PCH display for use in authentication */
-	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
 
 	return 0;
 }
@@ -243,9 +247,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 /* Returns updated SHA-1 index */
 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
 {
-	I915_WRITE(HDCP_SHA_TEXT, sha_text);
+	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
-		DRM_ERROR("Timed out waiting for SHA1 ready\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
 		return -ETIMEDOUT;
 	}
 	return 0;
@@ -270,7 +274,8 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
 			return HDCP_TRANSD_REP_PRESENT |
 			       HDCP_TRANSD_SHA1_M0;
 		default:
-			DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
+				cpu_transcoder);
 			return -EINVAL;
 		}
 	}
@@ -287,7 +292,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
 	case PORT_E:
 		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
 	default:
-		DRM_ERROR("Unknown port %d\n", port);
+		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
 		return -EINVAL;
 	}
 }
@@ -297,21 +302,19 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 				const struct intel_hdcp_shim *shim,
 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
-	struct drm_i915_private *dev_priv;
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
 	enum port port = intel_dig_port->base.port;
 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
 	int ret, i, j, sha_idx;
 
-	dev_priv = intel_dig_port->base.base.dev->dev_private;
-
 	/* Process V' values from the receiver */
 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
 		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
 		if (ret)
 			return ret;
-		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
 	}
 
 	/*
@@ -328,7 +331,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 	sha_text = 0;
 	sha_leftovers = 0;
 	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
-	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
 	for (i = 0; i < num_downstream; i++) {
 		unsigned int sha_empty;
 		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
@@ -345,7 +348,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		/* Programming guide writes this every 64 bytes */
 		sha_idx += sizeof(sha_text);
 		if (!(sha_idx % 64))
-			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+			intel_de_write(dev_priv, HDCP_REP_CTL,
+				       rep_ctl | HDCP_SHA1_TEXT_32);
 
 		/* Store the leftover bytes from the ksv in sha_text */
 		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
@@ -377,7 +381,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 	 */
 	if (sha_leftovers == 0) {
 		/* Write 16 bits of text, 16 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_16);
 		ret = intel_write_sha_text(dev_priv,
 					   bstatus[0] << 8 | bstatus[1]);
 		if (ret < 0)
@@ -385,14 +390,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		sha_idx += sizeof(sha_text);
 
 		/* Write 32 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_0);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 
 		/* Write 16 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_16);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
@@ -400,7 +407,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 
 	} else if (sha_leftovers == 1) {
 		/* Write 24 bits of text, 8 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_24);
 		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
 		/* Only 24-bits of data, must be in the LSB */
 		sha_text = (sha_text & 0xffffff00) >> 8;
@@ -410,14 +418,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		sha_idx += sizeof(sha_text);
 
 		/* Write 32 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_0);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 
 		/* Write 24 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_8);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
@@ -425,7 +435,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 
 	} else if (sha_leftovers == 2) {
 		/* Write 32 bits of text */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_32);
 		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
 		ret = intel_write_sha_text(dev_priv, sha_text);
 		if (ret < 0)
@@ -433,7 +444,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		sha_idx += sizeof(sha_text);
 
 		/* Write 64 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_0);
 		for (i = 0; i < 2; i++) {
 			ret = intel_write_sha_text(dev_priv, 0);
 			if (ret < 0)
@@ -442,7 +454,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		}
 	} else if (sha_leftovers == 3) {
 		/* Write 32 bits of text */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_32);
 		sha_text |= bstatus[0] << 24;
 		ret = intel_write_sha_text(dev_priv, sha_text);
 		if (ret < 0)
@@ -450,32 +463,35 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		sha_idx += sizeof(sha_text);
 
 		/* Write 8 bits of text, 24 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_8);
 		ret = intel_write_sha_text(dev_priv, bstatus[1]);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 
 		/* Write 32 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_0);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 
 		/* Write 8 bits of M0 */
-		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       rep_ctl | HDCP_SHA1_TEXT_24);
 		ret = intel_write_sha_text(dev_priv, 0);
 		if (ret < 0)
 			return ret;
 		sha_idx += sizeof(sha_text);
 	} else {
-		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
-			      sha_leftovers);
+		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+			    sha_leftovers);
 		return -EINVAL;
 	}
 
-	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
 	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
 	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
 		ret = intel_write_sha_text(dev_priv, 0);
@@ -495,14 +511,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 		return ret;
 
 	/* Tell the HW we're done with the hash and wait for it to ACK */
-	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+	intel_de_write(dev_priv, HDCP_REP_CTL,
+		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
 				  HDCP_SHA1_COMPLETE, 1)) {
-		DRM_ERROR("Timed out waiting for SHA1 complete\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
 		return -ETIMEDOUT;
 	}
-	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
-		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
 		return -ENXIO;
 	}
 
@@ -513,15 +530,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
 static
 int intel_hdcp_auth_downstream(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
-	struct drm_device *dev = connector->base.dev;
 	u8 bstatus[2], num_downstream, *ksv_fifo;
 	int ret, i, tries = 3;
 
 	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
 	if (ret) {
-		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "KSV list failed to become ready (%d)\n", ret);
 		return ret;
 	}
 
@@ -531,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 
 	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
 	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
-		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
 		return -EPERM;
 	}
 
@@ -544,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 	 */
 	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
 	if (num_downstream == 0) {
-		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Repeater with zero downstream devices\n");
 		return -EINVAL;
 	}
 
 	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
 	if (!ksv_fifo) {
-		DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
 		return -ENOMEM;
 	}
 
@@ -558,8 +577,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 	if (ret)
 		goto err;
 
-	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
-		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
+					num_downstream)) {
+		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
 		ret = -EPERM;
 		goto err;
 	}
@@ -577,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 	}
 
 	if (i == tries) {
-		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "V Prime validation failed.(%d)\n", ret);
 		goto err;
 	}
 
-	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
-		      num_downstream);
+	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+		    num_downstream);
 	ret = 0;
 err:
 	kfree(ksv_fifo);
@@ -592,13 +613,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 /* Implements Part 1 of the HDCP authorization procedure */
 static int intel_hdcp_auth(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_device *dev = connector->base.dev;
 	const struct intel_hdcp_shim *shim = hdcp->shim;
-	struct drm_i915_private *dev_priv;
 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
-	enum port port;
+	enum port port = intel_dig_port->base.port;
 	unsigned long r0_prime_gen_start;
 	int ret, i, tries = 2;
 	union {
@@ -615,10 +635,6 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	} ri;
 	bool repeater_present, hdcp_capable;
 
-	dev_priv = intel_dig_port->base.base.dev->dev_private;
-
-	port = intel_dig_port->base.port;
-
 	/*
 	 * Detects whether the display is HDCP capable. Although we check for
 	 * valid Bksv below, the HDCP over DP spec requires that we check
@@ -630,28 +646,32 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 		if (ret)
 			return ret;
 		if (!hdcp_capable) {
-			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Panel is not HDCP capable\n");
 			return -EINVAL;
 		}
 	}
 
 	/* Initialize An with 2 random values and acquire it */
 	for (i = 0; i < 2; i++)
-		I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
-			   get_random_u32());
-	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
-		   HDCP_CONF_CAPTURE_AN);
+		intel_de_write(dev_priv,
+			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+			       get_random_u32());
+	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+		       HDCP_CONF_CAPTURE_AN);
 
 	/* Wait for An to be acquired */
 	if (intel_de_wait_for_set(dev_priv,
 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
 				  HDCP_STATUS_AN_READY, 1)) {
-		DRM_ERROR("Timed out waiting for An\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
 		return -ETIMEDOUT;
 	}
 
-	an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
-	an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
+	an.reg[0] = intel_de_read(dev_priv,
+				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
+	an.reg[1] = intel_de_read(dev_priv,
+				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
 	ret = shim->write_an_aksv(intel_dig_port, an.shim);
 	if (ret)
 		return ret;
@@ -664,33 +684,34 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	if (ret < 0)
 		return ret;
 
-	if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
-		DRM_ERROR("BKSV is revoked\n");
+	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+		drm_err(&dev_priv->drm, "BKSV is revoked\n");
 		return -EPERM;
 	}
 
-	I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
-	I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
+	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
+		       bksv.reg[0]);
+	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
+		       bksv.reg[1]);
 
 	ret = shim->repeater_present(intel_dig_port, &repeater_present);
 	if (ret)
 		return ret;
 	if (repeater_present)
-		I915_WRITE(HDCP_REP_CTL,
-			   intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
-						       port));
+		intel_de_write(dev_priv, HDCP_REP_CTL,
+			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
 
 	ret = shim->toggle_signalling(intel_dig_port, true);
 	if (ret)
 		return ret;
 
-	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
-		   HDCP_CONF_AUTH_AND_ENC);
+	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+		       HDCP_CONF_AUTH_AND_ENC);
 
 	/* Wait for R0 ready */
-	if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
 		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
-		DRM_ERROR("Timed out waiting for R0 ready\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
 		return -ETIMEDOUT;
 	}
 
@@ -716,19 +737,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
 		if (ret)
 			return ret;
-		I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+		intel_de_write(dev_priv,
+			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
+			       ri.reg);
 
 		/* Wait for Ri prime match */
-		if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
-						    port)) &
-		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
 			break;
 	}
 
 	if (i == tries) {
-		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
-			      I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
-						    port)));
+		drm_dbg_kms(&dev_priv->drm,
+			    "Timed out waiting for Ri prime match (%x)\n",
+			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+					  cpu_transcoder, port)));
 		return -ETIMEDOUT;
 	}
 
@@ -737,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
 				  HDCP_STATUS_ENC,
 				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
-		DRM_ERROR("Timed out waiting for encryption\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
 		return -ETIMEDOUT;
 	}
 
@@ -749,52 +772,53 @@ static int intel_hdcp_auth(struct intel_connector *connector)
 	if (repeater_present)
 		return intel_hdcp_auth_downstream(connector);
 
-	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
 	return 0;
 }
 
 static int _intel_hdcp_disable(struct intel_connector *connector)
 {
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
 	enum port port = intel_dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
-		      connector->base.name, connector->base.base.id);
+	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
+		    connector->base.name, connector->base.base.id);
 
 	hdcp->hdcp_encrypted = false;
-	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
 	if (intel_de_wait_for_clear(dev_priv,
 				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
 				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
-		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+		drm_err(&dev_priv->drm,
+			"Failed to disable HDCP, timeout clearing status\n");
 		return -ETIMEDOUT;
 	}
 
 	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
 	if (ret) {
-		DRM_ERROR("Failed to disable HDCP signalling\n");
+		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
 		return ret;
 	}
 
-	DRM_DEBUG_KMS("HDCP is disabled\n");
+	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
 	return 0;
 }
 
 static int _intel_hdcp_enable(struct intel_connector *connector)
 {
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
 	int i, ret, tries = 3;
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
-		      connector->base.name, connector->base.base.id);
+	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
+		    connector->base.name, connector->base.base.id);
 
 	if (!hdcp_key_loadable(dev_priv)) {
-		DRM_ERROR("HDCP key Load is not possible\n");
+		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
 		return -ENXIO;
 	}
 
@@ -805,7 +829,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 		intel_hdcp_clear_keys(dev_priv);
 	}
 	if (ret) {
-		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
+			ret);
 		return ret;
 	}
 
@@ -817,13 +842,14 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
 			return 0;
 		}
 
-		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
 
 		/* Ensuring HDCP encryption and signalling are stopped. */
 		_intel_hdcp_disable(connector);
 	}
 
-	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+	drm_dbg_kms(&dev_priv->drm,
+		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
 	return ret;
 }
 
@@ -836,9 +862,9 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
 /* Implements Part 3 of the HDCP authorization procedure */
 static int intel_hdcp_check_link(struct intel_connector *connector)
 {
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
 	enum port port = intel_dig_port->base.port;
 	enum transcoder cpu_transcoder;
 	int ret = 0;
@@ -853,11 +879,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
 		goto out;
 	}
 
-	if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
-		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
-			  connector->base.name, connector->base.base.id,
-			  I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
-						port)));
+	if (drm_WARN_ON(&dev_priv->drm,
+			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+		drm_err(&dev_priv->drm,
+			"%s:%d HDCP link stopped encryption,%x\n",
+			connector->base.name, connector->base.base.id,
+			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
 		ret = -ENXIO;
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
@@ -872,12 +899,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
 		goto out;
 	}
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
-		      connector->base.name, connector->base.base.id);
+	drm_dbg_kms(&dev_priv->drm,
+		    "[%s:%d] HDCP link failed, retrying authentication\n",
+		    connector->base.name, connector->base.base.id);
 
 	ret = _intel_hdcp_disable(connector);
 	if (ret) {
-		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
 		goto out;
@@ -885,7 +913,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
 
 	ret = _intel_hdcp_enable(connector);
 	if (ret) {
-		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
 		goto out;
@@ -901,9 +929,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
 	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
 					       prop_work);
 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
-	struct drm_device *dev = connector->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
 	mutex_lock(&hdcp->mutex);
 
 	/*
@@ -916,13 +944,13 @@ static void intel_hdcp_prop_work(struct work_struct *work)
 						   hdcp->value);
 
 	mutex_unlock(&hdcp->mutex);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
 }
 
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
 {
-	/* PORT E doesn't have HDCP, and PORT F is disabled */
-	return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
+	return INTEL_INFO(dev_priv)->display.has_hdcp &&
+			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
 }
 
 static int
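The is_hdcp_supported() hunk above widens the port check: gen12 parts drive HDCP through per-transcoder registers, so the older rule that only ports A-D carry HDCP no longer applies there. A stand-alone sketch of just that predicate, with an illustrative enum and toy parameters rather than the driver's real types:

#include <assert.h>
#include <stdbool.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

/* Toy model: HDCP is available when the platform has the block at all,
 * and either we are on gen12+ (per-transcoder HDCP, any port works) or
 * on an older part where only ports A-D qualify. */
static bool hdcp_supported(bool has_hdcp, int gen, enum port port)
{
	return has_hdcp && (gen >= 12 || port < PORT_E);
}

int main(void)
{
	assert(!hdcp_supported(true, 11, PORT_E));  /* pre-gen12: no */
	assert(hdcp_supported(true, 12, PORT_E));   /* gen12+: yes */
	assert(!hdcp_supported(false, 12, PORT_A)); /* no HDCP block at all */
	return 0;
}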
@@ -944,7 +972,8 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
 
 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
 	if (ret)
-		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -974,7 +1003,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
 							 rx_cert, paired,
 							 ek_pub_km, msg_sz);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -998,7 +1028,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
 
 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1023,7 +1053,8 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
 
 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1048,7 +1079,8 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
 
 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1073,7 +1105,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
 
 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1097,7 +1130,8 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
 
 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1126,7 +1160,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
 							 rep_topology,
 							 rep_send_ack);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Verify rep topology failed. %d\n", ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1151,7 +1186,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
 
 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
@@ -1174,7 +1209,8 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
 
 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
 	if (ret < 0)
-		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
+			    ret);
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 
 	return ret;
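Every hdcp2_* wrapper in this stretch follows one call shape: take the component mutex, dispatch through the bound mei ops table, log a device-scoped debug message on failure, drop the mutex. A minimal user-space model of that pattern; all names here are stand-ins (a pthread mutex in place of the kernel mutex, a fake ops table in place of the mei component):

#include <pthread.h>
#include <stdio.h>

struct hdcp_ops {
	int (*initiate_session)(void *mei_dev);
};

struct hdcp_master {
	void *mei_dev;
	const struct hdcp_ops *ops;
};

static pthread_mutex_t comp_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct hdcp_master *master;

/* Mirrors the shape of hdcp2_prepare_ake_init() and friends: lock,
 * check the component is bound, call through ops, log on failure. */
static int prepare_ake_init(void)
{
	int ret;

	pthread_mutex_lock(&comp_mutex);
	if (!master || !master->ops) {
		pthread_mutex_unlock(&comp_mutex);
		return -1; /* component not bound yet */
	}
	ret = master->ops->initiate_session(master->mei_dev);
	if (ret < 0)
		fprintf(stderr, "Prepare_ake_init failed. %d\n", ret);
	pthread_mutex_unlock(&comp_mutex);
	return ret;
}

static int fake_initiate(void *mei_dev)
{
	(void)mei_dev;
	return 0;
}

int main(void)
{
	static const struct hdcp_ops ops = { .initiate_session = fake_initiate };
	static struct hdcp_master m = { .ops = &ops };

	master = &m;
	return prepare_ake_init();
}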
@@ -1209,9 +1245,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
 /* Authentication flow starts from here */
 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_device *dev = connector->base.dev;
 	union {
 		struct hdcp2_ake_init ake_init;
 		struct hdcp2_ake_send_cert send_cert;
@@ -1242,15 +1278,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 		return ret;
 
 	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
-		DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
 		return -EINVAL;
 	}
 
 	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
 
-	if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+					msgs.send_cert.cert_rx.receiver_id,
 					1)) {
-		DRM_ERROR("Receiver ID is revoked\n");
+		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
 		return -EPERM;
 	}
 
@@ -1297,7 +1334,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
 
 static int hdcp2_locality_check(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
 		struct hdcp2_lc_init lc_init;
@@ -1333,7 +1370,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
 
 static int hdcp2_session_key_exchange(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	struct hdcp2_ske_send_eks send_eks;
 	int ret;
@@ -1353,7 +1390,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
 static
 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	union {
 		struct hdcp2_rep_stream_manage stream_manage;
@@ -1404,9 +1441,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
 static
 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	struct drm_device *dev = connector->base.dev;
 	union {
 		struct hdcp2_rep_send_receiverid_list recvid_list;
 		struct hdcp2_rep_send_ack rep_ack;
@@ -1425,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 
 	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
 	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
-		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
 		return -EINVAL;
 	}
 
@@ -1433,17 +1470,24 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
 	seq_num_v =
 		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
 
+	if (!hdcp->hdcp2_encrypted && seq_num_v) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Non zero Seq_num_v at first RecvId_List msg\n");
+		return -EINVAL;
+	}
+
 	if (seq_num_v < hdcp->seq_num_v) {
 		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
-		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
 		return -EINVAL;
 	}
 
 	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
 		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
-	if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+					msgs.recvid_list.receiver_ids,
 					device_cnt)) {
-		DRM_ERROR("Revoked receiver ID(s) is in list\n");
+		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
 		return -EPERM;
 	}
 
@@ -1475,26 +1519,28 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
 
 static int hdcp2_authenticate_sink(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	const struct intel_hdcp_shim *shim = hdcp->shim;
 	int ret;
 
 	ret = hdcp2_authentication_key_exchange(connector);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
 		return ret;
 	}
 
 	ret = hdcp2_locality_check(connector);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+		drm_dbg_kms(&i915->drm,
+			    "Locality Check failed. Err : %d\n", ret);
 		return ret;
 	}
 
 	ret = hdcp2_session_key_exchange(connector);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
 		return ret;
 	}
 
@@ -1509,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
 	if (hdcp->is_repeater) {
 		ret = hdcp2_authenticate_repeater(connector);
 		if (ret < 0) {
-			DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+			drm_dbg_kms(&i915->drm,
+				    "Repeater Auth Failed. Err: %d\n", ret);
 			return ret;
 		}
 	}
@@ -1524,31 +1571,32 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
 
 static int hdcp2_enable_encryption(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = connector->encoder->port;
+	enum port port = intel_dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;
 
-	WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
-		LINK_ENCRYPTION_STATUS);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+		    LINK_ENCRYPTION_STATUS);
 	if (hdcp->shim->toggle_signalling) {
 		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
 		if (ret) {
-			DRM_ERROR("Failed to enable HDCP signalling. %d\n",
-				  ret);
+			drm_err(&dev_priv->drm,
+				"Failed to enable HDCP signalling. %d\n",
+				ret);
 			return ret;
 		}
 	}
 
-	if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
 	    LINK_AUTH_STATUS) {
 		/* Link is Authenticated. Now set for Encryption */
-		I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
-			   I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
-					       port)) |
-			   CTL_LINK_ENCRYPTION_REQ);
+		intel_de_write(dev_priv,
+			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
+			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
 	}
 
 	ret = intel_de_wait_for_set(dev_priv,
@@ -1562,19 +1610,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
 
 static int hdcp2_disable_encryption(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = connector->encoder->port;
+	enum port port = intel_dig_port->base.port;
 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
 	int ret;
 
-	WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
-			    LINK_ENCRYPTION_STATUS));
+	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+				      LINK_ENCRYPTION_STATUS));
 
-	I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
-		   I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
-		   ~CTL_LINK_ENCRYPTION_REQ);
+	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
 
 	ret = intel_de_wait_for_clear(dev_priv,
 				      HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1582,13 +1629,14 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
 				      LINK_ENCRYPTION_STATUS,
 				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
 	if (ret == -ETIMEDOUT)
-		DRM_DEBUG_KMS("Disable Encryption Timedout");
+		drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out\n");
 
 	if (hdcp->shim->toggle_signalling) {
 		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
 		if (ret) {
-			DRM_ERROR("Failed to disable HDCP signalling. %d\n",
-				  ret);
+			drm_err(&dev_priv->drm,
+				"Failed to disable HDCP signalling. %d\n",
+				ret);
 			return ret;
 		}
 	}
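hdcp2_enable_encryption() and hdcp2_disable_encryption() above lean on intel_de_wait_for_set()/intel_de_wait_for_clear(): poll a status register until a mask appears or vanishes within ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, returning -ETIMEDOUT otherwise. A rough user-space analogue of such a poll loop; timing is simplified and the names are stand-ins, not the driver's helpers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Toy poll loop in the spirit of intel_de_wait_for_clear(): spin
 * until (read_reg() & mask) == 0 or the timeout elapses. */
static int wait_for_clear(uint32_t (*read_reg)(void), uint32_t mask,
			  unsigned int timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 }; /* 1 ms */
	unsigned int waited;

	for (waited = 0; waited < timeout_ms; waited++) {
		if (!(read_reg() & mask))
			return 0;
		nanosleep(&ts, NULL);
	}
	return -ETIMEDOUT;
}

static uint32_t fake_status(void)
{
	static int calls;
	return ++calls > 3 ? 0 : 0x1; /* bit clears after a few polls */
}

int main(void)
{
	printf("wait_for_clear: %d\n", wait_for_clear(fake_status, 0x1, 10));
	return 0;
}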
@@ -1598,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
 
 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 {
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	int ret, i, tries = 3;
 
 	for (i = 0; i < tries; i++) {
@@ -1606,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 			break;
 
 		/* Clearing the mei hdcp session */
-		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
-			      i + 1, tries, ret);
+		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+			    i + 1, tries, ret);
 		if (hdcp2_deauthenticate_port(connector) < 0)
-			DRM_DEBUG_KMS("Port deauth failed.\n");
+			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
 	}
 
 	if (i != tries) {
@@ -1620,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
 		ret = hdcp2_enable_encryption(connector);
 		if (ret < 0) {
-			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+			drm_dbg_kms(&i915->drm,
+				    "Encryption Enable Failed.(%d)\n", ret);
 			if (hdcp2_deauthenticate_port(connector) < 0)
-				DRM_DEBUG_KMS("Port deauth failed.\n");
+				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
 		}
 	}
 
@@ -1631,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
 
 static int _intel_hdcp2_enable(struct intel_connector *connector)
 {
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
-		      connector->base.name, connector->base.base.id,
-		      hdcp->content_type);
+	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+		    connector->base.name, connector->base.base.id,
+		    hdcp->content_type);
 
 	ret = hdcp2_authenticate_and_encrypt(connector);
 	if (ret) {
-		DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
-			      hdcp->content_type, ret);
+		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+			    hdcp->content_type, ret);
 		return ret;
 	}
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
-		      connector->base.name, connector->base.base.id,
-		      hdcp->content_type);
+	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+		    connector->base.name, connector->base.base.id,
+		    hdcp->content_type);
 
 	hdcp->hdcp2_encrypted = true;
 	return 0;
@@ -1655,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
 
 static int _intel_hdcp2_disable(struct intel_connector *connector)
 {
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	int ret;
 
-	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
-		      connector->base.name, connector->base.base.id);
+	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+		    connector->base.name, connector->base.base.id);
 
 	ret = hdcp2_disable_encryption(connector);
 
 	if (hdcp2_deauthenticate_port(connector) < 0)
-		DRM_DEBUG_KMS("Port deauth failed.\n");
+		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
 
 	connector->hdcp.hdcp2_encrypted = false;
 
@@ -1673,10 +1725,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
 /* Implements the Link Integrity Check for HDCP2.2 */
 static int intel_hdcp2_check_link(struct intel_connector *connector)
 {
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
-	enum port port = connector->encoder->port;
+	enum port port = intel_dig_port->base.port;
 	enum transcoder cpu_transcoder;
 	int ret = 0;
 
@@ -1690,10 +1742,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 		goto out;
 	}
 
-	if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
-		DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
-			  I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
-						 port)));
+	if (drm_WARN_ON(&dev_priv->drm,
+			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+		drm_err(&dev_priv->drm,
+			"HDCP2.2 link stopped the encryption, %x\n",
+			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
 		ret = -ENXIO;
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
@@ -1713,25 +1766,29 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
 			goto out;
 
-		DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "HDCP2.2 Downstream topology change\n");
 		ret = hdcp2_authenticate_repeater_topology(connector);
 		if (!ret) {
 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
 			schedule_work(&hdcp->prop_work);
 			goto out;
 		}
-		DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
-			      connector->base.name, connector->base.base.id,
-			      ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[%s:%d] Repeater topology auth failed.(%d)\n",
+			    connector->base.name, connector->base.base.id,
+			    ret);
 	} else {
-		DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
-			      connector->base.name, connector->base.base.id);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
+			    connector->base.name, connector->base.base.id);
 	}
 
 	ret = _intel_hdcp2_disable(connector);
 	if (ret) {
-		DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
-			  connector->base.name, connector->base.base.id, ret);
+		drm_err(&dev_priv->drm,
+			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+			connector->base.name, connector->base.base.id, ret);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
 		goto out;
@@ -1739,9 +1796,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 
 	ret = _intel_hdcp2_enable(connector);
 	if (ret) {
-		DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
-			      connector->base.name, connector->base.base.id,
-			      ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+			    connector->base.name, connector->base.base.id,
+			    ret);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 		schedule_work(&hdcp->prop_work);
 		goto out;
@@ -1772,7 +1830,7 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
-	DRM_DEBUG("I915 HDCP comp bind\n");
+	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
 	mutex_lock(&dev_priv->hdcp_comp_mutex);
 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
 	dev_priv->hdcp_master->mei_dev = mei_kdev;
@@ -1786,7 +1844,7 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
 
-	DRM_DEBUG("I915 HDCP comp unbind\n");
+	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
 	mutex_lock(&dev_priv->hdcp_comp_mutex);
 	dev_priv->hdcp_master = NULL;
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1830,7 +1888,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
 
 	if (INTEL_GEN(dev_priv) < 12)
 		data->fw_ddi =
-			intel_get_mei_fw_ddi_index(connector->encoder->port);
+			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
 	else
 		/*
 		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1854,7 +1912,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
 					sizeof(struct hdcp2_streamid_type),
 					GFP_KERNEL);
 	if (!data->streams) {
-		DRM_ERROR("Out of Memory\n");
+		drm_err(&dev_priv->drm, "Out of Memory\n");
 		return -ENOMEM;
 	}
 
@@ -1881,14 +1939,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
 		return;
 
 	mutex_lock(&dev_priv->hdcp_comp_mutex);
-	WARN_ON(dev_priv->hdcp_comp_added);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
 
 	dev_priv->hdcp_comp_added = true;
 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
 				  I915_COMPONENT_HDCP);
 	if (ret < 0) {
-		DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+			    ret);
 		mutex_lock(&dev_priv->hdcp_comp_mutex);
 		dev_priv->hdcp_comp_added = false;
 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1899,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
 static void intel_hdcp2_init(struct intel_connector *connector,
 			     const struct intel_hdcp_shim *shim)
 {
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	int ret;
 
 	ret = initialize_hdcp_port_data(connector, shim);
 	if (ret) {
-		DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
 		return;
 	}
 
@@ -1954,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
 		return -ENOENT;
 
 	mutex_lock(&hdcp->mutex);
-	WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+	drm_WARN_ON(&dev_priv->drm,
+		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
 	hdcp->content_type = content_type;
 
 	if (INTEL_GEN(dev_priv) >= 12) {
@@ -2014,6 +2075,46 @@ int intel_hdcp_disable(struct intel_connector *connector)
 	return ret;
 }
 
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    const struct drm_connector_state *conn_state)
+{
+	struct intel_connector *connector =
+				to_intel_connector(conn_state->connector);
+	struct intel_hdcp *hdcp = &connector->hdcp;
+	bool content_protection_type_changed =
+		(conn_state->hdcp_content_type != hdcp->content_type &&
+		 conn_state->content_protection !=
+		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+	/*
+	 * If a content type change is requested during an active HDCP
+	 * encryption session, disable HDCP and re-enable it with the new
+	 * type value.
+	 */
+	if (conn_state->content_protection ==
+	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+	    content_protection_type_changed)
+		intel_hdcp_disable(connector);
+
+	/*
+	 * Mark the HDCP state as DESIRED after the disable triggered by a
+	 * type change, so the enable below runs with the new type.
+	 */
+	if (content_protection_type_changed) {
+		mutex_lock(&hdcp->mutex);
+		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&hdcp->prop_work);
+		mutex_unlock(&hdcp->mutex);
+	}
+
+	if (conn_state->content_protection ==
+	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+	    content_protection_type_changed)
+		intel_hdcp_enable(connector,
+				  crtc_state->cpu_transcoder,
+				  (u8)conn_state->hdcp_content_type);
+}
+
 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->hdcp_comp_mutex);
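Almost every hunk in intel_hdcp.c above is the same mechanical rewrite: I915_READ/I915_WRITE become intel_de_read/intel_de_write with an explicit dev_priv, and DRM_ERROR/DRM_DEBUG_KMS become drm_err/drm_dbg_kms keyed to &dev_priv->drm, so log lines and register traffic are attributable to a specific device on multi-GPU systems. A toy user-space model of the before/after shape; the struct layouts and helpers here are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct drm_device { const char *name; };
struct i915 { struct drm_device drm; uint32_t regs[8]; };

/* Old style: a global macro with no notion of which device spoke. */
#define DRM_ERROR(fmt, ...) fprintf(stderr, "[drm] *ERROR* " fmt, ##__VA_ARGS__)

/* New style: the device is an explicit first argument. */
static void drm_err(struct drm_device *drm, const char *msg)
{
	fprintf(stderr, "[drm:%s] *ERROR* %s", drm->name, msg);
}

static uint32_t intel_de_read(struct i915 *i915, unsigned int reg)
{
	return i915->regs[reg];
}

static void intel_de_write(struct i915 *i915, unsigned int reg, uint32_t val)
{
	i915->regs[reg] = val;
}

int main(void)
{
	struct i915 card = { .drm = { .name = "0000:00:02.0" } };

	DRM_ERROR("old-style message, device unknown\n");

	intel_de_write(&card, 0, 0x1);
	if (intel_de_read(&card, 0) & 0x1)
		drm_err(&card.drm, "new-style message, device tagged\n");
	return 0;
}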
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index f3c3272..7c12ad6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,12 +8,12 @@
 
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 struct drm_connector;
 struct drm_connector_state;
 struct drm_i915_private;
 struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
 struct intel_hdcp_shim;
 enum port;
 enum transcoder;
@@ -26,6 +26,9 @@ int intel_hdcp_init(struct intel_connector *connector,
 int intel_hdcp_enable(struct intel_connector *connector,
 		      enum transcoder cpu_transcoder, u8 content_type);
 int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    const struct drm_connector_state *conn_state);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 bool intel_hdcp_capable(struct intel_connector *connector);
 bool intel_hdcp2_capable(struct intel_connector *connector);
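The header gains intel_hdcp_update_pipe(), whose body (added earlier in this diff) reduces to a small decision table over the requested content-protection state: UNDESIRED or a content-type change disables HDCP, a type change additionally resets the state to DESIRED, and DESIRED or a type change triggers enable. A stand-alone sketch of just that control flow; the enum mimics the uapi values, everything else is a stand-in:

#include <stdbool.h>
#include <stdio.h>

enum cp_value { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

/* Toy model of intel_hdcp_update_pipe()'s branching; the real code
 * calls intel_hdcp_disable()/intel_hdcp_enable() and schedules the
 * connector property work instead of printing. */
static void update_pipe(enum cp_value requested, int req_type, int cur_type)
{
	bool type_changed = req_type != cur_type &&
			    requested != CP_UNDESIRED;

	if (requested == CP_UNDESIRED || type_changed)
		printf("disable HDCP\n");

	if (type_changed)
		printf("mark state DESIRED, schedule prop work\n");

	if (requested == CP_DESIRED || type_changed)
		printf("enable HDCP with content type %d\n", req_type);
}

int main(void)
{
	update_pipe(CP_ENABLED, 1, 0);   /* type change: disable + re-enable */
	update_pipe(CP_DESIRED, 0, 0);   /* plain enable request */
	update_pipe(CP_UNDESIRED, 0, 0); /* teardown only */
	return 0;
}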
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 93ac0f2..3993023 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_hdcp.h>
 #include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
 #include <drm/intel_lpe_audio.h>
 
 #include "i915_debugfs.h"
@@ -45,6 +44,7 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dpio_phy.h"
@@ -72,17 +72,19 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 
 	enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
-	WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
-	     "HDMI port enabled, expecting disabled\n");
+	drm_WARN(dev,
+		 intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+		 "HDMI port enabled, expecting disabled\n");
 }
 
 static void
 assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
 				     enum transcoder cpu_transcoder)
 {
-	WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
-	     TRANS_DDI_FUNC_ENABLE,
-	     "HDMI transcoder function enabled, expecting disabled\n");
+	drm_WARN(&dev_priv->drm,
+		 intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+		 TRANS_DDI_FUNC_ENABLE,
+		 "HDMI transcoder function enabled, expecting disabled\n");
 }
 
 struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
@@ -215,32 +217,33 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
 {
 	const u32 *data = frame;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	u32 val = I915_READ(VIDEO_DIP_CTL);
+	u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
 	int i;
 
-	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+	drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+		 "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
 	val &= ~g4x_infoframe_enable(type);
 
-	I915_WRITE(VIDEO_DIP_CTL, val);
+	intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
 
 	for (i = 0; i < len; i += 4) {
-		I915_WRITE(VIDEO_DIP_DATA, *data);
+		intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
 	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
-		I915_WRITE(VIDEO_DIP_DATA, 0);
+		intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
 
 	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(VIDEO_DIP_CTL, val);
-	POSTING_READ(VIDEO_DIP_CTL);
+	intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+	intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
 }
 
 static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -252,22 +255,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
 	u32 val, *data = frame;
 	int i;
 
-	val = I915_READ(VIDEO_DIP_CTL);
+	val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
-	I915_WRITE(VIDEO_DIP_CTL, val);
+	intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
 
 	for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(VIDEO_DIP_DATA);
+		*data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
 }
 
 static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	u32 val = I915_READ(VIDEO_DIP_CTL);
+	u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return 0;
@@ -288,32 +291,34 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	int i;
 
-	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+	drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+		 "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
 	val &= ~g4x_infoframe_enable(type);
 
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 
 	for (i = 0; i < len; i += 4) {
-		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+			       *data);
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
 	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
-		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+		intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 
 	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 }
 
 static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -326,15 +331,15 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
 	u32 val, *data = frame;
 	int i;
 
-	val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+	val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
-	I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+	intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
 
 	for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+		*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
 }
 
 static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
@@ -343,7 +348,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
 	i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return 0;
@@ -365,10 +370,11 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	int i;
 
-	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+	drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+		 "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
@@ -378,22 +384,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
 	if (type != HDMI_INFOFRAME_TYPE_AVI)
 		val &= ~g4x_infoframe_enable(type);
 
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 
 	for (i = 0; i < len; i += 4) {
-		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+			       *data);
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
 	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
-		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+		intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 
 	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 }
 
 static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -406,15 +413,15 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
 	u32 val, *data = frame;
 	int i;
 
-	val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+	val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
-	I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+	intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
 
 	for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+		*data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
 }
 
 static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
@@ -422,7 +429,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
-	u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
+	u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return 0;
@@ -441,32 +448,35 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	int i;
 
-	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+	drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+		 "Writing DIP with CTL reg disabled\n");
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
 	val &= ~g4x_infoframe_enable(type);
 
-	I915_WRITE(reg, val);
+	intel_de_write(dev_priv, reg, val);
 
 	for (i = 0; i < len; i += 4) {
-		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		intel_de_write(dev_priv,
+			       VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
 	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
-		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+		intel_de_write(dev_priv,
+			       VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 
 	val |= g4x_infoframe_enable(type);
 	val &= ~VIDEO_DIP_FREQ_MASK;
 	val |= VIDEO_DIP_FREQ_VSYNC;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 }
 
 static void vlv_read_infoframe(struct intel_encoder *encoder,
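All the *_write_infoframe() variants in this file share one sequence, visible across the hunks above: disable the DIP type, stream the packed frame into the data register, keep writing zeros for the rest of the DIP buffer so the hardware computes its ECC over a fully defined payload, then re-enable with a posting read. A compact user-space model of the padding loop; the buffer size is illustrative and the MMIO side is reduced to a plain array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIP_DATA_SIZE 36 /* illustrative; matches no specific hardware */

/* Toy model of g4x/ibx/cpt/vlv_write_infoframe(): after the real
 * payload, every remaining DIP data byte is written as zero so the
 * hardware ECC is computed over deterministic contents. */
static void write_infoframe(uint8_t *dip, const uint8_t *frame, size_t len)
{
	size_t i;

	for (i = 0; i < len && i < DIP_DATA_SIZE; i++)
		dip[i] = frame[i];
	/* Write every possible data byte to force correct ECC calculation. */
	for (; i < DIP_DATA_SIZE; i++)
		dip[i] = 0;
}

int main(void)
{
	uint8_t dip[DIP_DATA_SIZE];
	const uint8_t avi[] = { 0x82, 0x02, 0x0d }; /* fake header bytes */

	memset(dip, 0xaa, sizeof(dip)); /* stale contents */
	write_infoframe(dip, avi, sizeof(avi));
	printf("last byte after write: %u\n", dip[DIP_DATA_SIZE - 1]);
	return 0;
}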
@@ -479,15 +489,16 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
 	u32 val, *data = frame;
 	int i;
 
-	val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+	val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
 
 	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 	val |= g4x_infoframe_index(type);
 
-	I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+	intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
 
 	for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+		*data++ = intel_de_read(dev_priv,
+					VLV_TVIDEO_DIP_DATA(crtc->pipe));
 }
 
 static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
@@ -495,7 +506,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
-	u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
+	u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
 
 	if ((val & VIDEO_DIP_ENABLE) == 0)
 		return 0;
@@ -519,28 +530,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	int data_size;
 	int i;
-	u32 val = I915_READ(ctl_reg);
+	u32 val = intel_de_read(dev_priv, ctl_reg);
 
 	data_size = hsw_dip_data_size(dev_priv, type);
 
-	WARN_ON(len > data_size);
+	drm_WARN_ON(&dev_priv->drm, len > data_size);
 
 	val &= ~hsw_infoframe_enable(type);
-	I915_WRITE(ctl_reg, val);
+	intel_de_write(dev_priv, ctl_reg, val);
 
 	for (i = 0; i < len; i += 4) {
-		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
-					    type, i >> 2), *data);
+		intel_de_write(dev_priv,
+			       hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+			       *data);
 		data++;
 	}
 	/* Write every possible data byte to force correct ECC calculation. */
 	for (; i < data_size; i += 4)
-		I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
-					    type, i >> 2), 0);
+		intel_de_write(dev_priv,
+			       hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+			       0);
 
 	val |= hsw_infoframe_enable(type);
-	I915_WRITE(ctl_reg, val);
-	POSTING_READ(ctl_reg);
+	intel_de_write(dev_priv, ctl_reg, val);
+	intel_de_posting_read(dev_priv, ctl_reg);
 }
 
 static void hsw_read_infoframe(struct intel_encoder *encoder,
@@ -553,18 +566,19 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
 	u32 val, *data = frame;
 	int i;
 
-	val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+	val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder));
 
 	for (i = 0; i < len; i += 4)
-		*data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
-						     type, i >> 2));
+		*data++ = intel_de_read(dev_priv,
+					hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
 }
 
 static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
 				  const struct intel_crtc_state *pipe_config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+	u32 val = intel_de_read(dev_priv,
+				HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
 	u32 mask;
 
 	mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -655,12 +669,12 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
 	     intel_hdmi_infoframe_enable(type)) == 0)
 		return;
 
-	if (WARN_ON(frame->any.type != type))
+	if (drm_WARN_ON(encoder->base.dev, frame->any.type != type))
 		return;
 
 	/* see comment above for the reason for this offset */
 	len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
-	if (WARN_ON(len < 0))
+	if (drm_WARN_ON(encoder->base.dev, len < 0))
 		return;
 
 	/* Insert the 'hole' (see big comment above) at position 3 */
@@ -734,8 +748,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
 	drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
 
 	/* nonsense combination */
-	WARN_ON(crtc_state->limited_color_range &&
-		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+	drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
 
 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
 		drm_hdmi_avi_infoframe_quant_range(frame, connector,
@@ -753,7 +767,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
 	/* TODO: handle pixel repetition for YCBCR420 outputs */
 
 	ret = hdmi_avi_infoframe_check(frame);
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(encoder->base.dev, ret))
 		return false;
 
 	return true;
@@ -774,13 +788,13 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
 		intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
 
 	ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(encoder->base.dev, ret))
 		return false;
 
 	frame->sdi = HDMI_SPD_SDI_PC;
 
 	ret = hdmi_spd_infoframe_check(frame);
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(encoder->base.dev, ret))
 		return false;
 
 	return true;
@@ -806,11 +820,11 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
 	ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
 							  conn_state->connector,
 							  &crtc_state->hw.adjusted_mode);
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(encoder->base.dev, ret))
 		return false;
 
 	ret = hdmi_vendor_infoframe_check(frame);
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(encoder->base.dev, ret))
 		return false;
 
 	return true;
@@ -844,7 +858,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
 	}
 
 	ret = hdmi_drm_infoframe_check(frame);
-	if (WARN_ON(ret))
+	if (drm_WARN_ON(&dev_priv->drm, ret))
 		return false;
 
 	return true;
@@ -859,7 +873,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = VIDEO_DIP_CTL;
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
@@ -885,8 +899,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 		}
 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
 			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 		return;
 	}
 
@@ -904,8 +918,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
 	val &= ~(VIDEO_DIP_ENABLE_AVI |
 		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 
 	intel_write_infoframe(encoder, crtc_state,
 			      HDMI_INFOFRAME_TYPE_AVI,
@@ -982,7 +996,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
 	else
 		return false;
 
-	I915_WRITE(reg, crtc_state->infoframes.gcp);
+	intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
 
 	return true;
 }
@@ -1007,7 +1021,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
 	else
 		return;
 
-	crtc_state->infoframes.gcp = I915_READ(reg);
+	crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
 }
 
 static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1042,7 +1056,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
@@ -1056,15 +1070,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
 			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		WARN(val & VIDEO_DIP_ENABLE,
-		     "DIP already enabled on port %c\n",
-		     (val & VIDEO_DIP_PORT_MASK) >> 29);
+		drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+			 "DIP already enabled on port %c\n",
+			 (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
@@ -1077,8 +1091,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
 	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 
 	intel_write_infoframe(encoder, crtc_state,
 			      HDMI_INFOFRAME_TYPE_AVI,
@@ -1100,7 +1114,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
@@ -1113,8 +1127,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
 			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 		return;
 	}
 
@@ -1126,8 +1140,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
 	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 
 	intel_write_infoframe(encoder, crtc_state,
 			      HDMI_INFOFRAME_TYPE_AVI,
@@ -1149,7 +1163,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 	u32 port = VIDEO_DIP_PORT(encoder->port);
 
 	assert_hdmi_port_disabled(intel_hdmi);
@@ -1163,15 +1177,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
 			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		WARN(val & VIDEO_DIP_ENABLE,
-		     "DIP already enabled on port %c\n",
-		     (val & VIDEO_DIP_PORT_MASK) >> 29);
+		drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+			 "DIP already enabled on port %c\n",
+			 (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
@@ -1184,8 +1198,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
 	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 
 	intel_write_infoframe(encoder, crtc_state,
 			      HDMI_INFOFRAME_TYPE_AVI,
@@ -1205,7 +1219,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
-	u32 val = I915_READ(reg);
+	u32 val = intel_de_read(dev_priv, reg);
 
 	assert_hdmi_transcoder_func_disabled(dev_priv,
 					     crtc_state->cpu_transcoder);
@@ -1216,16 +1230,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
 		 VIDEO_DIP_ENABLE_DRM_GLK);
 
 	if (!enable) {
-		I915_WRITE(reg, val);
-		POSTING_READ(reg);
+		intel_de_write(dev_priv, reg, val);
+		intel_de_posting_read(dev_priv, reg);
 		return;
 	}
 
 	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
+	intel_de_write(dev_priv, reg, val);
+	intel_de_posting_read(dev_priv, reg);
 
 	intel_write_infoframe(encoder, crtc_state,
 			      HDMI_INFOFRAME_TYPE_AVI,
@@ -1260,10 +1274,9 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
 				unsigned int offset, void *buffer, size_t size)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
 	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
-		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;
 	u8 start = offset & 0xff;
@@ -1290,10 +1303,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
 static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 				 unsigned int offset, void *buffer, size_t size)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
 	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
-		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;
 	u8 *write_buf;
@@ -1325,10 +1337,9 @@ static
 int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
 				  u8 *an)
 {
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
 	struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
-	struct drm_i915_private *dev_priv =
-		intel_dig_port->base.base.dev->dev_private;
-	struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+	struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
 							      hdmi->ddc_bus);
 	int ret;
 
@@ -1447,7 +1458,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
 static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
 	struct drm_crtc *crtc = connector->base.state->crtc;
 	struct intel_crtc *intel_crtc = container_of(crtc,
 						     struct intel_crtc, base);
@@ -1455,7 +1466,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
 	int ret;
 
 	for (;;) {
-		scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+		scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
 		if (scanline > 100 && scanline < 200)
 			break;
 		usleep_range(25, 50);
@@ -1507,8 +1518,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
 static
 bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 {
-	struct drm_i915_private *dev_priv =
-		intel_dig_port->base.base.dev->dev_private;
+	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
 	struct intel_connector *connector =
 		intel_dig_port->hdmi.attached_connector;
 	enum port port = intel_dig_port->base.port;
@@ -1523,14 +1533,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
 	if (ret)
 		return false;
 
-	I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+	intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
 
 	/* Wait for Ri prime match */
-	if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+	if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
 		     (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
 		DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
-			  I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
-						port)));
+			  intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
 		return false;
 	}
 	return true;
@@ -1767,8 +1776,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
 	else
 		hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
 
-	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
-	POSTING_READ(intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 }
 
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -1802,7 +1811,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
 
-	tmp = I915_READ(intel_hdmi->hdmi_reg);
+	tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -1863,7 +1872,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
 {
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 
-	WARN_ON(!pipe_config->has_hdmi_sink);
+	drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
 	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
 			 pipe_name(crtc->pipe));
 	intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1878,14 +1887,14 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
-	temp = I915_READ(intel_hdmi->hdmi_reg);
+	temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
 	if (pipe_config->has_audio)
 		temp |= HDMI_AUDIO_ENABLE;
 
-	I915_WRITE(intel_hdmi->hdmi_reg, temp);
-	POSTING_READ(intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	if (pipe_config->has_audio)
 		intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
@@ -1900,7 +1909,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	u32 temp;
 
-	temp = I915_READ(intel_hdmi->hdmi_reg);
+	temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
 	if (pipe_config->has_audio)
@@ -1910,10 +1919,10 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 	 * HW workaround, need to write this twice for issue
 	 * that may result in first write getting masked.
 	 */
-	I915_WRITE(intel_hdmi->hdmi_reg, temp);
-	POSTING_READ(intel_hdmi->hdmi_reg);
-	I915_WRITE(intel_hdmi->hdmi_reg, temp);
-	POSTING_READ(intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	/*
 	 * HW workaround, need to toggle enable bit off and on
@@ -1924,17 +1933,18 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 	 */
 	if (pipe_config->pipe_bpp > 24 &&
 	    pipe_config->pixel_multiplier > 1) {
-		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
-		POSTING_READ(intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+		               temp & ~SDVO_ENABLE);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 		/*
 		 * HW workaround, need to write this twice for issue
 		 * that may result in first write getting masked.
 		 */
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 	}
 
 	if (pipe_config->has_audio)
@@ -1952,7 +1962,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	enum pipe pipe = crtc->pipe;
 	u32 temp;
 
-	temp = I915_READ(intel_hdmi->hdmi_reg);
+	temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
 	if (pipe_config->has_audio)
@@ -1969,27 +1979,25 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 	 */
 
 	if (pipe_config->pipe_bpp > 24) {
-		I915_WRITE(TRANS_CHICKEN1(pipe),
-			   I915_READ(TRANS_CHICKEN1(pipe)) |
-			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+		intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+		               intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
 
 		temp &= ~SDVO_COLOR_FORMAT_MASK;
 		temp |= SDVO_COLOR_FORMAT_8bpc;
 	}
 
-	I915_WRITE(intel_hdmi->hdmi_reg, temp);
-	POSTING_READ(intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	if (pipe_config->pipe_bpp > 24) {
 		temp &= ~SDVO_COLOR_FORMAT_MASK;
 		temp |= HDMI_COLOR_FORMAT_12bpc;
 
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
-		I915_WRITE(TRANS_CHICKEN1(pipe),
-			   I915_READ(TRANS_CHICKEN1(pipe)) &
-			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+		intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+		               intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
 	}
 
 	if (pipe_config->has_audio)
@@ -2014,11 +2022,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
 	u32 temp;
 
-	temp = I915_READ(intel_hdmi->hdmi_reg);
+	temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
-	I915_WRITE(intel_hdmi->hdmi_reg, temp);
-	POSTING_READ(intel_hdmi->hdmi_reg);
+	intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+	intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 	/*
 	 * HW workaround for IBX, we need to move the port
@@ -2039,14 +2047,14 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 		 * HW workaround, need to write this twice for issue
 		 * that may result in first write getting masked.
 		 */
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 		temp &= ~SDVO_ENABLE;
-		I915_WRITE(intel_hdmi->hdmi_reg, temp);
-		POSTING_READ(intel_hdmi->hdmi_reg);
+		intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+		intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
 
 		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -2090,9 +2098,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
 static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[encoder->port];
-	int max_tmds_clock;
+	int max_tmds_clock, vbt_max_tmds_clock;
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		max_tmds_clock = 594000;
@@ -2103,15 +2109,23 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
 	else
 		max_tmds_clock = 165000;
 
-	if (info->max_tmds_clock)
-		max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+	vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+	if (vbt_max_tmds_clock)
+		max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
 
 	return max_tmds_clock;
 }
 
+static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
+				const struct drm_connector_state *conn_state)
+{
+	return hdmi->has_hdmi_sink &&
+		READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
 static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
 				 bool respect_downstream_limits,
-				 bool force_dvi)
+				 bool has_hdmi_sink)
 {
 	struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
 	int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
@@ -2127,7 +2141,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
 		if (info->max_tmds_clock)
 			max_tmds_clock = min(max_tmds_clock,
 					     info->max_tmds_clock);
-		else if (!hdmi->has_hdmi_sink || force_dvi)
+		else if (!has_hdmi_sink)
 			max_tmds_clock = min(max_tmds_clock, 165000);
 	}
 
@@ -2137,13 +2151,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
 static enum drm_mode_status
 hdmi_port_clock_valid(struct intel_hdmi *hdmi,
 		      int clock, bool respect_downstream_limits,
-		      bool force_dvi)
+		      bool has_hdmi_sink)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
 
 	if (clock < 25000)
 		return MODE_CLOCK_LOW;
-	if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
+	if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits,
+					  has_hdmi_sink))
 		return MODE_CLOCK_HIGH;
 
 	/* BXT DPLL can't generate 223-240 MHz */
@@ -2165,16 +2180,13 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum drm_mode_status status;
-	int clock;
+	int clock = mode->clock;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-	bool force_dvi =
-		READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+	bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	clock = mode->clock;
-
 	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
 		clock *= 2;
 
@@ -2188,18 +2200,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 		clock /= 2;
 
 	/* check if we can do 8bpc */
-	status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
+	status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
 
-	if (hdmi->has_hdmi_sink && !force_dvi) {
+	if (has_hdmi_sink) {
 		/* if we can't do 8bpc we may still be able to do 12bpc */
 		if (status != MODE_OK && !HAS_GMCH(dev_priv))
 			status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
-						       true, force_dvi);
+						       true, has_hdmi_sink);
 
 		/* if we can't do 8,12bpc we may still be able to do 10bpc */
 		if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
 			status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
-						       true, force_dvi);
+						       true, has_hdmi_sink);
 	}
 	if (status != MODE_OK)
 		return status;
@@ -2263,14 +2275,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
 		}
 	}
 
-	/* Display WA #1139: glk */
-	if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
-	    adjusted_mode->htotal > 5460)
-		return false;
-
-	/* Display Wa_1405510057:icl */
+	/* Display Wa_1405510057:icl,ehl */
 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
-	    bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+	    bpc == 10 && IS_GEN(dev_priv, 11) &&
 	    (adjusted_mode->crtc_hblank_end -
 	     adjusted_mode->crtc_hblank_start) % 8 == 2)
 		return false;
@@ -2315,7 +2322,7 @@ static int intel_hdmi_port_clock(int clock, int bpc)
 
 static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 				  struct intel_crtc_state *crtc_state,
-				  int clock, bool force_dvi)
+				  int clock)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	int bpc;
@@ -2324,7 +2331,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 		if (hdmi_deep_color_possible(crtc_state, bpc) &&
 		    hdmi_port_clock_valid(intel_hdmi,
 					  intel_hdmi_port_clock(clock, bpc),
-					  true, force_dvi) == MODE_OK)
+					  true, crtc_state->has_hdmi_sink) == MODE_OK)
 			return bpc;
 	}
 
@@ -2332,8 +2339,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
 }
 
 static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
-				    struct intel_crtc_state *crtc_state,
-				    bool force_dvi)
+				    struct intel_crtc_state *crtc_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	const struct drm_display_mode *adjusted_mode =
@@ -2347,8 +2353,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 		clock /= 2;
 
-	bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
-				     clock, force_dvi);
+	bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
 
 	crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
 
@@ -2364,7 +2369,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
 		      bpc, crtc_state->pipe_bpp);
 
 	if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
-				  false, force_dvi) != MODE_OK) {
+				  false, crtc_state->has_hdmi_sink) != MODE_OK) {
 		DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
 			      crtc_state->port_clock);
 		return -EINVAL;
@@ -2412,14 +2417,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 	struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
 	struct intel_digital_connector_state *intel_conn_state =
 		to_intel_digital_connector_state(conn_state);
-	bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 	int ret;
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return -EINVAL;
 
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
-	pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+	pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi,
+							 conn_state);
 
 	if (pipe_config->has_hdmi_sink)
 		pipe_config->has_infoframe = true;
@@ -2448,7 +2453,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 				intel_conn_state->force_audio == HDMI_AUDIO_ON;
 	}
 
-	ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+	ret = intel_hdmi_compute_clock(encoder, pipe_config);
 	if (ret)
 		return ret;
 
@@ -2808,7 +2813,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
 	if (ret)
 		return ret;
 
-	i915_debugfs_connector_add(connector);
+	intel_connector_debugfs_add(connector);
 
 	intel_hdmi_create_i2c_symlink(connector);
 
@@ -3002,7 +3007,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
 	else if (intel_phy_is_tc(dev_priv, phy))
 		return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
 
-	WARN(1, "Unknown port:%c\n", port_name(port));
+	drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
 	return GMBUS_PIN_2_BXT;
 }
 
@@ -3052,17 +3057,17 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
 	return ddc_pin;
 }
 
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
-			     enum port port)
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
 {
-	const struct ddi_vbt_port_info *info =
-		&dev_priv->vbt.ddi_port_info[port];
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
 	u8 ddc_pin;
 
-	if (info->alternate_ddc_pin) {
+	ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+	if (ddc_pin) {
 		DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
-			      info->alternate_ddc_pin, port_name(port));
-		return info->alternate_ddc_pin;
+			      ddc_pin, port_name(port));
+		return ddc_pin;
 	}
 
 	if (HAS_PCH_MCC(dev_priv))
@@ -3139,16 +3144,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
 		      intel_encoder->base.base.id, intel_encoder->base.name);
 
-	if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+	if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
 		return;
 
-	if (WARN(intel_dig_port->max_lanes < 4,
-		 "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
-		 intel_dig_port->max_lanes, intel_encoder->base.base.id,
-		 intel_encoder->base.name))
+	if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+		     "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+		     intel_dig_port->max_lanes, intel_encoder->base.base.id,
+		     intel_encoder->base.name))
 		return;
 
-	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
 	ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
 
 	drm_connector_init_with_ddc(dev, connector,
@@ -3165,6 +3170,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 		connector->ycbcr_420_allowed = true;
 
 	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	if (HAS_DDI(dev_priv))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -3188,8 +3194,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	 * generated on the port when a cable is not attached.
 	 */
 	if (IS_G45(dev_priv)) {
-		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
-		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+		               (temp & ~0xf) | 0xd);
 	}
 
 	cec_fill_conn_info_from_drm(&conn_info, connector);
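
Aside, not part of the patch: the force_dvi plumbing removed above collapses
into the new intel_has_hdmi_sink() helper, and intel_hdmi_mode_valid() keeps
probing 8 bpc first, then 12 bpc (TMDS clock x 3/2) and 10 bpc (x 5/4). Below
is a compilable userspace sketch of that fallback order, not driver code: the
594 MHz cap and the 223-240 MHz DPLL hole are illustrative stand-ins for what
intel_hdmi_source_max_tmds_clock() and hdmi_port_clock_valid() derive per
platform, and the extra HAS_GMCH/gen checks are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative source limit; the driver computes the real one from
 * platform generation and VBT data. */
#define MAX_TMDS_KHZ	594000

static bool clock_ok(int clock, bool has_hdmi_sink)
{
        /* DVI-only sinks are capped at 165 MHz, as in hdmi_port_clock_limit() */
        int limit = has_hdmi_sink ? MAX_TMDS_KHZ : 165000;

        if (clock < 25000 || clock > limit)
                return false;

        /* models the "BXT DPLL can't generate 223-240 MHz" check */
        if (clock > 223333 && clock < 240000)
                return false;

        return true;
}

/* Mirrors the 8 -> 12 -> 10 bpc fallback in intel_hdmi_mode_valid():
 * deep color scales the TMDS clock by bpc/8, so a clock that lands in an
 * invalid range at 8 bpc may become valid once scaled. */
static int pick_bpc(int clock, bool has_hdmi_sink)
{
        if (clock_ok(clock, has_hdmi_sink))
                return 8;
        if (has_hdmi_sink && clock_ok(clock * 3 / 2, has_hdmi_sink))
                return 12;
        if (has_hdmi_sink && clock_ok(clock * 5 / 4, has_hdmi_sink))
                return 10;
        return 0; /* reject the mode */
}

int main(void)
{
        /* 230 MHz sits in the hole at 8 bpc; 12 bpc shifts it to 345 MHz */
        printf("hdmi sink: %d bpc\n", pick_bpc(230000, true));  /* 12 */
        printf("dvi sink:  %d bpc\n", pick_bpc(230000, false)); /* 0 */
        return 0;
}
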
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0..8ff1f76 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
 #include <linux/hdmi.h>
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_reg.h"
 
 struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 99d3a3c..a091442 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
 
 #include <linux/kernel.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_drv.h"
 #include "intel_display_types.h"
 #include "intel_hotplug.h"
@@ -89,29 +87,16 @@
 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 				   enum port port)
 {
-	switch (port) {
-	case PORT_A:
-		return HPD_PORT_A;
-	case PORT_B:
-		return HPD_PORT_B;
-	case PORT_C:
-		return HPD_PORT_C;
-	case PORT_D:
-		return HPD_PORT_D;
-	case PORT_E:
-		return HPD_PORT_E;
-	case PORT_F:
-		if (IS_CNL_WITH_PORT_F(dev_priv))
-			return HPD_PORT_E;
-		return HPD_PORT_F;
-	case PORT_G:
-		return HPD_PORT_G;
-	case PORT_H:
-		return HPD_PORT_H;
-	case PORT_I:
-		return HPD_PORT_I;
+	enum phy phy = intel_port_to_phy(dev_priv, port);
+
+	switch (phy) {
+	case PHY_F:
+		return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+	case PHY_A ... PHY_E:
+	case PHY_G ... PHY_I:
+		return HPD_PORT_A + phy - PHY_A;
 	default:
-		MISSING_CASE(port);
+		MISSING_CASE(phy);
 		return HPD_NONE;
 	}
 }
@@ -120,6 +105,20 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 #define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
 #define HPD_RETRY_DELAY			1000
 
+static enum hpd_pin
+intel_connector_hpd_pin(struct intel_connector *connector)
+{
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+	/*
+	 * MST connectors get their encoder attached dynamically
+	 * so we need to make sure we have an encoder here. But since
+	 * MST encoders have their hpd_pin set to HPD_NONE we don't
+	 * have to special case them beyond that.
+	 */
+	return encoder ? encoder->hpd_pin : HPD_NONE;
+}
+
 /**
  * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
  * @dev_priv: private driver data pointer
@@ -171,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
 	hpd->stats[pin].count += increment;
 	if (hpd->stats[pin].count > threshold) {
 		hpd->stats[pin].state = HPD_MARK_DISABLED;
-		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+		drm_dbg_kms(&dev_priv->drm,
+			    "HPD interrupt storm detected on PIN %d\n", pin);
 		storm = true;
 	} else {
-		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+		drm_dbg_kms(&dev_priv->drm,
+			    "Received HPD interrupt on PIN %d - cnt: %d\n",
+			      pin,
 			      hpd->stats[pin].count);
 	}
 
@@ -185,37 +187,32 @@ static void
 intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_connector *intel_connector;
-	struct intel_encoder *intel_encoder;
-	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
-	enum hpd_pin pin;
+	struct intel_connector *connector;
 	bool hpd_disabled = false;
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		enum hpd_pin pin;
+
+		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
 			continue;
 
-		intel_connector = to_intel_connector(connector);
-		intel_encoder = intel_connector->encoder;
-		if (!intel_encoder)
-			continue;
-
-		pin = intel_encoder->hpd_pin;
+		pin = intel_connector_hpd_pin(connector);
 		if (pin == HPD_NONE ||
 		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
 			continue;
 
-		DRM_INFO("HPD interrupt storm detected on connector %s: "
+		drm_info(&dev_priv->drm,
+			 "HPD interrupt storm detected on connector %s: "
 			 "switching from hotplug detection to polling\n",
-			 connector->name);
+			 connector->base.name);
 
 		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT
-			| DRM_CONNECTOR_POLL_DISCONNECT;
+		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+			DRM_CONNECTOR_POLL_DISCONNECT;
 		hpd_disabled = true;
 	}
 	drm_connector_list_iter_end(&conn_iter);
@@ -234,40 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 		container_of(work, typeof(*dev_priv),
 			     hotplug.reenable_work.work);
 	struct drm_device *dev = &dev_priv->drm;
+	struct drm_connector_list_iter conn_iter;
+	struct intel_connector *connector;
 	intel_wakeref_t wakeref;
 	enum hpd_pin pin;
 
 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	for_each_hpd_pin(pin) {
-		struct drm_connector *connector;
-		struct drm_connector_list_iter conn_iter;
 
-		if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		pin = intel_connector_hpd_pin(connector);
+		if (pin == HPD_NONE ||
+		    dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
 			continue;
 
-		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
-		drm_connector_list_iter_begin(dev, &conn_iter);
-		drm_for_each_connector_iter(connector, &conn_iter) {
-			struct intel_connector *intel_connector = to_intel_connector(connector);
-
-			/* Don't check MST ports, they don't have pins */
-			if (!intel_connector->mst_port &&
-			    intel_connector->encoder->hpd_pin == pin) {
-				if (connector->polled != intel_connector->polled)
-					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-							 connector->name);
-				connector->polled = intel_connector->polled;
-				if (!connector->polled)
-					connector->polled = DRM_CONNECTOR_POLL_HPD;
-			}
-		}
-		drm_connector_list_iter_end(&conn_iter);
+		if (connector->base.polled != connector->polled)
+			drm_dbg(&dev_priv->drm,
+				"Reenabling HPD on connector %s\n",
+				connector->base.name);
+		connector->base.polled = connector->polled;
 	}
+	drm_connector_list_iter_end(&conn_iter);
+
+	for_each_hpd_pin(pin) {
+		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
+			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+	}
+
 	if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev_priv);
+
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -281,7 +276,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
 	struct drm_device *dev = connector->base.dev;
 	enum drm_connector_status old_status;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
 	old_status = connector->base.status;
 
 	connector->base.status =
@@ -290,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
 	if (old_status == connector->base.status)
 		return INTEL_HOTPLUG_UNCHANGED;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-		      connector->base.base.id,
-		      connector->base.name,
-		      drm_get_connector_status_name(old_status),
-		      drm_get_connector_status_name(connector->base.status));
+	drm_dbg_kms(&to_i915(dev)->drm,
+		    "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+		    connector->base.base.id,
+		    connector->base.name,
+		    drm_get_connector_status_name(old_status),
+		    drm_get_connector_status_name(connector->base.status));
 
 	return INTEL_HOTPLUG_CHANGED;
 }
@@ -361,16 +357,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
 		container_of(work, struct drm_i915_private,
 			     hotplug.hotplug_work.work);
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_connector *intel_connector;
-	struct intel_encoder *intel_encoder;
-	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
+	struct intel_connector *connector;
 	u32 changed = 0, retry = 0;
 	u32 hpd_event_bits;
 	u32 hpd_retry_bits;
 
 	mutex_lock(&dev->mode_config.mutex);
-	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
 
 	spin_lock_irq(&dev_priv->irq_lock);
 
@@ -385,21 +379,25 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		enum hpd_pin pin;
 		u32 hpd_bit;
 
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
+		pin = intel_connector_hpd_pin(connector);
+		if (pin == HPD_NONE)
 			continue;
-		intel_encoder = intel_connector->encoder;
-		hpd_bit = BIT(intel_encoder->hpd_pin);
-		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
-			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-				      connector->name, intel_encoder->hpd_pin);
 
-			switch (intel_encoder->hotplug(intel_encoder,
-						       intel_connector,
-						       hpd_event_bits & hpd_bit)) {
+		hpd_bit = BIT(pin);
+		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
+			struct intel_encoder *encoder =
+				intel_attached_encoder(connector);
+
+			drm_dbg_kms(&dev_priv->drm,
+				    "Connector %s (pin %i) received hotplug event.\n",
+				    connector->base.name, pin);
+
+			switch (encoder->hotplug(encoder, connector,
+						 hpd_event_bits & hpd_bit)) {
 			case INTEL_HOTPLUG_UNCHANGED:
 				break;
 			case INTEL_HOTPLUG_CHANGED:
@@ -481,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 
 		long_hpd = long_mask & BIT(pin);
 
-		DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
-				 encoder->base.base.id, encoder->base.name,
-				 long_hpd ? "long" : "short");
+		drm_dbg(&dev_priv->drm,
+			"digital hpd on [ENCODER:%d:%s] - %s\n",
+			encoder->base.base.id, encoder->base.name,
+			long_hpd ? "long" : "short");
 		queue_dig = true;
 
 		if (long_hpd) {
@@ -509,8 +508,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			 * hotplug bits itself. So only WARN about unexpected
 			 * interrupts on saner platforms.
 			 */
-			WARN_ONCE(!HAS_GMCH(dev_priv),
-				  "Received HPD interrupt on pin %d although disabled\n", pin);
+			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+				      "Received HPD interrupt on pin %d although disabled\n",
+				      pin);
 			continue;
 		}
 
@@ -601,8 +601,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
 		container_of(work, struct drm_i915_private,
 			     hotplug.poll_init_work);
 	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
+	struct intel_connector *connector;
 	bool enabled;
 
 	mutex_lock(&dev->mode_config.mutex);
@@ -610,23 +610,18 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
 	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
 
 	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_connector *intel_connector =
-			to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
+	for_each_intel_connector_iter(connector, &conn_iter) {
+		enum hpd_pin pin;
 
-		/* MST has a dynamic intel_connector->encoder and it's reprobing
-		 * is all handled by the MST helpers. */
-		if (intel_connector->mst_port)
+		pin = intel_connector_hpd_pin(connector);
+		if (pin == HPD_NONE)
 			continue;
 
-		if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
-		    intel_connector->encoder->hpd_pin > HPD_NONE) {
-			connector->polled = enabled ?
-				DRM_CONNECTOR_POLL_CONNECT |
-				DRM_CONNECTOR_POLL_DISCONNECT :
-				DRM_CONNECTOR_POLL_HPD;
-		}
+		connector->base.polled = connector->polled;
+
+		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT;
 	}
 	drm_connector_list_iter_end(&conn_iter);
 
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f5..1e6b4fd 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
 
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 struct drm_i915_private;
 struct intel_connector;
 struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 0b67f788..ad5cc13 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -71,6 +71,7 @@
 #include <drm/intel_lpe_audio.h>
 
 #include "i915_drv.h"
+#include "intel_de.h"
 #include "intel_lpe_audio.h"
 
 #define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
@@ -126,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
 	kfree(pdata);
 
 	if (IS_ERR(platdev)) {
-		DRM_ERROR("Failed to allocate LPE audio platform device\n");
+		drm_err(&dev_priv->drm,
+			"Failed to allocate LPE audio platform device\n");
 		return platdev;
 	}
 
@@ -166,7 +168,7 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
 {
 	int irq = dev_priv->lpe_audio.irq;
 
-	WARN_ON(!intel_irqs_enabled(dev_priv));
+	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 	irq_set_chip_and_handler_name(irq,
 				&lpe_audio_irqchip,
 				handle_simple_irq,
@@ -189,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
 		};
 
 		if (!pci_dev_present(atom_hdaudio_ids)) {
-			DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+			drm_info(&dev_priv->drm,
+				 "HDaudio controller not detected, using LPE audio instead\n");
 			lpe_present = true;
 		}
 	}
@@ -202,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
 
 	dev_priv->lpe_audio.irq = irq_alloc_desc(0);
 	if (dev_priv->lpe_audio.irq < 0) {
-		DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+		drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
 			dev_priv->lpe_audio.irq);
 		ret = dev_priv->lpe_audio.irq;
 		goto err;
 	}
 
-	DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+	drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
 
 	ret = lpe_audio_irq_init(dev_priv);
 
 	if (ret) {
-		DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+		drm_err(&dev_priv->drm,
+			"Failed to initialize irqchip for lpe audio: %d\n",
 			ret);
 		goto err_free_irq;
 	}
@@ -222,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
 
 	if (IS_ERR(dev_priv->lpe_audio.platdev)) {
 		ret = PTR_ERR(dev_priv->lpe_audio.platdev);
-		DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+		drm_err(&dev_priv->drm,
+			"Failed to create lpe audio platform device: %d\n",
 			ret);
 		goto err_free_irq;
 	}
@@ -230,7 +235,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
 	/* enable chicken bit; at least this is required for Dell Wyse 3040
 	 * with DP outputs (but only sometimes by some reason!)
 	 */
-	I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
+	intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+		       VLV_CHICKEN_BIT_DBG_ENABLE);
 
 	return 0;
 err_free_irq:
@@ -257,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
 
 	ret = generic_handle_irq(dev_priv->lpe_audio.irq);
 	if (ret)
-		DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
-				ret);
+		drm_err_ratelimited(&dev_priv->drm,
+				    "error handling LPE audio irq: %d\n", ret);
 }
 
 /**
@@ -276,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
 	if (lpe_audio_detect(dev_priv)) {
 		ret = lpe_audio_setup(dev_priv);
 		if (ret < 0)
-			DRM_ERROR("failed to setup LPE Audio bridge\n");
+			drm_err(&dev_priv->drm,
+				"failed to setup LPE Audio bridge\n");
 	}
 	return ret;
 }
@@ -334,7 +341,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
 
 	spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
 
-	audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
+	audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
 
 	if (eld != NULL) {
 		memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -343,8 +350,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
 		ppdata->dp_output = dp_output;
 
 		/* Unmute the amp for both DP and HDMI */
-		I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
-			   audio_enable & ~VLV_AMP_MUTE);
+		intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+			       audio_enable & ~VLV_AMP_MUTE);
 	} else {
 		memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
 		ppdata->pipe = -1;
@@ -352,8 +359,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
 		ppdata->dp_output = false;
 
 		/* Mute the amp for both DP and HDMI */
-		I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
-			   audio_enable | VLV_AMP_MUTE);
+		intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+			       audio_enable | VLV_AMP_MUTE);
 	}
 
 	if (pdata->notify_audio_lpe)
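
Aside, not part of the patch: the lpe_audio conversion is almost entirely
mechanical; every I915_READ()/I915_WRITE() pair becomes a device-qualified
intel_de_read()/intel_de_write(), as in the amp mute toggle just above. A
toy model of that read-modify-write pattern follows; the register file, bit
value, and register index are made up, only the shape of the helpers matches.

#include <stdint.h>
#include <stdio.h>

/* stand-in register file; the real helpers go through uncore MMIO */
struct toy_de { uint32_t regs[8]; };

#define VLV_AMP_MUTE	(1u << 1)	/* illustrative bit */
#define PORT_EN_DBG	3		/* illustrative register index */

static uint32_t de_read(struct toy_de *de, unsigned int reg)
{
        return de->regs[reg];
}

static void de_write(struct toy_de *de, unsigned int reg, uint32_t val)
{
        de->regs[reg] = val;
}

int main(void)
{
        struct toy_de de = { .regs = { [PORT_EN_DBG] = 0xff } };

        /* unmute: clear the mute bit, as intel_lpe_audio_notify() does */
        de_write(&de, PORT_EN_DBG,
                 de_read(&de, PORT_EN_DBG) & ~VLV_AMP_MUTE);
        printf("unmuted: 0x%02x\n", de.regs[PORT_EN_DBG]); /* 0xfd */

        /* mute again: set the bit back */
        de_write(&de, PORT_EN_DBG,
                 de_read(&de, PORT_EN_DBG) | VLV_AMP_MUTE);
        printf("muted:   0x%02x\n", de.regs[PORT_EN_DBG]); /* 0xff */
        return 0;
}
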
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 10696bb..9a067ef 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_atomic.h"
@@ -85,7 +84,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(lvds_reg);
+	val = intel_de_read(dev_priv, lvds_reg);
 
 	/* asserts want to know the pipe even if the port is disabled */
 	if (HAS_PCH_CPT(dev_priv))
@@ -125,7 +124,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
 
-	tmp = I915_READ(lvds_encoder->reg);
+	tmp = intel_de_read(dev_priv, lvds_encoder->reg);
 	if (tmp & LVDS_HSYNC_POLARITY)
 		flags |= DRM_MODE_FLAG_NHSYNC;
 	else
@@ -143,7 +142,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
 	/* gen2/3 store dither state in pfit control, needs to match */
 	if (INTEL_GEN(dev_priv) < 4) {
-		tmp = I915_READ(PFIT_CONTROL);
+		tmp = intel_de_read(dev_priv, PFIT_CONTROL);
 
 		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
 	}
@@ -156,18 +155,18 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+	pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET;
 
-	val = I915_READ(PP_ON_DELAYS(0));
+	val = intel_de_read(dev_priv, PP_ON_DELAYS(0));
 	pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
 	pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
 	pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
 
-	val = I915_READ(PP_OFF_DELAYS(0));
+	val = intel_de_read(dev_priv, PP_OFF_DELAYS(0));
 	pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
 	pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
 
-	val = I915_READ(PP_DIVISOR(0));
+	val = intel_de_read(dev_priv, PP_DIVISOR(0));
 	pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
 	val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
 	/*
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
 
 	if (INTEL_GEN(dev_priv) <= 4 &&
 	    pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
-		DRM_DEBUG_KMS("Panel power timings uninitialized, "
-			      "setting defaults\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Panel power timings uninitialized, "
+			    "setting defaults\n");
 		/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
 		pps->t1_t2 = 40 * 10;
 		pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
 		pps->tx = 200 * 10;
 	}
 
-	DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
-			 "divider %d port %d powerdown_on_reset %d\n",
-			 pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
-			 pps->divider, pps->port, pps->powerdown_on_reset);
+	drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+		"divider %d port %d powerdown_on_reset %d\n",
+		pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+		pps->divider, pps->port, pps->powerdown_on_reset);
 }
 
 static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -203,25 +203,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(PP_CONTROL(0));
-	WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+	val = intel_de_read(dev_priv, PP_CONTROL(0));
+	drm_WARN_ON(&dev_priv->drm,
+		    (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
 	if (pps->powerdown_on_reset)
 		val |= PANEL_POWER_RESET;
-	I915_WRITE(PP_CONTROL(0), val);
+	intel_de_write(dev_priv, PP_CONTROL(0), val);
 
-	I915_WRITE(PP_ON_DELAYS(0),
-		   REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
-		   REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
-		   REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+	intel_de_write(dev_priv, PP_ON_DELAYS(0),
+		       REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
 
-	I915_WRITE(PP_OFF_DELAYS(0),
-		   REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
-		   REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+	intel_de_write(dev_priv, PP_OFF_DELAYS(0),
+		       REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
 
-	I915_WRITE(PP_DIVISOR(0),
-		   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
-		   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
-				  DIV_ROUND_UP(pps->t4, 1000) + 1));
+	intel_de_write(dev_priv, PP_DIVISOR(0),
+		       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
 }
 
 static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -299,7 +295,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
 		temp |= LVDS_VSYNC_POLARITY;
 
-	I915_WRITE(lvds_encoder->reg, temp);
+	intel_de_write(dev_priv, lvds_encoder->reg, temp);
 }
 
 /*
@@ -313,13 +309,16 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+	intel_de_write(dev_priv, lvds_encoder->reg,
+		       intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
 
-	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
-	POSTING_READ(lvds_encoder->reg);
+	intel_de_write(dev_priv, PP_CONTROL(0),
+		       intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+	intel_de_posting_read(dev_priv, lvds_encoder->reg);
 
 	if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
-		DRM_ERROR("timed out waiting for panel to power on\n");
+		drm_err(&dev_priv->drm,
+			"timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(pipe_config, conn_state);
 }
@@ -331,12 +330,15 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+	intel_de_write(dev_priv, PP_CONTROL(0),
+		       intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
 	if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
-		DRM_ERROR("timed out waiting for panel to power off\n");
+		drm_err(&dev_priv->drm,
+			"timed out waiting for panel to power off\n");
 
-	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
-	POSTING_READ(lvds_encoder->reg);
+	intel_de_write(dev_priv, lvds_encoder->reg,
+		       intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+	intel_de_posting_read(dev_priv, lvds_encoder->reg);
 }
 
 static void gmch_disable_lvds(struct intel_encoder *encoder,
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 
 	/* Should never happen!! */
 	if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
-		DRM_ERROR("Can't support LVDS on pipe A\n");
+		drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
 		return -EINVAL;
 	}
 
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 		lvds_bpp = 6*3;
 
 	if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
-		DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
-			      pipe_config->pipe_bpp, lvds_bpp);
+		drm_dbg_kms(&dev_priv->drm,
+			    "forcing display bpp (was %d) to LVDS (%d)\n",
+			    pipe_config->pipe_bpp, lvds_bpp);
 		pipe_config->pipe_bpp = lvds_bpp;
 	}
 
@@ -791,7 +794,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 	 * we need to check "the value to be set" in VBT when LVDS
 	 * register is uninitialized.
 	 */
-	val = I915_READ(lvds_encoder->reg);
+	val = intel_de_read(dev_priv, lvds_encoder->reg);
 	if (HAS_PCH_CPT(dev_priv))
 		val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
 	else
@@ -827,13 +830,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 
 	/* Skip init on machines we know falsely report LVDS */
 	if (dmi_check_system(intel_no_lvds)) {
-		WARN(!dev_priv->vbt.int_lvds_support,
-		     "Useless DMI match. Internal LVDS support disabled by VBT\n");
+		drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+			 "Useless DMI match. Internal LVDS support disabled by VBT\n");
 		return;
 	}
 
 	if (!dev_priv->vbt.int_lvds_support) {
-		DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Internal LVDS support disabled by VBT\n");
 		return;
 	}
 
@@ -842,7 +846,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	else
 		lvds_reg = LVDS;
 
-	lvds = I915_READ(lvds_reg);
+	lvds = intel_de_read(dev_priv, lvds_reg);
 
 	if (HAS_PCH_SPLIT(dev_priv)) {
 		if ((lvds & LVDS_DETECTED) == 0)
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	pin = GMBUS_PIN_PANEL;
 	if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
 		if ((lvds & LVDS_PORT_EN) == 0) {
-			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "LVDS is not present in VBT\n");
 			return;
 		}
-		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "LVDS is not present in VBT, but enabled anyway\n");
 	}
 
 	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	 */
 	fixed_mode = intel_encoder_current_mode(intel_encoder);
 	if (fixed_mode) {
-		DRM_DEBUG_KMS("using current (BIOS) mode: ");
+		drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
 		drm_mode_debug_printmodeline(fixed_mode);
 		fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 	}
@@ -985,8 +991,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
-	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
-		      lvds_encoder->is_dual_link ? "dual" : "single");
+	drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+		    lvds_encoder->is_dual_link ? "dual" : "single");
 
 	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
 
@@ -995,7 +1001,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 failed:
 	mutex_unlock(&dev->mode_config.mutex);
 
-	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+	drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
 	kfree(lvds_encoder);
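
Aside, not part of the patch: intel_lvds_pps_get_hw_state() and
intel_lvds_pps_init_hw() above round-trip the panel power sequencer delays
through REG_FIELD_GET()/REG_FIELD_PREP(). A self-contained sketch of that
pack/unpack pattern, using simplified macros in place of the kernel's
<linux/bitfield.h> versions; the two masks merely mimic the 13-bit delay
fields, and __builtin_ctz() keeps this GCC/Clang-only.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for REG_FIELD_GET()/REG_FIELD_PREP() */
#define FIELD_SHIFT(mask)	__builtin_ctz(mask)
#define FIELD_GET(mask, val)	(((val) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))

/* illustrative masks shaped like the PP_ON_DELAYS fields */
#define PANEL_POWER_UP_DELAY_MASK	0x1fff0000u
#define PANEL_LIGHT_ON_DELAY_MASK	0x00001fffu

int main(void)
{
        /* pack the defaults from the patch: T2 = 40ms, T5 = 200ms,
         * both in 100 usec units, into one register value */
        uint32_t val = FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, 40 * 10) |
                       FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, 200 * 10);

        printf("t1_t2=%u t5=%u\n",
               (unsigned int)FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val),
               (unsigned int)FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val));
        return 0;
}
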
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e59b499..cc6b009 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,11 +30,10 @@
 #include <linux/firmware.h>
 #include <acpi/video.h>
 
-#include <drm/i915_drm.h>
-
 #include "display/intel_panel.h"
 
 #include "i915_drv.h"
+#include "intel_acpi.h"
 #include "intel_display_types.h"
 #include "intel_opregion.h"
 
@@ -242,29 +241,6 @@ struct opregion_asle_ext {
 #define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
 #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
 
-/*
- * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
- * Attached to the Display Adapter).
- */
-#define ACPI_DISPLAY_INDEX_SHIFT		0
-#define ACPI_DISPLAY_INDEX_MASK			(0xf << 0)
-#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT	4
-#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK	(0xf << 4)
-#define ACPI_DISPLAY_TYPE_SHIFT			8
-#define ACPI_DISPLAY_TYPE_MASK			(0xf << 8)
-#define ACPI_DISPLAY_TYPE_OTHER			(0 << 8)
-#define ACPI_DISPLAY_TYPE_VGA			(1 << 8)
-#define ACPI_DISPLAY_TYPE_TV			(2 << 8)
-#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL	(3 << 8)
-#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4 << 8)
-#define ACPI_VENDOR_SPECIFIC_SHIFT		12
-#define ACPI_VENDOR_SPECIFIC_MASK		(0xf << 12)
-#define ACPI_BIOS_CAN_DETECT			(1 << 16)
-#define ACPI_DEPENDS_ON_VGA			(1 << 17)
-#define ACPI_PIPE_ID_SHIFT			18
-#define ACPI_PIPE_ID_MASK			(7 << 18)
-#define ACPI_DEVICE_ID_SCHEME			(1 << 31)
-
 #define MAX_DSLP	1500
 
 static int swsci(struct drm_i915_private *dev_priv,
@@ -311,7 +287,7 @@ static int swsci(struct drm_i915_private *dev_priv,
 	/* The spec tells us to do this, but we are the only user... */
 	scic = swsci->scic;
 	if (scic & SWSCI_SCIC_INDICATOR) {
-		DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+		drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
 		return -EBUSY;
 	}
 
@@ -335,7 +311,7 @@ static int swsci(struct drm_i915_private *dev_priv,
 	/* Poll for the result. */
 #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
 	if (wait_for(C, dslp)) {
-		DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+		drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
 		return -ETIMEDOUT;
 	}
 
@@ -344,7 +320,7 @@ static int swsci(struct drm_i915_private *dev_priv,
 
 	/* Note: scic == 0 is an error! */
 	if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
-		DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+		drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
 		return -EIO;
 	}
 
@@ -403,8 +379,9 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 		type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
 		break;
 	default:
-		WARN_ONCE(1, "unsupported intel_encoder type %d\n",
-			  intel_encoder->type);
+		drm_WARN_ONCE(&dev_priv->drm, 1,
+			      "unsupported intel_encoder type %d\n",
+			      intel_encoder->type);
 		return -EINVAL;
 	}
 
@@ -448,10 +425,11 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct drm_device *dev = &dev_priv->drm;
 
-	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+	drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
 
 	if (acpi_video_get_backlight_type() == acpi_backlight_native) {
-		DRM_DEBUG_KMS("opregion backlight request ignored\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "opregion backlight request ignored\n");
 		return 0;
 	}
 
@@ -468,7 +446,8 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 	 * Update backlight on all connectors that support backlight (usually
 	 * only one).
 	 */
-	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+	drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+		    bclp);
 	drm_connector_list_iter_begin(dev, &conn_iter);
 	for_each_intel_connector_iter(connector, &conn_iter)
 		intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
@@ -485,13 +464,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
 {
 	/* alsi is the current ALS reading in lux. 0 indicates below sensor
 	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-	DRM_DEBUG_DRIVER("Illum is not supported\n");
+	drm_dbg(&dev_priv->drm, "Illum is not supported\n");
 	return ASLC_ALS_ILLUM_FAILED;
 }
 
 static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
 {
-	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+	drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
 	return ASLC_PWM_FREQ_FAILED;
 }
 
@@ -499,30 +478,36 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
 {
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
-	DRM_DEBUG_DRIVER("Pfit is not supported\n");
+	drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
 	return ASLC_PFIT_FAILED;
 }
 
 static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
 {
-	DRM_DEBUG_DRIVER("SROT is not supported\n");
+	drm_dbg(&dev_priv->drm, "SROT is not supported\n");
 	return ASLC_ROTATION_ANGLES_FAILED;
 }
 
 static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (!iuer)
-		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (nothing)\n");
 	if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
-		DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (rotation lock)\n");
 	if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
-		DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (volume down)\n");
 	if (iuer & ASLE_IUER_VOLUME_UP_BTN)
-		DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (volume up)\n");
 	if (iuer & ASLE_IUER_WINDOWS_BTN)
-		DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (windows)\n");
 	if (iuer & ASLE_IUER_POWER_BTN)
-		DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+		drm_dbg(&dev_priv->drm,
+			"Button array event is not supported (power)\n");
 
 	return ASLC_BUTTON_ARRAY_FAILED;
 }
@@ -530,9 +515,11 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
 static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (iuer & ASLE_IUER_CONVERTIBLE)
-		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+		drm_dbg(&dev_priv->drm,
+			"Convertible is not supported (clamshell)\n");
 	else
-		DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+		drm_dbg(&dev_priv->drm,
+			"Convertible is not supported (slate)\n");
 
 	return ASLC_CONVERTIBLE_FAILED;
 }
@@ -540,16 +527,17 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
 static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
 {
 	if (iuer & ASLE_IUER_DOCKING)
-		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+		drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
 	else
-		DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+		drm_dbg(&dev_priv->drm,
+			"Docking is not supported (undocked)\n");
 
 	return ASLC_DOCKING_FAILED;
 }
 
 static u32 asle_isct_state(struct drm_i915_private *dev_priv)
 {
-	DRM_DEBUG_DRIVER("ISCT is not supported\n");
+	drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
 	return ASLC_ISCT_STATE_FAILED;
 }
 
@@ -569,8 +557,8 @@ static void asle_work(struct work_struct *work)
 	aslc_req = asle->aslc;
 
 	if (!(aslc_req & ASLC_REQ_MSK)) {
-		DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
-				 aslc_req);
+		drm_dbg(&dev_priv->drm,
+			"No request on ASLC interrupt 0x%08x\n", aslc_req);
 		return;
 	}
 
@@ -662,54 +650,12 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
 	}
 }
 
-static u32 acpi_display_type(struct intel_connector *connector)
-{
-	u32 display_type;
-
-	switch (connector->base.connector_type) {
-	case DRM_MODE_CONNECTOR_VGA:
-	case DRM_MODE_CONNECTOR_DVIA:
-		display_type = ACPI_DISPLAY_TYPE_VGA;
-		break;
-	case DRM_MODE_CONNECTOR_Composite:
-	case DRM_MODE_CONNECTOR_SVIDEO:
-	case DRM_MODE_CONNECTOR_Component:
-	case DRM_MODE_CONNECTOR_9PinDIN:
-	case DRM_MODE_CONNECTOR_TV:
-		display_type = ACPI_DISPLAY_TYPE_TV;
-		break;
-	case DRM_MODE_CONNECTOR_DVII:
-	case DRM_MODE_CONNECTOR_DVID:
-	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_HDMIA:
-	case DRM_MODE_CONNECTOR_HDMIB:
-		display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
-		break;
-	case DRM_MODE_CONNECTOR_LVDS:
-	case DRM_MODE_CONNECTOR_eDP:
-	case DRM_MODE_CONNECTOR_DSI:
-		display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
-		break;
-	case DRM_MODE_CONNECTOR_Unknown:
-	case DRM_MODE_CONNECTOR_VIRTUAL:
-		display_type = ACPI_DISPLAY_TYPE_OTHER;
-		break;
-	default:
-		MISSING_CASE(connector->base.connector_type);
-		display_type = ACPI_DISPLAY_TYPE_OTHER;
-		break;
-	}
-
-	return display_type;
-}
-
 static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct intel_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	int i = 0, max_outputs;
-	int display_index[16] = {};
 
 	/*
 	 * In theory, did2, the extended didl, gets added at opregion version
@@ -721,29 +667,22 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
 		ARRAY_SIZE(opregion->acpi->did2);
 
+	intel_acpi_device_id_update(dev_priv);
+
 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
 	for_each_intel_connector_iter(connector, &conn_iter) {
-		u32 device_id, type;
-
-		device_id = acpi_display_type(connector);
-
-		/* Use display type specific display index. */
-		type = (device_id & ACPI_DISPLAY_TYPE_MASK)
-			>> ACPI_DISPLAY_TYPE_SHIFT;
-		device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
-
-		connector->acpi_device_id = device_id;
 		if (i < max_outputs)
-			set_did(opregion, i, device_id);
+			set_did(opregion, i, connector->acpi_device_id);
 		i++;
 	}
 	drm_connector_list_iter_end(&conn_iter);
 
-	DRM_DEBUG_KMS("%d outputs detected\n", i);
+	drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
 
 	if (i > max_outputs)
-		DRM_ERROR("More than %d outputs in connector list\n",
-			  max_outputs);
+		drm_err(&dev_priv->drm,
+			"More than %d outputs in connector list\n",
+			max_outputs);
 
 	/* If fewer than max outputs, the list must be null terminated */
 	if (i < max_outputs)
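
[ Note: the deleted acpi_display_type() and the display_index[]
  bookkeeping did not disappear -- per the new call above, computing
  connector->acpi_device_id is now intel_acpi_device_id_update()'s
  job (presumably in intel_acpi.c, not shown in this hunk), and this
  loop only copies the precomputed IDs into the DIDL. For reference,
  the packing the removed code performed looked roughly like this;
  the shift/mask values below are illustrative, not the real ACPI
  constants:

    #include <stdio.h>

    #define DISPLAY_TYPE_SHIFT  8                 /* illustrative */
    #define DISPLAY_TYPE_MASK   (0xf << DISPLAY_TYPE_SHIFT)
    #define DISPLAY_INDEX_SHIFT 0

    /* each connector gets its display type plus a per-type
     * running index, so two internal panels become index 0 and 1 */
    static unsigned int pack_acpi_id(unsigned int type_bits,
                                     int display_index[16])
    {
            unsigned int type = (type_bits & DISPLAY_TYPE_MASK)
                    >> DISPLAY_TYPE_SHIFT;

            return type_bits |
                   display_index[type]++ << DISPLAY_INDEX_SHIFT;
    }

    int main(void)
    {
            int display_index[16] = { 0 };
            unsigned int internal = 4 << DISPLAY_TYPE_SHIFT;

            printf("0x%03x\n", pack_acpi_id(internal, display_index));
            printf("0x%03x\n", pack_acpi_id(internal, display_index));
            return 0;
    }
  ]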
@@ -823,7 +762,9 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
 		if (requested_callbacks) {
 			u32 req = opregion->swsci_sbcb_sub_functions;
 			if ((req & tmp) != req)
-				DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+				drm_dbg(&dev_priv->drm,
+					"SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
+					req, tmp);
 			/* XXX: for now, trust the requested callbacks */
 			/* opregion->swsci_sbcb_sub_functions &= tmp; */
 		} else {
@@ -831,9 +772,10 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
-			 opregion->swsci_gbda_sub_functions,
-			 opregion->swsci_sbcb_sub_functions);
+	drm_dbg(&dev_priv->drm,
+		"SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+		opregion->swsci_gbda_sub_functions,
+		opregion->swsci_sbcb_sub_functions);
 }
 
 static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -867,15 +809,17 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
 
 	ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
 	if (ret) {
-		DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
-			  name, ret);
+		drm_err(&dev_priv->drm,
+			"Requesting VBT firmware \"%s\" failed (%d)\n",
+			name, ret);
 		return ret;
 	}
 
 	if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
 		opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
 		if (opregion->vbt_firmware) {
-			DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+			drm_dbg_kms(&dev_priv->drm,
+				    "Found valid VBT firmware \"%s\"\n", name);
 			opregion->vbt = opregion->vbt_firmware;
 			opregion->vbt_size = fw->size;
 			ret = 0;
@@ -883,7 +827,8 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
 			ret = -ENOMEM;
 		}
 	} else {
-		DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+		drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
+			    name);
 		ret = -EINVAL;
 	}
 
@@ -910,9 +855,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
 	pci_read_config_dword(pdev, ASLS, &asls);
-	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+	drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+		asls);
 	if (asls == 0) {
-		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+		drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
 		return -ENOTSUPP;
 	}
 
@@ -925,21 +871,21 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	memcpy(buf, base, sizeof(buf));
 
 	if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
-		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+		drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 	opregion->header = base;
 	opregion->lid_state = base + ACPI_CLID;
 
-	DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
-			 opregion->header->over.major,
-			 opregion->header->over.minor,
-			 opregion->header->over.revision);
+	drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+		opregion->header->over.major,
+		opregion->header->over.minor,
+		opregion->header->over.revision);
 
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
-		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+		drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
 		/*
 		 * Indicate we handle monitor hotplug events ourselves so we do
@@ -951,20 +897,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	}
 
 	if (mboxes & MBOX_SWSCI) {
-		DRM_DEBUG_DRIVER("SWSCI supported\n");
+		drm_dbg(&dev_priv->drm, "SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
 		swsci_setup(dev_priv);
 	}
 
 	if (mboxes & MBOX_ASLE) {
-		DRM_DEBUG_DRIVER("ASLE supported\n");
+		drm_dbg(&dev_priv->drm, "ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
 
 		opregion->asle->ardy = ASLE_ARDY_NOT_READY;
 	}
 
 	if (mboxes & MBOX_ASLE_EXT)
-		DRM_DEBUG_DRIVER("ASLE extension supported\n");
+		drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
 
 	if (intel_load_vbt_firmware(dev_priv) == 0)
 		goto out;
@@ -984,7 +930,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 		 */
 		if (opregion->header->over.major > 2 ||
 		    opregion->header->over.minor >= 1) {
-			WARN_ON(rvda < OPREGION_SIZE);
+			drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
 
 			rvda += asls;
 		}
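
[ Note: same motivation as the logging conversion -- drm_WARN_ON()
  keeps WARN_ON()'s semantics (it still evaluates to the condition,
  so it can sit inside an if) but ties the splat to the drm device.
  A rough userspace model of that contract, not the kernel macro:

    #include <stdio.h>

    struct drm_device { const char *unique; };

    /* GNU statement expression, like the kernel's WARN_ON */
    #define toy_drm_WARN_ON(drm, cond) ({                     \
            int __fired = !!(cond);                           \
            if (__fired)                                      \
                    fprintf(stderr, "[drm:%s] WARN_ON(%s)\n", \
                            (drm)->unique, #cond);            \
            __fired;                                          \
    })

    int main(void)
    {
            struct drm_device dev = { "0000:00:02.0" };
            unsigned int rvda = 0x100, opregion_size = 0x2000;

            if (toy_drm_WARN_ON(&dev, rvda < opregion_size))
                    fprintf(stderr, "suspicious RVDA, carrying on\n");
            return 0;
    }
  ]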
@@ -995,12 +941,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 		vbt = opregion->rvda;
 		vbt_size = opregion->asle->rvds;
 		if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
-			DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Found valid VBT in ACPI OpRegion (RVDA)\n");
 			opregion->vbt = vbt;
 			opregion->vbt_size = vbt_size;
 			goto out;
 		} else {
-			DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Invalid VBT in ACPI OpRegion (RVDA)\n");
 			memunmap(opregion->rvda);
 			opregion->rvda = NULL;
 		}
@@ -1018,11 +966,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 		OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
 	vbt_size -= OPREGION_VBT_OFFSET;
 	if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
-		DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
 		opregion->vbt = vbt;
 		opregion->vbt_size = vbt_size;
 	} else {
-		DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
 	}
 
 out:
@@ -1058,20 +1008,22 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 
 	ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
-			      ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to get panel details from OpRegion (%d)\n",
+			    ret);
 		return ret;
 	}
 
 	ret = (panel_details >> 8) & 0xff;
 	if (ret > 0x10) {
-		DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Invalid OpRegion panel type 0x%x\n", ret);
 		return -EINVAL;
 	}
 
 	/* fall back to VBT panel type? */
 	if (ret == 0x0) {
-		DRM_DEBUG_KMS("No panel type in OpRegion\n");
+		drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
 		return -ENODEV;
 	}
 
@@ -1081,7 +1033,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 	 * via a quirk list :(
 	 */
 	if (!dmi_check_system(intel_use_opregion_panel_type)) {
-		DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+		drm_dbg_kms(&dev_priv->drm,
+			    "Ignoring OpRegion panel type (%d)\n", ret - 1);
 		return -ENODEV;
 	}
 
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e40c3a0..4811872 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
  */
 
 #include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
 
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_ring.h"
@@ -204,9 +203,10 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
 
 	/* WA_OVERLAY_CLKGATE:alm */
 	if (enable)
-		I915_WRITE(DSPCLK_GATE_D, 0);
+		intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
 	else
-		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+		intel_de_write(dev_priv, DSPCLK_GATE_D,
+			       OVRUNIT_CLOCK_GATE_DISABLE);
 
 	/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
 	pci_bus_read_config_byte(pdev->bus,
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct i915_request *rq;
 	u32 *cs;
 
-	WARN_ON(overlay->active);
+	drm_WARN_ON(&dev_priv->drm, overlay->active);
 
 	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
@@ -315,15 +315,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp, *cs;
 
-	WARN_ON(!overlay->active);
+	drm_WARN_ON(&dev_priv->drm, !overlay->active);
 
 	if (load_polyphase_filter)
 		flip_addr |= OFC_UPDATE;
 
 	/* check for underruns */
-	tmp = I915_READ(DOVSTA);
+	tmp = intel_de_read(dev_priv, DOVSTA);
 	if (tmp & (1 << 17))
-		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+		drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
 
 	rq = alloc_request(overlay, NULL);
 	if (IS_ERR(rq))
@@ -456,7 +456,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (!overlay->old_vma)
 		return 0;
 
-	if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+	if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
 		intel_overlay_release_old_vid_tail(overlay);
 		return 0;
 	}
@@ -759,7 +759,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	struct i915_vma *vma;
 	int ret, tmp_width;
 
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	drm_WARN_ON(&dev_priv->drm,
+		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_release_old_vid(overlay);
 	if (ret != 0)
@@ -857,7 +858,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	int ret;
 
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+	drm_WARN_ON(&dev_priv->drm,
+		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
@@ -891,7 +893,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	u32 pfit_control = I915_READ(PFIT_CONTROL);
+	u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
 	u32 ratio;
 
 	/* XXX: This is not the same logic as in the xorg driver, but more in
@@ -899,12 +901,12 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 	 */
 	if (INTEL_GEN(dev_priv) >= 4) {
 		/* on i965 use the PGM reg to read out the autoscaler values */
-		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+		ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
 	} else {
 		if (pfit_control & VERT_AUTO_SCALE)
-			ratio = I915_READ(PFIT_AUTO_RATIOS);
+			ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
 		else
-			ratio = I915_READ(PFIT_PGM_RATIOS);
+			ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
 		ratio >>= PFIT_VERT_SCALE_SHIFT;
 	}
 
@@ -1066,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 
 	overlay = dev_priv->overlay;
 	if (!overlay) {
-		DRM_DEBUG("userspace bug: no overlay\n");
+		drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
 		return -ENODEV;
 	}
 
@@ -1090,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 	drm_modeset_lock_all(dev);
 
 	if (i915_gem_object_is_tiled(new_bo)) {
-		DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "buffer used for overlay image can not be tiled\n");
 		ret = -EINVAL;
 		goto out_unlock;
 	}
@@ -1225,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 
 	overlay = dev_priv->overlay;
 	if (!overlay) {
-		DRM_DEBUG("userspace bug: no overlay\n");
+		drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
 		return -ENODEV;
 	}
 
@@ -1239,12 +1242,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 		attrs->saturation = overlay->saturation;
 
 		if (!IS_GEN(dev_priv, 2)) {
-			attrs->gamma0 = I915_READ(OGAMC0);
-			attrs->gamma1 = I915_READ(OGAMC1);
-			attrs->gamma2 = I915_READ(OGAMC2);
-			attrs->gamma3 = I915_READ(OGAMC3);
-			attrs->gamma4 = I915_READ(OGAMC4);
-			attrs->gamma5 = I915_READ(OGAMC5);
+			attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
+			attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
+			attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
+			attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
+			attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
+			attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
 		}
 	} else {
 		if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1274,12 +1277,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 			if (ret)
 				goto out_unlock;
 
-			I915_WRITE(OGAMC0, attrs->gamma0);
-			I915_WRITE(OGAMC1, attrs->gamma1);
-			I915_WRITE(OGAMC2, attrs->gamma2);
-			I915_WRITE(OGAMC3, attrs->gamma3);
-			I915_WRITE(OGAMC4, attrs->gamma4);
-			I915_WRITE(OGAMC5, attrs->gamma5);
+			intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
+			intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
+			intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
+			intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
+			intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
+			intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
 		}
 	}
 	overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1369,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	update_reg_attrs(overlay, overlay->regs);
 
 	dev_priv->overlay = overlay;
-	DRM_INFO("Initialized overlay support.\n");
+	drm_info(&dev_priv->drm, "Initialized overlay support.\n");
 	return;
 
 out_free:
@@ -1389,7 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
 	 * Furthermore modesetting teardown happens beforehand so the
 	 * hardware should be off already.
 	 */
-	WARN_ON(overlay->active);
+	drm_WARN_ON(&dev_priv->drm, overlay->active);
 
 	i915_gem_object_put(overlay->reg_bo);
 	i915_active_fini(&overlay->last_flip);
@@ -1419,8 +1422,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 	if (error == NULL)
 		return NULL;
 
-	error->dovsta = I915_READ(DOVSTA);
-	error->isr = I915_READ(GEN2_ISR);
+	error->dovsta = intel_de_read(dev_priv, DOVSTA);
+	error->isr = intel_de_read(dev_priv, GEN2_ISR);
 	error->base = overlay->flip_addr;
 
 	memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
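
[ Note: end of the intel_overlay.c conversion. Besides the logging
  and WARN changes seen above, the second pattern in this file is
  I915_READ()/I915_WRITE() -- which expanded against an implicit
  local named dev_priv -- becoming intel_de_read()/intel_de_write(),
  where the device is an ordinary argument. A toy model of the
  accessor change (fake register file, made-up offset):

    #include <stdint.h>
    #include <stdio.h>

    struct i915 { uint32_t regs[64]; };   /* stand-in MMIO space */

    static uint32_t intel_de_read(struct i915 *i915, unsigned int reg)
    {
            return i915->regs[reg];
    }

    static void intel_de_write(struct i915 *i915, unsigned int reg,
                               uint32_t val)
    {
            i915->regs[reg] = val;
    }

    #define DOVSTA 7   /* fake offset, for the example only */

    int main(void)
    {
            struct i915 gpu = { .regs = { 0 } };

            intel_de_write(&gpu, DOVSTA, 1u << 17);
            if (intel_de_read(&gpu, DOVSTA) & (1u << 17))
                    printf("overlay underrun bit set\n");
            return 0;
    }

  The explicit device argument is what lets these helpers live in
  shared display code without relying on a dev_priv naming
  convention at every call site. ]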
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7b3ec6eb..276f438 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -96,8 +96,9 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
 	if (!downclock_mode)
 		return NULL;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
-		      connector->base.base.id, connector->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+		    connector->base.base.id, connector->base.name);
 	drm_mode_debug_printmodeline(downclock_mode);
 
 	return downclock_mode;
@@ -122,8 +123,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
 		if (!fixed_mode)
 			return NULL;
 
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
-			      connector->base.base.id, connector->base.name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+			    connector->base.base.id, connector->base.name);
 		drm_mode_debug_printmodeline(fixed_mode);
 
 		return fixed_mode;
@@ -138,8 +140,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
 
 	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
-		      connector->base.base.id, connector->base.name);
+	drm_dbg_kms(&dev_priv->drm,
+		    "[CONNECTOR:%d:%s] using first mode from EDID: ",
+		    connector->base.base.id, connector->base.name);
 	drm_mode_debug_printmodeline(fixed_mode);
 
 	return fixed_mode;
@@ -162,8 +165,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
 
 	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
-		      connector->base.base.id, connector->base.name);
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ",
+		    connector->base.base.id, connector->base.name);
 	drm_mode_debug_printmodeline(fixed_mode);
 
 	info->width_mm = fixed_mode->width_mm;
@@ -423,15 +426,15 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		}
 		break;
 	default:
-		WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+		drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
+			 fitting_mode);
 		return;
 	}
 
 	/* 965+ wants fuzzy fitting */
 	/* FIXME: handle multiple panels by failing gracefully */
 	if (INTEL_GEN(dev_priv) >= 4)
-		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
-				 PFIT_FILTER_FUZZY);
+		pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
 
 out:
 	if ((pfit_control & PFIT_ENABLE) == 0) {
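
[ Note: one real cleanup amid the conversion above -- the open-coded
  "pipe << PFIT_PIPE_SHIFT" becomes a PFIT_PIPE() helper. The shape
  of such a macro, with a placeholder shift (the real value lives in
  i915_reg.h):

    #include <stdio.h>

    #define PFIT_PIPE_SHIFT 29   /* placeholder value */
    #define PFIT_PIPE(pipe) ((unsigned int)(pipe) << PFIT_PIPE_SHIFT)

    int main(void)
    {
            printf("0x%08x\n", PFIT_PIPE(1));   /* pipe B bits */
            return 0;
    }
  ]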
@@ -520,7 +523,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 
-	WARN_ON(panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
 
 	if (i915_modparams.invert_brightness < 0)
 		return val;
@@ -537,14 +540,14 @@ static u32 lpt_get_backlight(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+	return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
 static u32 pch_get_backlight(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
 static u32 i9xx_get_backlight(struct intel_connector *connector)
@@ -553,7 +556,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
 	struct intel_panel *panel = &connector->panel;
 	u32 val;
 
-	val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	if (INTEL_GEN(dev_priv) < 4)
 		val >>= 1;
 
@@ -569,10 +572,10 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
 
 static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 		return 0;
 
-	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+	return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
 }
 
 static u32 vlv_get_backlight(struct intel_connector *connector)
@@ -588,7 +591,8 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 
-	return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
+	return intel_de_read(dev_priv,
+			     BXT_BLC_PWM_DUTY(panel->backlight.controller));
 }
 
 static u32 pwm_get_backlight(struct intel_connector *connector)
@@ -605,8 +609,8 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+	u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
 }
 
 static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -615,8 +619,8 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	u32 tmp;
 
-	tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+	tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
 }
 
 static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -626,7 +630,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
 	struct intel_panel *panel = &connector->panel;
 	u32 tmp, mask;
 
-	WARN_ON(panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
 
 	if (panel->backlight.combination_mode) {
 		u8 lbpc;
@@ -643,8 +647,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
 		mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
 	}
 
-	tmp = I915_READ(BLC_PWM_CTL) & ~mask;
-	I915_WRITE(BLC_PWM_CTL, tmp | level);
+	tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+	intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
 }
 
 static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -654,8 +658,8 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32
 	enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
 	u32 tmp;
 
-	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+	tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
 }
 
 static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -664,7 +668,8 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_panel *panel = &connector->panel;
 
-	I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+	intel_de_write(dev_priv,
+		       BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
 }
 
 static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -709,7 +714,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
 
 	mutex_lock(&dev_priv->backlight_lock);
 
-	WARN_ON(panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
 
 	hw_level = clamp_user_to_hw(connector, user_level, user_max);
 	panel->backlight.level = hw_level;
@@ -742,14 +747,16 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
 	 * This needs rework if we need to add support for CPU PWM on PCH split
 	 * platforms.
 	 */
-	tmp = I915_READ(BLC_PWM_CPU_CTL2);
+	tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 	if (tmp & BLM_PWM_ENABLE) {
-		DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
-		I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+		drm_dbg_kms(&dev_priv->drm,
+			    "cpu backlight was enabled, disabling\n");
+		intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+			       tmp & ~BLM_PWM_ENABLE);
 	}
 
-	tmp = I915_READ(BLC_PWM_PCH_CTL1);
-	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
 }
 
 static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -760,11 +767,11 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
 
 	intel_panel_actually_set_backlight(old_conn_state, 0);
 
-	tmp = I915_READ(BLC_PWM_CPU_CTL2);
-	I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+	intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
 
-	tmp = I915_READ(BLC_PWM_PCH_CTL1);
-	I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
 }
 
 static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -779,8 +786,8 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st
 
 	intel_panel_actually_set_backlight(old_conn_state, 0);
 
-	tmp = I915_READ(BLC_PWM_CTL2);
-	I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+	intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
 }
 
 static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -792,8 +799,9 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
 
 	intel_panel_actually_set_backlight(old_conn_state, 0);
 
-	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
-	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+	intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+		       tmp & ~BLM_PWM_ENABLE);
 }
 
 static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -805,14 +813,15 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
 
 	intel_panel_actually_set_backlight(old_conn_state, 0);
 
-	tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-			tmp & ~BXT_BLC_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv,
+			    BXT_BLC_PWM_CTL(panel->backlight.controller));
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       tmp & ~BXT_BLC_PWM_ENABLE);
 
 	if (panel->backlight.controller == 1) {
-		val = I915_READ(UTIL_PIN_CTL);
+		val = intel_de_read(dev_priv, UTIL_PIN_CTL);
 		val &= ~UTIL_PIN_ENABLE;
-		I915_WRITE(UTIL_PIN_CTL, val);
+		intel_de_write(dev_priv, UTIL_PIN_CTL, val);
 	}
 }
 
@@ -825,9 +834,10 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
 
 	intel_panel_actually_set_backlight(old_conn_state, 0);
 
-	tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-		   tmp & ~BXT_BLC_PWM_ENABLE);
+	tmp = intel_de_read(dev_priv,
+			    BXT_BLC_PWM_CTL(panel->backlight.controller));
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
 static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -857,7 +867,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
 	 * another client is not activated.
 	 */
 	if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
-		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+		drm_dbg(&dev_priv->drm,
+			"Skipping backlight disable on vga switch\n");
 		return;
 	}
 
@@ -879,31 +890,31 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_panel *panel = &connector->panel;
 	u32 pch_ctl1, pch_ctl2, schicken;
 
-	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
 	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
-		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
 		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
-		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+		intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
 	}
 
 	if (HAS_PCH_LPT(dev_priv)) {
-		schicken = I915_READ(SOUTH_CHICKEN2);
+		schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
 		if (panel->backlight.alternate_pwm_increment)
 			schicken |= LPT_PWM_GRANULARITY;
 		else
 			schicken &= ~LPT_PWM_GRANULARITY;
-		I915_WRITE(SOUTH_CHICKEN2, schicken);
+		intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
 	} else {
-		schicken = I915_READ(SOUTH_CHICKEN1);
+		schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
 		if (panel->backlight.alternate_pwm_increment)
 			schicken |= SPT_PWM_GRANULARITY;
 		else
 			schicken &= ~SPT_PWM_GRANULARITY;
-		I915_WRITE(SOUTH_CHICKEN1, schicken);
+		intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
 	}
 
 	pch_ctl2 = panel->backlight.max << 16;
-	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
 
 	pch_ctl1 = 0;
 	if (panel->backlight.active_low_pwm)
@@ -913,9 +924,10 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
 	if (HAS_PCH_LPT(dev_priv))
 		pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
 
-	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
-	POSTING_READ(BLC_PWM_PCH_CTL1);
-	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+	intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+		       pch_ctl1 | BLM_PCH_PWM_ENABLE);
 
 	/* This won't stick until the above enable. */
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
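
[ Note: the converted sequence above preserves a deliberate ordering:
  program BLC_PWM_PCH_CTL1 with the enable bit clear, flush the write
  with a posting read (POSTING_READ() is now intel_de_posting_read()),
  set the enable bit, and only then does the duty-cycle write "stick",
  as the comment says. Schematically, with stand-in accessors and
  made-up offsets/bits:

    #include <stdint.h>

    static volatile uint32_t fake_mmio[8];  /* stand-in registers */

    static uint32_t rd(unsigned int r)          { return fake_mmio[r]; }
    static void wr(unsigned int r, uint32_t v)  { fake_mmio[r] = v; }

    #define BLC_PWM_PCH_CTL1   3            /* made-up offset */
    #define BLM_PCH_PWM_ENABLE (1u << 30)   /* made-up bit    */

    static void enable_backlight(uint32_t pch_ctl1)
    {
            wr(BLC_PWM_PCH_CTL1, pch_ctl1);
            (void)rd(BLC_PWM_PCH_CTL1);     /* the posting read */
            wr(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
    }

    int main(void) { enable_backlight(0); return 0; }
  ]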
@@ -930,41 +942,42 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 cpu_ctl2, pch_ctl1, pch_ctl2;
 
-	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 	if (cpu_ctl2 & BLM_PWM_ENABLE) {
-		DRM_DEBUG_KMS("cpu backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
 		cpu_ctl2 &= ~BLM_PWM_ENABLE;
-		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+		intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
 	}
 
-	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
 	if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
-		DRM_DEBUG_KMS("pch backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
 		pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
-		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+		intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
 	}
 
 	if (cpu_transcoder == TRANSCODER_EDP)
 		cpu_ctl2 = BLM_TRANSCODER_EDP;
 	else
 		cpu_ctl2 = BLM_PIPE(cpu_transcoder);
-	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
-	POSTING_READ(BLC_PWM_CPU_CTL2);
-	I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+	intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+	intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+	intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
 
 	/* This won't stick until the above enable. */
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
 
 	pch_ctl2 = panel->backlight.max << 16;
-	I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
 
 	pch_ctl1 = 0;
 	if (panel->backlight.active_low_pwm)
 		pch_ctl1 |= BLM_PCH_POLARITY;
 
-	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
-	POSTING_READ(BLC_PWM_PCH_CTL1);
-	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+	intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+	intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+		       pch_ctl1 | BLM_PCH_PWM_ENABLE);
 }
 
 static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -975,10 +988,10 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_panel *panel = &connector->panel;
 	u32 ctl, freq;
 
-	ctl = I915_READ(BLC_PWM_CTL);
+	ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
 	if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
-		DRM_DEBUG_KMS("backlight already enabled\n");
-		I915_WRITE(BLC_PWM_CTL, 0);
+		drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+		intel_de_write(dev_priv, BLC_PWM_CTL, 0);
 	}
 
 	freq = panel->backlight.max;
@@ -991,8 +1004,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 	if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
 		ctl |= BLM_POLARITY_PNV;
 
-	I915_WRITE(BLC_PWM_CTL, ctl);
-	POSTING_READ(BLC_PWM_CTL);
+	intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+	intel_de_posting_read(dev_priv, BLC_PWM_CTL);
 
 	/* XXX: combine this into above write? */
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1003,7 +1016,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
 	 * that has backlight.
 	 */
 	if (IS_GEN(dev_priv, 2))
-		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+		intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }
 
 static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1015,11 +1028,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
 	enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
 	u32 ctl, ctl2, freq;
 
-	ctl2 = I915_READ(BLC_PWM_CTL2);
+	ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
 	if (ctl2 & BLM_PWM_ENABLE) {
-		DRM_DEBUG_KMS("backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
 		ctl2 &= ~BLM_PWM_ENABLE;
-		I915_WRITE(BLC_PWM_CTL2, ctl2);
+		intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
 	}
 
 	freq = panel->backlight.max;
@@ -1027,16 +1040,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
 		freq /= 0xff;
 
 	ctl = freq << 16;
-	I915_WRITE(BLC_PWM_CTL, ctl);
+	intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
 
 	ctl2 = BLM_PIPE(pipe);
 	if (panel->backlight.combination_mode)
 		ctl2 |= BLM_COMBINATION_MODE;
 	if (panel->backlight.active_low_pwm)
 		ctl2 |= BLM_POLARITY_I965;
-	I915_WRITE(BLC_PWM_CTL2, ctl2);
-	POSTING_READ(BLC_PWM_CTL2);
-	I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+	intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+	intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+	intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
 
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
 }
@@ -1050,15 +1063,15 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
 	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 	u32 ctl, ctl2;
 
-	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
 	if (ctl2 & BLM_PWM_ENABLE) {
-		DRM_DEBUG_KMS("backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
 		ctl2 &= ~BLM_PWM_ENABLE;
-		I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+		intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
 	}
 
 	ctl = panel->backlight.max << 16;
-	I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+	intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
 
 	/* XXX: combine this into above write? */
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1066,9 +1079,10 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
 	ctl2 = 0;
 	if (panel->backlight.active_low_pwm)
 		ctl2 |= BLM_POLARITY_I965;
-	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
-	POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
-	I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+	intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+	intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+	intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+		       ctl2 | BLM_PWM_ENABLE);
 }
 
 static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1082,30 +1096,34 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
 
 	/* Controller 1 uses the utility pin. */
 	if (panel->backlight.controller == 1) {
-		val = I915_READ(UTIL_PIN_CTL);
+		val = intel_de_read(dev_priv, UTIL_PIN_CTL);
 		if (val & UTIL_PIN_ENABLE) {
-			DRM_DEBUG_KMS("util pin already enabled\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "util pin already enabled\n");
 			val &= ~UTIL_PIN_ENABLE;
-			I915_WRITE(UTIL_PIN_CTL, val);
+			intel_de_write(dev_priv, UTIL_PIN_CTL, val);
 		}
 
 		val = 0;
 		if (panel->backlight.util_pin_active_low)
 			val |= UTIL_PIN_POLARITY;
-		I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
-				UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+		intel_de_write(dev_priv, UTIL_PIN_CTL,
+			       val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
 	}
 
-	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	pwm_ctl = intel_de_read(dev_priv,
+				BXT_BLC_PWM_CTL(panel->backlight.controller));
 	if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
-		DRM_DEBUG_KMS("backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
 		pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
-		I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-				pwm_ctl);
+		intel_de_write(dev_priv,
+			       BXT_BLC_PWM_CTL(panel->backlight.controller),
+			       pwm_ctl);
 	}
 
-	I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
-			panel->backlight.max);
+	intel_de_write(dev_priv,
+		       BXT_BLC_PWM_FREQ(panel->backlight.controller),
+		       panel->backlight.max);
 
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
 
@@ -1113,10 +1131,12 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
 	if (panel->backlight.active_low_pwm)
 		pwm_ctl |= BXT_BLC_PWM_POLARITY;
 
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
-	POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-			pwm_ctl | BXT_BLC_PWM_ENABLE);
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       pwm_ctl);
+	intel_de_posting_read(dev_priv,
+			      BXT_BLC_PWM_CTL(panel->backlight.controller));
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
 static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1127,16 +1147,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
 	struct intel_panel *panel = &connector->panel;
 	u32 pwm_ctl;
 
-	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	pwm_ctl = intel_de_read(dev_priv,
+				BXT_BLC_PWM_CTL(panel->backlight.controller));
 	if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
-		DRM_DEBUG_KMS("backlight already enabled\n");
+		drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
 		pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
-		I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-			   pwm_ctl);
+		intel_de_write(dev_priv,
+			       BXT_BLC_PWM_CTL(panel->backlight.controller),
+			       pwm_ctl);
 	}
 
-	I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
-		   panel->backlight.max);
+	intel_de_write(dev_priv,
+		       BXT_BLC_PWM_FREQ(panel->backlight.controller),
+		       panel->backlight.max);
 
 	intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
 
@@ -1144,10 +1167,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
 	if (panel->backlight.active_low_pwm)
 		pwm_ctl |= BXT_BLC_PWM_POLARITY;
 
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
-	POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
-	I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
-		   pwm_ctl | BXT_BLC_PWM_ENABLE);
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       pwm_ctl);
+	intel_de_posting_read(dev_priv,
+			      BXT_BLC_PWM_CTL(panel->backlight.controller));
+	intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+		       pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
 static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1194,7 +1219,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
 	if (!panel->backlight.present)
 		return;
 
-	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+	drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
 
 	mutex_lock(&dev_priv->backlight_lock);
 
@@ -1219,7 +1244,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
 
 	mutex_unlock(&dev_priv->backlight_lock);
 
-	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+	drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
 	return val;
 }
 
@@ -1237,7 +1262,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
 
 	mutex_lock(&dev_priv->backlight_lock);
 
-	WARN_ON(panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
 
 	hw_level = scale_user_to_hw(connector, user_level, user_max);
 	panel->backlight.level = hw_level;
@@ -1380,7 +1405,8 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
+	return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+				 pwm_freq_hz);
 }
 
 /*
@@ -1441,7 +1467,8 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
-	return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
+	return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+				 pwm_freq_hz * 128);
 }
 
 /*
@@ -1458,7 +1485,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 	int clock;
 
 	if (IS_PINEVIEW(dev_priv))
-		clock = KHz(dev_priv->rawclk_freq);
+		clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
 	else
 		clock = KHz(dev_priv->cdclk.hw.cdclk);
 
@@ -1476,7 +1503,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 	int clock;
 
 	if (IS_G4X(dev_priv))
-		clock = KHz(dev_priv->rawclk_freq);
+		clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
 	else
 		clock = KHz(dev_priv->cdclk.hw.cdclk);
 
@@ -1493,14 +1520,14 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int mul, clock;
 
-	if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+	if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
 		if (IS_CHERRYVIEW(dev_priv))
 			clock = KHz(19200);
 		else
 			clock = MHz(25);
 		mul = 16;
 	} else {
-		clock = KHz(dev_priv->rawclk_freq);
+		clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
 		mul = 128;
 	}
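
[ Note: separate from the accessor churn, the hz_to_pwm hunks here
  pick up rawclk_freq's move from dev_priv into RUNTIME_INFO(). The
  arithmetic is untouched; for the PCH flavour it works out as below
  (worked example with assumed clock/frequency values, not driver
  code):

    #include <stdio.h>

    #define KHz(x) (1000 * (x))
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            unsigned int rawclk_khz  = 24000;  /* assume 24 MHz   */
            unsigned int pwm_freq_hz = 200;    /* VBT-ish default */

            /* pch_hz_to_pwm(): clock / (freq * 128 increments) */
            printf("%u\n", DIV_ROUND_CLOSEST(KHz(rawclk_khz),
                                             pwm_freq_hz * 128));
            return 0;
    }
  ]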
 
@@ -1515,22 +1542,26 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
 	u32 pwm;
 
 	if (!panel->backlight.hz_to_pwm) {
-		DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "backlight frequency conversion not supported\n");
 		return 0;
 	}
 
 	if (pwm_freq_hz) {
-		DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
-			      pwm_freq_hz);
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT defined backlight frequency %u Hz\n",
+			    pwm_freq_hz);
 	} else {
 		pwm_freq_hz = 200;
-		DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
-			      pwm_freq_hz);
+		drm_dbg_kms(&dev_priv->drm,
+			    "default backlight frequency %u Hz\n",
+			    pwm_freq_hz);
 	}
 
 	pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
 	if (!pwm) {
-		DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "backlight frequency conversion failed\n");
 		return 0;
 	}
 
@@ -1546,7 +1577,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
 	struct intel_panel *panel = &connector->panel;
 	int min;
 
-	WARN_ON(panel->backlight.max == 0);
+	drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
 
 	/*
 	 * XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1557,8 +1588,9 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
 	 */
 	min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
 	if (min != dev_priv->vbt.backlight.min_brightness) {
-		DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
-			      dev_priv->vbt.backlight.min_brightness, min);
+		drm_dbg_kms(&dev_priv->drm,
+			    "clamping VBT min backlight %d/255 to %d/255\n",
+			    dev_priv->vbt.backlight.min_brightness, min);
 	}
 
 	/* vbt value is a coefficient in range [0..255] */
@@ -1573,18 +1605,18 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 	bool alt, cpu_mode;
 
 	if (HAS_PCH_LPT(dev_priv))
-		alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+		alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
 	else
-		alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+		alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
 	panel->backlight.alternate_pwm_increment = alt;
 
-	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
-	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
 	panel->backlight.max = pch_ctl2 >> 16;
 
-	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 
 	if (!panel->backlight.max)
 		panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1608,13 +1640,16 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 				       panel->backlight.max);
 
 	if (cpu_mode) {
-		DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "CPU backlight register was enabled, switching to PCH override\n");
 
 		/* Write converted CPU PWM value to PCH override register */
 		lpt_set_backlight(connector->base.state, panel->backlight.level);
-		I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+		intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+			       pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
 
-		I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+		intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+			       cpu_ctl2 & ~BLM_PWM_ENABLE);
 	}
 
 	return 0;
@@ -1626,10 +1661,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
 	struct intel_panel *panel = &connector->panel;
 	u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
 
-	pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+	pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
 	panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
 
-	pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
 	panel->backlight.max = pch_ctl2 >> 16;
 
 	if (!panel->backlight.max)
@@ -1645,7 +1680,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
 	panel->backlight.level = clamp(val, panel->backlight.min,
 				       panel->backlight.max);
 
-	cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+	cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
 	panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
 		(pch_ctl1 & BLM_PCH_PWM_ENABLE);
 
@@ -1658,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
 	struct intel_panel *panel = &connector->panel;
 	u32 ctl, val;
 
-	ctl = I915_READ(BLC_PWM_CTL);
+	ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
 
 	if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
 		panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
@@ -1697,11 +1732,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
 	struct intel_panel *panel = &connector->panel;
 	u32 ctl, ctl2, val;
 
-	ctl2 = I915_READ(BLC_PWM_CTL2);
+	ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
 	panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
 	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
-	ctl = I915_READ(BLC_PWM_CTL);
+	ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
 	panel->backlight.max = ctl >> 16;
 
 	if (!panel->backlight.max)
@@ -1731,13 +1766,13 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
 	struct intel_panel *panel = &connector->panel;
 	u32 ctl, ctl2, val;
 
-	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
 		return -ENODEV;
 
-	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+	ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
 	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
-	ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
+	ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
 	panel->backlight.max = ctl >> 16;
 
 	if (!panel->backlight.max)
@@ -1767,18 +1802,20 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 
 	panel->backlight.controller = dev_priv->vbt.backlight.controller;
 
-	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	pwm_ctl = intel_de_read(dev_priv,
+				BXT_BLC_PWM_CTL(panel->backlight.controller));
 
 	/* Controller 1 uses the utility pin. */
 	if (panel->backlight.controller == 1) {
-		val = I915_READ(UTIL_PIN_CTL);
+		val = intel_de_read(dev_priv, UTIL_PIN_CTL);
 		panel->backlight.util_pin_active_low =
 					val & UTIL_PIN_POLARITY;
 	}
 
 	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
 	panel->backlight.max =
-		I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+		intel_de_read(dev_priv,
+			      BXT_BLC_PWM_FREQ(panel->backlight.controller));
 
 	if (!panel->backlight.max)
 		panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1812,11 +1849,13 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	 */
 	panel->backlight.controller = 0;
 
-	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+	pwm_ctl = intel_de_read(dev_priv,
+				BXT_BLC_PWM_CTL(panel->backlight.controller));
 
 	panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
 	panel->backlight.max =
-		I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+		intel_de_read(dev_priv,
+			      BXT_BLC_PWM_FREQ(panel->backlight.controller));
 
 	if (!panel->backlight.max)
 		panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1843,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_panel *panel = &connector->panel;
 	const char *desc;
+	u32 level, ns;
 	int retval;
 
 	/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1855,7 +1895,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 	}
 
 	if (IS_ERR(panel->backlight.pwm)) {
-		DRM_ERROR("Failed to get the %s PWM chip\n", desc);
+		drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+			desc);
 		panel->backlight.pwm = NULL;
 		return -ENODEV;
 	}
@@ -1866,23 +1907,27 @@ static int pwm_setup_backlight(struct intel_connector *connector,
 	 */
 	pwm_apply_args(panel->backlight.pwm);
 
-	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
-			    CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.min = 0; /* 0% */
+	panel->backlight.max = 100; /* 100% */
+	level = intel_panel_compute_brightness(connector, 100);
+	ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+	retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
 	if (retval < 0) {
-		DRM_ERROR("Failed to configure the pwm chip\n");
+		drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
 		pwm_put(panel->backlight.pwm);
 		panel->backlight.pwm = NULL;
 		return retval;
 	}
 
-	panel->backlight.min = 0; /* 0% */
-	panel->backlight.max = 100; /* 100% */
-	panel->backlight.level = DIV_ROUND_UP(
-				 pwm_get_duty_cycle(panel->backlight.pwm) * 100,
-				 CRC_PMIC_PWM_PERIOD_NS);
+	level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+			     CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.level =
+		intel_panel_compute_brightness(connector, level);
 	panel->backlight.enabled = panel->backlight.level != 0;
 
-	DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
+	drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+		 desc);
 	return 0;
 }
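
[ Note: this hunk is more than conversion noise. The CRC PMIC PWM
  path now feeds levels through intel_panel_compute_brightness(), so
  i915.invert_brightness (and the matching quirks) finally apply to
  DSI panels driven by the external PWM chip, and the initial duty
  cycle comes from the computed 100% level instead of being
  hard-coded to the full period. The duty-cycle round trip, as a
  standalone check with an assumed period (the driver's constant is
  CRC_PMIC_PWM_PERIOD_NS):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define PERIOD_NS 21333u   /* assumed period in ns */

    int main(void)
    {
            /* level in [0, 100] after brightness computation */
            unsigned int level = 100;
            unsigned int ns = DIV_ROUND_UP(level * PERIOD_NS, 100);

            /* reverse mapping used when reading the level back */
            unsigned int back = DIV_ROUND_UP(ns * 100, PERIOD_NS);

            printf("duty = %u ns, level back = %u%%\n", ns, back);
            return 0;
    }
  ]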
 
@@ -1913,15 +1958,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 
 	if (!dev_priv->vbt.backlight.present) {
 		if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
-			DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "no backlight present per VBT, but present per quirk\n");
 		} else {
-			DRM_DEBUG_KMS("no backlight present per VBT\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "no backlight present per VBT\n");
 			return 0;
 		}
 	}
 
 	/* ensure intel_panel has been initialized first */
-	if (WARN_ON(!panel->backlight.setup))
+	if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
 		return -ENODEV;
 
 	/* set level and max in panel struct */
@@ -1930,17 +1977,19 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 	mutex_unlock(&dev_priv->backlight_lock);
 
 	if (ret) {
-		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
-			      connector->name);
+		drm_dbg_kms(&dev_priv->drm,
+			    "failed to setup backlight for connector %s\n",
+			    connector->name);
 		return ret;
 	}
 
 	panel->backlight.present = true;
 
-	DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
-		      connector->name,
-		      enableddisabled(panel->backlight.enabled),
-		      panel->backlight.level, panel->backlight.max);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Connector %s backlight initialized, %s, brightness %u/%u\n",
+		    connector->name,
+		    enableddisabled(panel->backlight.enabled),
+		    panel->backlight.level, panel->backlight.max);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 520408e..a9a5df2 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -110,8 +110,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
 				break;
 			default:
-				WARN(1, "nonexisting DP port %c\n",
-				     port_name(dig_port->base.port));
+				drm_WARN(dev, 1, "nonexisting DP port %c\n",
+					 port_name(dig_port->base.port));
 				break;
 			}
 			break;
@@ -172,7 +172,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 	 *   - DisplayPort scrambling: used for EMI reduction
 	 */
 	if (need_stable_symbols) {
-		u32 tmp = I915_READ(PORT_DFT2_G4X);
+		u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
 
 		tmp |= DC_BALANCE_RESET_VLV;
 		switch (pipe) {
@@ -188,7 +188,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 		default:
 			return -EINVAL;
 		}
-		I915_WRITE(PORT_DFT2_G4X, tmp);
+		intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
 	}
 
 	return 0;
@@ -237,7 +237,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
 static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 					 enum pipe pipe)
 {
-	u32 tmp = I915_READ(PORT_DFT2_G4X);
+	u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
 
 	switch (pipe) {
 	case PIPE_A:
@@ -254,7 +254,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
 	}
 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
 		tmp &= ~DC_BALANCE_RESET_VLV;
-	I915_WRITE(PORT_DFT2_G4X, tmp);
+	intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
 }
 
 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -328,7 +328,8 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
 
 	drm_atomic_state_put(state);
 unlock:
-	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+	drm_WARN(&dev_priv->drm, ret,
+		 "Toggling workaround to %i returns %i\n", enable, ret);
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 }
@@ -440,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
 	return 0;
 }
 
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
 {
-	enum pipe pipe;
+	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
 
-	for_each_pipe(dev_priv, pipe) {
-		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
-		spin_lock_init(&pipe_crc->lock);
-	}
+	spin_lock_init(&pipe_crc->lock);
 }
 
 static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -570,7 +567,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
 	enum intel_pipe_crc_source source;
 
 	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
-		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+		drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
 		return -EINVAL;
 	}
 
@@ -586,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
 	enum intel_display_power_domain power_domain;
 	enum intel_pipe_crc_source source;
 	intel_wakeref_t wakeref;
@@ -595,14 +593,15 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 	bool enable;
 
 	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
-		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+		drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
 		return -EINVAL;
 	}
 
 	power_domain = POWER_DOMAIN_PIPE(crtc->index);
 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
 	if (!wakeref) {
-		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Trying to capture CRC while pipe is off\n");
 		return -EIO;
 	}
 
@@ -615,8 +614,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
 		goto out;
 
 	pipe_crc->source = source;
-	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
-	POSTING_READ(PIPE_CRC_CTL(crtc->index));
+	intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+	intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
 
 	if (!source) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -638,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
 	u32 val = 0;
 
 	if (!crtc->crc.opened)
@@ -650,22 +649,22 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
 	/* Don't need pipe_crc->lock here, IRQs are not generated. */
 	pipe_crc->skipped = 0;
 
-	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
-	POSTING_READ(PIPE_CRC_CTL(crtc->index));
+	intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+	intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
 }
 
 void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+	struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
 
 	/* Swallow crc's until we stop generating them. */
 	spin_lock_irq(&pipe_crc->lock);
 	pipe_crc->skipped = INT_MIN;
 	spin_unlock_irq(&pipe_crc->lock);
 
-	I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
-	POSTING_READ(PIPE_CRC_CTL(crtc->index));
+	intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0);
+	intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
 	intel_synchronize_irq(dev_priv);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a7..43012b1 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
 struct intel_crtc;
 
 #ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
 int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
 				 const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
 void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
 void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
 #else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
 #define intel_crtc_set_crc_source NULL
 #define intel_crtc_verify_crc_source NULL
 #define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8302505..fd9b146 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -59,11 +59,28 @@
  * get called by the frontbuffer tracking code. Note that because of locking
  * issues the self-refresh re-enable code is done from a work queue, which
  * must be correctly synchronized/cancelled when shutting down the pipe."
+ *
+ * DC3CO (DC3 clock off)
+ *
+ * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
+ * the clock off automatically during the PSR2 idle state.
+ * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
+ * entry/exit allows the HW to enter a low-power state even when page flipping
+ * periodically (for instance a 30fps video playback scenario).
+ *
+ * Every time a flip occurs, PSR2 exits deep sleep state (if it was in it),
+ * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
+ * frames; if no other flip occurs and that function executes, DC3CO is
+ * disabled and PSR2 is configured to enter deep sleep, resetting again in
+ * case of another flip.
+ * Front buffer modifications deliberately do not trigger DC3CO activation,
+ * as that would bring a lot of complexity and most modern systems will only
+ * use page flips.
  */
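
/*
 * Condensed sketch of the flip path described above; the real code is
 * tgl_dc3co_flush() further down in this file, which additionally takes
 * psr.lock and bails out early in several cases. Simplified, for
 * illustration only:
 */
static void dc3co_on_flip_sketch(struct drm_i915_private *dev_priv)
{
	/* every page flip re-enables DC3CO ... */
	tgl_psr2_enable_dc3co(dev_priv);
	/* ... and re-arms the ~6-frame countdown to tgl_dc3co_disable_work() */
	mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
			 dev_priv->psr.dc3co_exit_delay);
}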
 
-static bool psr_global_enabled(u32 debug)
+static bool psr_global_enabled(struct drm_i915_private *i915)
 {
-	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+	switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_DEFAULT:
 		return i915_modparams.enable_psr;
 	case I915_PSR_DEBUG_DISABLE:
@@ -77,8 +94,8 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
 			       const struct intel_crtc_state *crtc_state)
 {
 	/* Cannot enable DSC and PSR2 simultaneously */
-	WARN_ON(crtc_state->dsc.compression_enable &&
-		crtc_state->has_psr2);
+	drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
+		    crtc_state->has_psr2);
 
 	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_DISABLE:
@@ -114,10 +131,10 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
 			EDP_PSR_PRE_ENTRY(trans_shift);
 
 	/* Warning: it is masking/setting reserved bits too */
-	val = I915_READ(imr_reg);
+	val = intel_de_read(dev_priv, imr_reg);
 	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
 	val |= ~mask;
-	I915_WRITE(imr_reg, val);
+	intel_de_write(dev_priv, imr_reg, val);
 }
 
 static void psr_event_print(u32 val, bool psr2_enabled)
@@ -174,20 +191,24 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 
 	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
 		dev_priv->psr.last_entry_attempt = time_ns;
-		DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
-			      transcoder_name(cpu_transcoder));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
+			    transcoder_name(cpu_transcoder));
 	}
 
 	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
 		dev_priv->psr.last_exit = time_ns;
-		DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
-			      transcoder_name(cpu_transcoder));
+		drm_dbg_kms(&dev_priv->drm,
+			    "[transcoder %s] PSR exit completed\n",
+			    transcoder_name(cpu_transcoder));
 
 		if (INTEL_GEN(dev_priv) >= 9) {
-			u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+			u32 val = intel_de_read(dev_priv,
+						PSR_EVENT(cpu_transcoder));
 			bool psr2_enabled = dev_priv->psr.psr2_enabled;
 
-			I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
+				       val);
 			psr_event_print(val, psr2_enabled);
 		}
 	}
@@ -195,7 +216,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
 		u32 val;
 
-		DRM_WARN("[transcoder %s] PSR aux error\n",
+		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
 			 transcoder_name(cpu_transcoder));
 
 		dev_priv->psr.irq_aux_error = true;
@@ -208,9 +229,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 		 * again so we don't care about unmasking the interrupt
 		 * or unsetting irq_aux_error.
 		 */
-		val = I915_READ(imr_reg);
+		val = intel_de_read(dev_priv, imr_reg);
 		val |= EDP_PSR_ERROR(trans_shift);
-		I915_WRITE(imr_reg, val);
+		intel_de_write(dev_priv, imr_reg, val);
 
 		schedule_work(&dev_priv->psr.work);
 	}
@@ -270,7 +291,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 
 	if (dev_priv->psr.dp) {
-		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+		drm_warn(&dev_priv->drm,
+			 "More than one eDP panel found, PSR support should be extended\n");
 		return;
 	}
 
@@ -279,16 +301,18 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 
 	if (!intel_dp->psr_dpcd[0])
 		return;
-	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
-		      intel_dp->psr_dpcd[0]);
+	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+		    intel_dp->psr_dpcd[0]);
 
-	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
-		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+	if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR support not currently available for this panel\n");
 		return;
 	}
 
 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
-		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Panel lacks power state control, PSR cannot be enabled\n");
 		return;
 	}
 
@@ -316,8 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		 * GTC first.
 		 */
 		dev_priv->psr.sink_psr2_support = y_req && alpm;
-		DRM_DEBUG_KMS("PSR2 %ssupported\n",
-			      dev_priv->psr.sink_psr2_support ? "" : "not ");
+		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+			    dev_priv->psr.sink_psr2_support ? "" : "not ");
 
 		if (dev_priv->psr.sink_psr2_support) {
 			dev_priv->psr.colorimetry_support =
@@ -380,8 +404,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 	for (i = 0; i < sizeof(aux_msg); i += 4)
-		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
-			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+		intel_de_write(dev_priv,
+			       EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
@@ -391,7 +416,8 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 
 	/* Select only valid bits for SRD_AUX_CTL */
 	aux_ctl &= psr_aux_mask;
-	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
+	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+		       aux_ctl);
 }
 
 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -454,22 +480,30 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
 	return val;
 }
 
+static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	int idle_frames;
+
+	/* Let's use 6 as the minimum to cover all known cases including the
+	 * off-by-one issue that HW has in some cases.
+	 */
+	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+
+	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+		idle_frames = 0xf;
+
+	return idle_frames;
+}
+
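
/*
 * Worked example for psr_compute_idle_frames() above (illustrative
 * values): with vbt.psr.idle_frames == 2 and sink_sync_latency == 8 the
 * result is max(max(6, 2), 8 + 1) == 9, which still fits the 4-bit
 * IDLE_FRAME field, so the 0xf clamp only fires for out-of-range
 * VBT/DPCD values.
 */
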
 static void hsw_activate_psr1(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 max_sleep_time = 0x1f;
 	u32 val = EDP_PSR_ENABLE;
 
-	/* Let's use 6 as the minimum to cover all known cases including the
-	 * off-by-one issue that HW has in some cases.
-	 */
-	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
-	/* sink_sync_latency of 8 means source has to wait for more than 8
-	 * frames, we'll go with 9 frames for now
-	 */
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
 
 	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
 	if (IS_HASWELL(dev_priv))
@@ -483,9 +517,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 	if (INTEL_GEN(dev_priv) >= 8)
 		val |= EDP_PSR_CRC_ENABLE;
 
-	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
 		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
-	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
 }
 
 static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -493,13 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 val;
 
-	/* Let's use 6 as the minimum to cover all known cases including the
-	 * off-by-one issue that HW has in some cases.
-	 */
-	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
 
 	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -521,9 +549,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
 	 * recommending keep this bit unset while PSR2 is enabled.
 	 */
-	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
 
-	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
 }
 
 static bool
@@ -552,10 +580,10 @@ static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
-	val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+	val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
 	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
 	val |= idle_frames;
-	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
 }
 
 static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -566,29 +594,22 @@ static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
 
 static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
 {
-	int idle_frames;
+	struct intel_dp *intel_dp = dev_priv->psr.dp;
 
 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-	/*
-	 * Restore PSR2 idle frame let's use 6 as the minimum to cover all known
-	 * cases including the off-by-one issue that HW has in some cases.
-	 */
-	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
-	psr2_program_idle_frames(dev_priv, idle_frames);
+	psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
 }
 
-static void tgl_dc5_idle_thread(struct work_struct *work)
+static void tgl_dc3co_disable_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.idle_work.work);
+		container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
 
 	mutex_lock(&dev_priv->psr.lock);
 	/* If delayed work is pending, it is not idle */
-	if (delayed_work_pending(&dev_priv->psr.idle_work))
+	if (delayed_work_pending(&dev_priv->psr.dc3co_work))
 		goto unlock;
 
-	DRM_DEBUG_KMS("DC5/6 idle thread\n");
 	tgl_psr2_disable_dc3co(dev_priv);
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -599,11 +620,41 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
 	if (!dev_priv->psr.dc3co_enabled)
 		return;
 
-	cancel_delayed_work(&dev_priv->psr.idle_work);
+	cancel_delayed_work(&dev_priv->psr.dc3co_work);
 	/* Before PSR2 exit, disallow DC3CO */
 	tgl_psr2_disable_dc3co(dev_priv);
 }
 
+static void
+tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
+				  struct intel_crtc_state *crtc_state)
+{
+	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u32 exit_scanlines;
+
+	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+		return;
+
+	/* Bspec 49196: DC3CO only works with pipe A and DDI A. */
+	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
+	    dig_port->base.port != PORT_A)
+		return;
+
+	/*
+	 * DC3CO exit time is 200us per Bspec 49196.
+	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+	 */
+	exit_scanlines =
+		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
+
+	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+		return;
+
+	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+}
+
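
/*
 * Worked example for the exit-line math above (illustrative numbers,
 * assuming intel_usecs_to_scanlines() is the usual
 * DIV_ROUND_UP(usecs * crtc_clock, 1000 * crtc_htotal)): a 1920x1080@60
 * mode with htotal 2200 and a 148500 kHz pixel clock needs
 * DIV_ROUND_UP(200 * 148500, 1000 * 2200) + 1 = 14 + 1 = 15 scanlines of
 * headroom, so dc3co_exitline is programmed to 1080 - 15 = 1065.
 */
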
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 				    struct intel_crtc_state *crtc_state)
 {
@@ -616,8 +667,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 
 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
-		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
-			      transcoder_name(crtc_state->cpu_transcoder));
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not supported in transcoder %s\n",
+			    transcoder_name(crtc_state->cpu_transcoder));
 		return false;
 	}
 
@@ -627,7 +679,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	 * over PSR2.
 	 */
 	if (crtc_state->dsc.compression_enable) {
-		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 cannot be enabled since DSC is enabled\n");
 		return false;
 	}
 
@@ -646,15 +699,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	}
 
 	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
-		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
-			      crtc_hdisplay, crtc_vdisplay,
-			      psr_max_h, psr_max_v);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+			    crtc_hdisplay, crtc_vdisplay,
+			    psr_max_h, psr_max_v);
 		return false;
 	}
 
 	if (crtc_state->pipe_bpp > max_bpp) {
-		DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
-			      crtc_state->pipe_bpp, max_bpp);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
+			    crtc_state->pipe_bpp, max_bpp);
 		return false;
 	}
 
@@ -665,16 +720,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	 * x granularity.
 	 */
 	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
-		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
-			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+			    crtc_hdisplay, dev_priv->psr.su_x_granularity);
 		return false;
 	}
 
 	if (crtc_state->crc_enabled) {
-		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
 		return false;
 	}
 
+	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
 	return true;
 }
 
@@ -700,31 +758,36 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	 * hardcoded to PORT_A
 	 */
 	if (dig_port->base.port != PORT_A) {
-		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR condition failed: Port not supported\n");
 		return;
 	}
 
 	if (dev_priv->psr.sink_not_reliable) {
-		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR sink implementation is not reliable\n");
 		return;
 	}
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR condition failed: Interlaced mode enabled\n");
 		return;
 	}
 
 	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
 	if (psr_setup_time < 0) {
-		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
-			      intel_dp->psr_dpcd[1]);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+			    intel_dp->psr_dpcd[1]);
 		return;
 	}
 
 	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
 	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
-		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
-			      psr_setup_time);
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR condition failed: PSR setup time (%d us) too long\n",
+			    psr_setup_time);
 		return;
 	}
 
@@ -737,10 +800,12 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
-		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+		drm_WARN_ON(&dev_priv->drm,
+			    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
 
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
-	WARN_ON(dev_priv->psr.active);
+	drm_WARN_ON(&dev_priv->drm,
+		    intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
 	lockdep_assert_held(&dev_priv->psr.lock);
 
 	/* psr1 and psr2 are mutually exclusive.*/
@@ -768,11 +833,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
 					   !IS_GEMINILAKE(dev_priv))) {
 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
-		u32 chicken = I915_READ(reg);
+		u32 chicken = intel_de_read(dev_priv, reg);
 
 		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
 			   PSR2_ADD_VERTICAL_LINE_COUNT;
-		I915_WRITE(reg, chicken);
+		intel_de_write(dev_priv, reg, chicken);
 	}
 
 	/*
@@ -789,9 +854,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (INTEL_GEN(dev_priv) < 11)
 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
 
-	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+	intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+		       mask);
 
 	psr_irq_control(dev_priv);
+
+	if (crtc_state->dc3co_exitline) {
+		u32 val;
+
+		/*
+		 * TODO: if future platforms supports DC3CO in more than one
+		 * transcoder, EXITLINE will need to be unset when disabling PSR
+		 */
+		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
+		val &= ~EXITLINE_MASK;
+		val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+		val |= EXITLINE_ENABLE;
+		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
+	}
 }
 
 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -800,14 +880,16 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 	struct intel_dp *intel_dp = dev_priv->psr.dp;
 	u32 val;
 
-	WARN_ON(dev_priv->psr.enabled);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
 
 	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
-	dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
 	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+	/* DC5/DC6 requires at least 6 idle frames */
+	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+	dev_priv->psr.dc3co_exit_delay = val;
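
/*
 * Standalone restatement of the delay computation above (illustrative,
 * plain integers instead of the i915 types; the HZ value is an
 * assumption): at 60 Hz a frame is ~16667 us, so six idle frames are
 * ~100 ms, i.e. 26 jiffies with HZ == 250.
 */
static unsigned long dc3co_exit_delay_sketch(unsigned int frame_time_us,
					     unsigned int hz)
{
	unsigned long delay_us = 6ul * frame_time_us; /* DC5/DC6: 6 idle frames */

	return (delay_us * hz + 999999) / 1000000;    /* ~usecs_to_jiffies() */
}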
 
 	/*
 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -818,20 +900,22 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 	 * to avoid any rendering problems.
 	 */
 	if (INTEL_GEN(dev_priv) >= 12) {
-		val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+		val = intel_de_read(dev_priv,
+				    TRANS_PSR_IIR(dev_priv->psr.transcoder));
 		val &= EDP_PSR_ERROR(0);
 	} else {
-		val = I915_READ(EDP_PSR_IIR);
+		val = intel_de_read(dev_priv, EDP_PSR_IIR);
 		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
 	}
 	if (val) {
 		dev_priv->psr.sink_not_reliable = true;
-		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR interruption error set, not enabling PSR\n");
 		return;
 	}
 
-	DRM_DEBUG_KMS("Enabling PSR%s\n",
-		      dev_priv->psr.psr2_enabled ? "2" : "1");
+	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+		    dev_priv->psr.psr2_enabled ? "2" : "1");
 	intel_psr_setup_vsc(intel_dp, crtc_state);
 	intel_psr_enable_sink(intel_dp);
 	intel_psr_enable_source(intel_dp, crtc_state);
@@ -860,12 +944,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	if (!crtc_state->has_psr)
 		return;
 
-	WARN_ON(dev_priv->drrs.dp);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
 
 	mutex_lock(&dev_priv->psr.lock);
 
-	if (!psr_global_enabled(dev_priv->psr.debug)) {
-		DRM_DEBUG_KMS("PSR disabled by flag\n");
+	if (!psr_global_enabled(dev_priv)) {
+		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
 		goto unlock;
 	}
 
@@ -881,27 +965,33 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
 
 	if (!dev_priv->psr.active) {
 		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
-			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
-			WARN_ON(val & EDP_PSR2_ENABLE);
+			val = intel_de_read(dev_priv,
+					    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
 		}
 
-		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
-		WARN_ON(val & EDP_PSR_ENABLE);
+		val = intel_de_read(dev_priv,
+				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
 
 		return;
 	}
 
 	if (dev_priv->psr.psr2_enabled) {
 		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
-		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
-		WARN_ON(!(val & EDP_PSR2_ENABLE));
+		val = intel_de_read(dev_priv,
+				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
 		val &= ~EDP_PSR2_ENABLE;
-		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+		intel_de_write(dev_priv,
+			       EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
 	} else {
-		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
-		WARN_ON(!(val & EDP_PSR_ENABLE));
+		val = intel_de_read(dev_priv,
+				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
 		val &= ~EDP_PSR_ENABLE;
-		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+		intel_de_write(dev_priv,
+			       EDP_PSR_CTL(dev_priv->psr.transcoder), val);
 	}
 	dev_priv->psr.active = false;
 }
@@ -917,8 +1007,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	if (!dev_priv->psr.enabled)
 		return;
 
-	DRM_DEBUG_KMS("Disabling PSR%s\n",
-		      dev_priv->psr.psr2_enabled ? "2" : "1");
+	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+		    dev_priv->psr.psr2_enabled ? "2" : "1");
 
 	intel_psr_exit(dev_priv);
 
@@ -933,7 +1023,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	/* Wait till PSR is idle */
 	if (intel_de_wait_for_clear(dev_priv, psr_status,
 				    psr_status_mask, 2000))
-		DRM_ERROR("Timed out waiting PSR idle state\n");
+		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
 
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -959,7 +1049,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	if (!old_crtc_state->has_psr)
 		return;
 
-	if (WARN_ON(!CAN_PSR(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -968,7 +1058,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 
 	mutex_unlock(&dev_priv->psr.lock);
 	cancel_work_sync(&dev_priv->psr.work);
-	cancel_delayed_work_sync(&dev_priv->psr.idle_work);
+	cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
 }
 
 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -983,7 +1073,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
 		 * but it makes more sense to write to the currently active
 		 * pipe.
 		 */
-		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+		intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
 	else
 		/*
 		 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
@@ -1015,7 +1105,7 @@ void intel_psr_update(struct intel_dp *intel_dp,
 
 	mutex_lock(&dev_priv->psr.lock);
 
-	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+	enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
 	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
 
 	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
@@ -1103,7 +1193,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 
 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
 	if (err)
-		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for PSR Idle for re-enable\n");
 
 	/* After the unlocked wait, verify that PSR is still wanted! */
 	mutex_lock(&dev_priv->psr.lock);
@@ -1167,7 +1258,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
 
 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
 	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
-		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
 		return -EINVAL;
 	}
 
@@ -1279,14 +1370,12 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
  * When we completely rely on PSR2 S/W tracking in the future,
  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
  * event as well; therefore tgl_dc3co_flush() will need to be changed
- * accrodingly in future.
+ * accordingly in future.
  */
 static void
 tgl_dc3co_flush(struct drm_i915_private *dev_priv,
 		unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
-	u32 delay;
-
 	mutex_lock(&dev_priv->psr.lock);
 
 	if (!dev_priv->psr.dc3co_enabled)
@@ -1304,10 +1393,8 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
 		goto unlock;
 
 	tgl_psr2_enable_dc3co(dev_priv);
-	/* DC5/DC6 required idle frames = 6 */
-	delay = 6 * dev_priv->psr.dc3co_exit_delay;
-	mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
-			 usecs_to_jiffies(delay));
+	mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
+			 dev_priv->psr.dc3co_exit_delay);
 
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -1391,7 +1478,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
 
 	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
-	INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
+	INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
 	mutex_init(&dev_priv->psr.lock);
 }
 
@@ -1427,14 +1514,15 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
 
 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
 	if (r != 1) {
-		DRM_ERROR("Error reading ALPM status\n");
+		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
 		return;
 	}
 
 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
 		intel_psr_disable_locked(intel_dp);
 		psr->sink_not_reliable = true;
-		DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "ALPM lock timeout error, disabling PSR\n");
 
 		/* Clearing error */
 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
@@ -1450,14 +1538,15 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
 
 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
 	if (r != 1) {
-		DRM_ERROR("Error reading DP_PSR_ESI\n");
+		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
 		return;
 	}
 
 	if (val & DP_PSR_CAPS_CHANGE) {
 		intel_psr_disable_locked(intel_dp);
 		psr->sink_not_reliable = true;
-		DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Sink PSR capability changed, disabling PSR\n");
 
 		/* Clearing it */
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
@@ -1482,7 +1571,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 		goto exit;
 
 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
-		DRM_ERROR("Error reading PSR status or error status\n");
+		drm_err(&dev_priv->drm,
+			"Error reading PSR status or error status\n");
 		goto exit;
 	}
 
@@ -1492,17 +1582,22 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 	}
 
 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
-		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR sink internal error, disabling PSR\n");
 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
-		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR RFB storage error, disabling PSR\n");
 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
-		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
 	if (error_status & DP_PSR_LINK_CRC_ERROR)
-		DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR Link CRC error, disabling PSR\n");
 
 	if (error_status & ~errors)
-		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
-			  error_status & ~errors);
+		drm_err(&dev_priv->drm,
+			"PSR_ERROR_STATUS unhandled errors %x\n",
+			error_status & ~errors);
 	/* clear status register */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
 
@@ -1542,7 +1637,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
 		return;
 
 	intel_connector = to_intel_connector(connector);
-	dig_port = enc_to_dig_port(intel_connector->encoder);
+	dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
 	if (dev_priv->psr.dp != &dig_port->dp)
 		return;
 
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 399b154..46beb15 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,7 +14,7 @@
 static void quirk_ssc_force_disable(struct drm_i915_private *i915)
 {
 	i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
-	DRM_INFO("applying lvds SSC disable quirk\n");
+	drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
 }
 
 /*
@@ -24,14 +24,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
 static void quirk_invert_brightness(struct drm_i915_private *i915)
 {
 	i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
-	DRM_INFO("applying inverted panel brightness quirk\n");
+	drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
 }
 
 /* Some VBT's incorrectly indicate no backlight is present */
 static void quirk_backlight_present(struct drm_i915_private *i915)
 {
 	i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
-	DRM_INFO("applying backlight present quirk\n");
+	drm_info(&i915->drm, "applying backlight present quirk\n");
 }
 
 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
@@ -40,7 +40,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
 static void quirk_increase_t12_delay(struct drm_i915_private *i915)
 {
 	i915->quirks |= QUIRK_INCREASE_T12_DELAY;
-	DRM_INFO("Applying T12 delay quirk\n");
+	drm_info(&i915->drm, "Applying T12 delay quirk\n");
 }
 
 /*
@@ -50,7 +50,7 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
 static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
 {
 	i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
-	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+	drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
 }
 
 struct intel_quirk {
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
 					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
 				},
 			},
+			{
+				.callback = intel_dmi_reverse_brightness,
+				.ident = "Thundersoft TST178 tablet",
+				/* DMI strings are too generic, also match on BIOS date */
+				.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+					    DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+					    DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+					    DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+				},
+			},
 			{ }  /* terminating entry */
 		},
 		.hook = quirk_invert_brightness,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e8819fd..637d8fe 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_atomic.h"
@@ -217,23 +216,23 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 	int i;
 
 	if (HAS_PCH_SPLIT(dev_priv)) {
-		I915_WRITE(intel_sdvo->sdvo_reg, val);
-		POSTING_READ(intel_sdvo->sdvo_reg);
+		intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+		intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
 		/*
 		 * HW workaround: need to write this twice for an issue
 		 * that may result in the first write getting masked.
 		 */
 		if (HAS_PCH_IBX(dev_priv)) {
-			I915_WRITE(intel_sdvo->sdvo_reg, val);
-			POSTING_READ(intel_sdvo->sdvo_reg);
+			intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+			intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
 		}
 		return;
 	}
 
 	if (intel_sdvo->port == PORT_B)
-		cval = I915_READ(GEN3_SDVOC);
+		cval = intel_de_read(dev_priv, GEN3_SDVOC);
 	else
-		bval = I915_READ(GEN3_SDVOB);
+		bval = intel_de_read(dev_priv, GEN3_SDVOB);
 
 	/*
 	 * Write the registers twice for luck. Sometimes,
 	 * writing them only once doesn't appear to 'stick'.
 	 * The BIOS does this too. Yay, magic
 	 * The BIOS does this too. Yay, magic
 	 */
 	for (i = 0; i < 2; i++) {
-		I915_WRITE(GEN3_SDVOB, bval);
-		POSTING_READ(GEN3_SDVOB);
+		intel_de_write(dev_priv, GEN3_SDVOB, bval);
+		intel_de_posting_read(dev_priv, GEN3_SDVOB);
 
-		I915_WRITE(GEN3_SDVOC, cval);
-		POSTING_READ(GEN3_SDVOC);
+		intel_de_write(dev_priv, GEN3_SDVOC, cval);
+		intel_de_posting_read(dev_priv, GEN3_SDVOC);
 	}
 }
 
@@ -414,12 +413,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 {
 	const char *cmd_name;
 	int i, pos = 0;
-#define BUF_LEN 256
-	char buffer[BUF_LEN];
+	char buffer[64];
 
 #define BUF_PRINT(args...) \
-	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
-
+	pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
 
 	for (i = 0; i < args_len; i++) {
 		BUF_PRINT("%02X ", ((u8 *)args)[i]);
@@ -433,9 +430,9 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 		BUF_PRINT("(%s)", cmd_name);
 	else
 		BUF_PRINT("(%02X)", cmd);
-	BUG_ON(pos >= BUF_LEN - 1);
+
+	WARN_ON(pos >= sizeof(buffer) - 1);
 #undef BUF_PRINT
-#undef BUF_LEN
 
 	DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
 }
@@ -540,8 +537,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
 	u8 status;
 	int i, pos = 0;
-#define BUF_LEN 256
-	char buffer[BUF_LEN];
+	char buffer[64];
 
 	buffer[0] = '\0';
 
@@ -581,7 +577,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 	}
 
 #define BUF_PRINT(args...) \
-	pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+	pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
 
 	cmd_status = sdvo_cmd_status(status);
 	if (cmd_status)
@@ -600,9 +596,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
 			goto log_fail;
 		BUF_PRINT(" %02X", ((u8 *)response)[i]);
 	}
-	BUG_ON(pos >= BUF_LEN - 1);
+
+	WARN_ON(pos >= sizeof(buffer) - 1);
 #undef BUF_PRINT
-#undef BUF_LEN
 
 	DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
 	return true;
@@ -1267,6 +1263,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
 	pipe_config->clock_set = true;
 }
 
+static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
+				const struct drm_connector_state *conn_state)
+{
+	return sdvo->has_hdmi_monitor &&
+		READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
 static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
@@ -1322,12 +1325,15 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 	pipe_config->pixel_multiplier =
 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
 
-	if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
-		pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+	pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
 
-	if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
-	    (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
-		pipe_config->has_audio = true;
+	if (pipe_config->has_hdmi_sink) {
+		if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+			pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
+		else
+			pipe_config->has_audio =
+				intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
+	}
 
 	if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
 		/*
@@ -1470,7 +1476,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 	else
 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
 	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
-		DRM_INFO("Setting output timings on %s failed\n",
+		drm_info(&dev_priv->drm,
+			 "Setting output timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
 	/* Set the input timing to the screen. Assume always input 0. */
@@ -1494,12 +1501,14 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 	if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
 	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
-		DRM_INFO("Setting input timings on %s failed\n",
+		drm_info(&dev_priv->drm,
+			 "Setting input timings on %s failed\n",
 			 SDVO_NAME(intel_sdvo));
 
 	switch (crtc_state->pixel_multiplier) {
 	default:
-		WARN(1, "unknown pixel multiplier specified\n");
+		drm_WARN(&dev_priv->drm, 1,
+			 "unknown pixel multiplier specified\n");
 		/* fall through */
 	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
 	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1518,7 +1527,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 		if (INTEL_GEN(dev_priv) < 5)
 			sdvox |= SDVO_BORDER_ENABLE;
 	} else {
-		sdvox = I915_READ(intel_sdvo->sdvo_reg);
+		sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
 		if (intel_sdvo->port == PORT_B)
 			sdvox &= SDVOB_PRESERVE_MASK;
 		else
@@ -1564,7 +1573,7 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
 {
 	u32 val;
 
-	val = I915_READ(sdvo_reg);
+	val = intel_de_read(dev_priv, sdvo_reg);
 
 	/* asserts want to know the pipe even if the port is disabled */
 	if (HAS_PCH_CPT(dev_priv))
@@ -1607,7 +1616,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
 
-	sdvox = I915_READ(intel_sdvo->sdvo_reg);
+	sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
 
 	ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
 	if (!ret) {
@@ -1615,7 +1624,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 		 * Some sdvo encoders are not spec compliant and don't
 		 * implement the mandatory get_timings function.
 		 */
-		DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+		drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
 	} else {
 		if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
@@ -1667,9 +1676,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 		}
 	}
 
-	WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
-	     "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
-	     pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+	drm_WARN(dev,
+		 encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+		 "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+		 pipe_config->pixel_multiplier, encoder_pixel_multiplier);
 
 	if (sdvox & HDMI_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
@@ -1734,7 +1744,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
 		intel_sdvo_set_encoder_power_state(intel_sdvo,
 						   DRM_MODE_DPMS_OFF);
 
-	temp = I915_READ(intel_sdvo->sdvo_reg);
+	temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
 
 	temp &= ~SDVO_ENABLE;
 	intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1791,7 +1801,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 	int i;
 	bool success;
 
-	temp = I915_READ(intel_sdvo->sdvo_reg);
+	temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
 	temp |= SDVO_ENABLE;
 	intel_sdvo_write_sdvox(intel_sdvo, temp);
 
@@ -1806,8 +1816,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 	 * a given it the status is a success, we succeeded.
 	 */
 	if (success && !input1) {
-		DRM_DEBUG_KMS("First %s output reported failure to "
-				"sync\n", SDVO_NAME(intel_sdvo));
+		drm_dbg_kms(&dev_priv->drm,
+			    "First %s output reported failure to sync\n",
+			    SDVO_NAME(intel_sdvo));
 	}
 
 	if (0)
@@ -2219,8 +2230,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct drm_display_mode *newmode;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
+	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+		    connector->base.id, connector->name);
 
 	/*
 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2709,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 		 * Some SDVO devices have one-shot hotplug interrupts.
 		 * Ensure that they get re-enabled when an interrupt happens.
 		 */
+		intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
 		intel_encoder->hotplug = intel_sdvo_hotplug;
 		intel_sdvo_enable_hotplug(intel_encoder);
 	} else {
@@ -3229,9 +3241,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
 				   enum port port)
 {
 	if (HAS_PCH_SPLIT(dev_priv))
-		WARN_ON(port != PORT_B);
+		drm_WARN_ON(&dev_priv->drm, port != PORT_B);
 	else
-		WARN_ON(port != PORT_B && port != PORT_C);
+		drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
 }
 
 bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3269,8 +3281,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 		u8 byte;
 
 		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
-			DRM_DEBUG_KMS("No SDVO device found on %s\n",
-				      SDVO_NAME(intel_sdvo));
+			drm_dbg_kms(&dev_priv->drm,
+				    "No SDVO device found on %s\n",
+				    SDVO_NAME(intel_sdvo));
 			goto err;
 		}
 	}
@@ -3293,8 +3306,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
-		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
-			      SDVO_NAME(intel_sdvo));
+		drm_dbg_kms(&dev_priv->drm,
+			    "SDVO output failed to setup on %s\n",
+			    SDVO_NAME(intel_sdvo));
 		/* Output_setup can leave behind connectors! */
 		goto err_output;
 	}
@@ -3331,7 +3345,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 						    &intel_sdvo->pixel_clock_max))
 		goto err_output;
 
-	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+	drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
 			"clock range %dMHz - %dMHz, "
 			"input 1: %c, input 2: %c, "
 			"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224..72065e4 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
 
 #include <linux/types.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_reg.h"
 
 struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fca77ec1..deda351 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "i915_vgpu.h"
 #include "intel_atomic_plane.h"
 #include "intel_display_types.h"
 #include "intel_frontbuffer.h"
@@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	if (min <= 0 || max <= 0)
 		goto irq_disable;
 
-	if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+	if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
 		goto irq_disable;
 
 	/*
@@ -113,8 +113,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	 * re-entry as well.
 	 */
 	if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
-		DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
-			  psr_status);
+		drm_err(&dev_priv->drm,
+			"PSR idle timed out 0x%x, atomic update may fail\n",
+			psr_status);
 
 	local_irq_disable();
 
@@ -135,8 +136,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 			break;
 
 		if (!timeout) {
-			DRM_ERROR("Potential atomic update failure on pipe %c\n",
-				  pipe_name(crtc->pipe));
+			drm_err(&dev_priv->drm,
+				"Potential atomic update failure on pipe %c\n",
+				pipe_name(crtc->pipe));
 			break;
 		}
 
@@ -204,7 +206,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 	 * event outside of the critical section - the spinlock might spin for a
 	 * while ... */
 	if (new_crtc_state->uapi.event) {
-		WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+		drm_WARN_ON(&dev_priv->drm,
+			    drm_crtc_vblank_get(&crtc->base) != 0);
 
 		spin_lock(&crtc->base.dev->event_lock);
 		drm_crtc_arm_vblank_event(&crtc->base,
@@ -221,17 +224,20 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
 
 	if (crtc->debug.start_vbl_count &&
 	    crtc->debug.start_vbl_count != end_vbl_count) {
-		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
-			  pipe_name(pipe), crtc->debug.start_vbl_count,
-			  end_vbl_count,
-			  ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
-			  crtc->debug.min_vbl, crtc->debug.max_vbl,
-			  crtc->debug.scanline_start, scanline_end);
+		drm_err(&dev_priv->drm,
+			"Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+			pipe_name(pipe), crtc->debug.start_vbl_count,
+			end_vbl_count,
+			ktime_us_delta(end_vbl_time,
+				       crtc->debug.start_vbl_time),
+			crtc->debug.min_vbl, crtc->debug.max_vbl,
+			crtc->debug.scanline_start, scanline_end);
 	}
 #ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
 	else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
 		 VBLANK_EVASION_TIME_US)
-		DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+		drm_warn(&dev_priv->drm,
+			 "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
 			 pipe_name(pipe),
 			 ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
 			 VBLANK_EVASION_TIME_US);
@@ -278,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
 
 	/*
+	 * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+	 * abuses hsub/vsub so we can't use them here. But as they
+	 * are limited to 32bpp RGB formats we don't actually need
+	 * to check anything.
+	 */
+	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+		return 0;
+
+	/*
 	 * Hardware doesn't handle subpixel coordinates.
 	 * Adjust to (macro)pixel boundary, but be careful not to
 	 * increase the source viewport size, because that could
@@ -291,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
 	drm_rect_init(src, src_x << 16, src_y << 16,
 		      src_w << 16, src_h << 16);
 
-	if (!fb->format->is_yuv)
-		return 0;
-
-	/* YUV specific checks */
-	if (!rotated) {
+	if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+		hsub = 2;
+		vsub = 2;
+	} else {
 		hsub = fb->format->hsub;
 		vsub = fb->format->vsub;
-	} else {
-		hsub = vsub = max(fb->format->hsub, fb->format->vsub);
 	}
 
+	if (rotated)
+		hsub = vsub = max(hsub, vsub);
+
 	if (src_x % hsub || src_w % hsub) {
-		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
-			      src_x, src_w, hsub, rotated ? "rotated " : "");
+		DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+			      src_x, src_w, hsub, yesno(rotated));
 		return -EINVAL;
 	}
 
 	if (src_y % vsub || src_h % vsub) {
-		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
-			      src_y, src_h, vsub, rotated ? "rotated " : "");
+		DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+			      src_y, src_h, vsub, yesno(rotated));
 		return -EINVAL;
 	}
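
/*
 * Minimal restatement of the alignment rule enforced above (illustrative,
 * plain ints): source offsets and sizes must be multiples of the format's
 * subsampling factors, where rotated RGB565 is treated as 2x2 subsampled
 * and 90/270-degree rotation uses max(hsub, vsub) on both axes.
 */
static bool src_rect_aligned(unsigned int x, unsigned int w, unsigned int hsub,
			     unsigned int y, unsigned int h, unsigned int vsub)
{
	return !(x % hsub) && !(w % hsub) && !(y % vsub) && !(h % vsub);
}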
 
@@ -349,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
 			       const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
-	unsigned int pixel_rate = crtc_state->pixel_rate;
-	unsigned int src_w, src_h, dst_w, dst_h;
 	unsigned int num, den;
+	unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
 
 	skl_plane_ratio(crtc_state, plane_state, &num, &den);
 
@@ -359,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		den *= 2;
 
-	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-	dst_w = drm_rect_width(&plane_state->uapi.dst);
-	dst_h = drm_rect_height(&plane_state->uapi.dst);
-
-	/* Downscaling limits the maximum pixel rate */
-	dst_w = min(src_w, dst_w);
-	dst_h = min(src_h, dst_h);
-
-	return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
-				  mul_u32_u32(den, dst_w * dst_h));
+	return DIV_ROUND_UP(pixel_rate * num, den);
 }
 
 static unsigned int
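
With the down-scaling factor now folded into intel_plane_pixel_rate(), the minimum cdclk reduces to a single rounded-up division. DIV_ROUND_UP is the usual kernel macro; a worked example of the arithmetic, with a made-up num/den ratio of 9/8:

/* Kernel's round-up integer division: */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * e.g. pixel_rate = 148500 (kHz) with num/den = 9/8:
 * DIV_ROUND_UP(148500 * 9, 8) = DIV_ROUND_UP(1336500, 8) = 167063 kHz.
 */
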
@@ -434,14 +439,16 @@ skl_program_scaler(struct intel_plane *plane,
 		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 	}
 
-	I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
-		      PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
-	I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
-		      PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-	I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
-		      PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-	I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-	I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id),
+			  PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+			  PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+			  PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+			  (crtc_x << 16) | crtc_y);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+			  (crtc_w << 16) | crtc_h);
 }
 
 /* Preoffset values for YUV to RGB Conversion */
@@ -547,28 +554,37 @@ icl_program_input_csc(struct intel_plane *plane,
 	else
 		csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
 
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
-		      GOFF(csc[1]));
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
-		      GOFF(csc[4]));
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
-		      GOFF(csc[7]));
-	I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+			  ROFF(csc[0]) | GOFF(csc[1]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+			  BOFF(csc[2]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+			  ROFF(csc[3]) | GOFF(csc[4]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+			  BOFF(csc[5]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+			  ROFF(csc[6]) | GOFF(csc[7]));
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+			  BOFF(csc[8]));
 
-	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
-		      PREOFF_YUV_TO_RGB_HI);
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+			  PREOFF_YUV_TO_RGB_HI);
 	if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+		intel_de_write_fw(dev_priv,
+				  PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+				  0);
 	else
-		I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-			      PREOFF_YUV_TO_RGB_ME);
-	I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
-		      PREOFF_YUV_TO_RGB_LO);
-	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
-	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
-	I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+		intel_de_write_fw(dev_priv,
+				  PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+				  PREOFF_YUV_TO_RGB_ME);
+	intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+			  PREOFF_YUV_TO_RGB_LO);
+	intel_de_write_fw(dev_priv,
+			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+	intel_de_write_fw(dev_priv,
+			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+	intel_de_write_fw(dev_priv,
+			  PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
 }
 
 static void
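
The matrix and pre/post offsets programmed above implement a fixed-point YCbCr-to-RGB conversion. For reference, the textbook BT.601 limited-range math such coefficient tables approximate (floating point here purely for clarity; the exact hardware encoding is fixed point with separate offset registers, so this is an assumption about intent, not the register format):

static void ycbcr_to_rgb_bt601(int y, int cb, int cr,
			       double *r, double *g, double *b)
{
	double yf = 1.164 * (y - 16);	/* limited-range Y pre-offset */

	*r = yf + 1.596 * (cr - 128);
	*g = yf - 0.392 * (cb - 128) - 0.813 * (cr - 128);
	*b = yf + 2.017 * (cb - 128);
}
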
@@ -623,44 +639,49 @@ skl_program_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
-	I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
-	I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+	intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+	intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+			  (crtc_y << 16) | crtc_x);
+	intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+			  (src_h << 16) | src_w);
 
 	if (INTEL_GEN(dev_priv) < 12)
 		aux_dist |= aux_stride;
-	I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+	intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
 
 	if (icl_is_hdr_plane(dev_priv, plane_id))
-		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
+		intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+				  plane_state->cus_ctl);
 
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
-		I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+		intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+				  plane_color_ctl);
 
 	if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
 		icl_program_input_csc(plane, crtc_state, plane_state);
 
 	skl_write_plane_wm(plane, crtc_state);
 
-	I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
-	I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
-	I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+	intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+			  key->min_value);
+	intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+	intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
 
-	I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+	intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+			  (y << 16) | x);
 
 	if (INTEL_GEN(dev_priv) < 11)
-		I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
-			      (plane_state->color_plane[1].y << 16) |
-			      plane_state->color_plane[1].x);
+		intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+				  (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
 
 	/*
 	 * The control register self-arms if the plane was previously
 	 * disabled. Try to make the plane enable atomic by writing
 	 * the control register just before the surface register.
 	 */
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
-		      intel_plane_ggtt_offset(plane_state) + surf_addr);
+	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+			  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
 	if (plane_state->scaler_id >= 0)
 		skl_program_scaler(plane, crtc_state, plane_state);
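
Note the pattern in skl_program_plane(): every access uses the _fw ("raw", no per-register forcewake handling) accessor, the whole sequence is bracketed by uncore.lock, and the self-arming control register is written just before the surface register so the update latches atomically. A generic kernel-style sketch of that shape, with invented names:

#include <linux/io.h>
#include <linux/spinlock.h>

struct demo_device {			/* hypothetical */
	spinlock_t mmio_lock;
	void __iomem *mmio;
};

static void reg_write_raw(struct demo_device *dev, u32 reg, u32 val)
{
	writel(val, dev->mmio + reg);	/* stands in for intel_de_write_fw() */
}

static void demo_program_plane(struct demo_device *dev, u32 stride, u32 ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mmio_lock, flags);
	reg_write_raw(dev, 0x100, stride);	/* hypothetical offsets */
	/* Control register last: writing it arms the whole update. */
	reg_write_raw(dev, 0x104, ctl);
	spin_unlock_irqrestore(&dev->mmio_lock, flags);
}
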
@@ -693,12 +714,12 @@ skl_disable_plane(struct intel_plane *plane,
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
 	if (icl_is_hdr_plane(dev_priv, plane_id))
-		I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+		intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
 
 	skl_write_plane_wm(plane, crtc_state);
 
-	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
-	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -718,7 +739,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+	ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
 
 	*pipe = plane->pipe;
 
@@ -774,23 +795,36 @@ chv_update_csc(const struct intel_plane_state *plane_state)
 	if (!fb->format->is_yuv)
 		return;
 
-	I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-	I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-	I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+	intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+			  SPCSC_OOFF(0) | SPCSC_IOFF(0));
+	intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+			  SPCSC_OOFF(0) | SPCSC_IOFF(0));
+	intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+			  SPCSC_OOFF(0) | SPCSC_IOFF(0));
 
-	I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
-	I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
-	I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
-	I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
-	I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+	intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+			  SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+	intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+			  SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+	intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+			  SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+	intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+			  SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+	intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
 
-	I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
-	I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-	I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+	intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+			  SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+	intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+			  SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+	intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+			  SPCSC_IMAX(512) | SPCSC_IMIN(-512));
 
-	I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-	I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
-	I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+			  SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+			  SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+	intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+			  SPCSC_OMAX(1023) | SPCSC_OMIN(0));
 }
 
 #define SIN_0 0
@@ -829,10 +863,10 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
 	}
 
 	/* FIXME these registers are single buffered :( */
-	I915_WRITE_FW(SPCLRC0(pipe, plane_id),
-		      SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
-	I915_WRITE_FW(SPCLRC1(pipe, plane_id),
-		      SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+	intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+			  SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+	intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+			  SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
 }
 
 static void
@@ -1019,10 +1053,8 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
 	/* FIXME these registers are single buffered :( */
 	/* The two end points are implicit (0.0 and 1.0) */
 	for (i = 1; i < 8 - 1; i++)
-		I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
-			      gamma[i] << 16 |
-			      gamma[i] << 8 |
-			      gamma[i]);
+		intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+				  gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
 }
 
 static void
@@ -1055,32 +1087,37 @@ vlv_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
-		      plane_state->color_plane[0].stride);
-	I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
-	I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
-	I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
+	intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+			  plane_state->color_plane[0].stride);
+	intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+			  (crtc_y << 16) | crtc_x);
+	intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+			  (crtc_h << 16) | crtc_w);
+	intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
 
 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
 		chv_update_csc(plane_state);
 
 	if (key->flags) {
-		I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
-		I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
-		I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+		intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+				  key->min_value);
+		intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+				  key->channel_mask);
+		intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+				  key->max_value);
 	}
 
-	I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
-	I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+	intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
+	intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
 
 	/*
 	 * The control register self-arms if the plane was previously
 	 * disabled. Try to make the plane enable atomic by writing
 	 * the control register just before the surface register.
 	 */
-	I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
-	I915_WRITE_FW(SPSURF(pipe, plane_id),
-		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+	intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
+	intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+			  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 
 	vlv_update_clrc(plane_state);
 	vlv_update_gamma(plane_state);
@@ -1099,8 +1136,8 @@ vlv_disable_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-	I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+	intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
+	intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1120,7 +1157,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+	ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
 
 	*pipe = plane->pipe;
 
@@ -1424,19 +1461,17 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
 
 	/* FIXME these registers are single buffered :( */
 	for (i = 0; i < 16; i++)
-		I915_WRITE_FW(SPRGAMC(pipe, i),
-			      gamma[i] << 20 |
-			      gamma[i] << 10 |
-			      gamma[i]);
+		intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+				  gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
 
-	I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
-	I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
-	I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
 	i++;
 
-	I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
-	I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
-	I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
+	intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
 	i++;
 }
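
The gamma writes above replicate one 10-bit ramp entry into all three colour channels of each register word; the packing is nothing more than shifts:

#include <stdint.h>

/* gamma[i] << 20 | gamma[i] << 10 | gamma[i], as in ivb_update_gamma() */
static uint32_t pack_gamma_rgb10(uint16_t v)
{
	v &= 0x3ff;	/* 10 significant bits per channel */
	return ((uint32_t)v << 20) | ((uint32_t)v << 10) | v;
}
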
 
@@ -1476,25 +1511,27 @@ ivb_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
-	I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-	I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+	intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+			  plane_state->color_plane[0].stride);
+	intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+	intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
 	if (IS_IVYBRIDGE(dev_priv))
-		I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+		intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
 
 	if (key->flags) {
-		I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
-		I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+		intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
+		intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+				  key->channel_mask);
+		intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
 	}
 
 	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
 	 * register */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+		intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x);
 	} else {
-		I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
-		I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+		intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
+		intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x);
 	}
 
 	/*
@@ -1502,9 +1539,9 @@ ivb_update_plane(struct intel_plane *plane,
 	 * disabled. Try to make the plane enable atomic by writing
 	 * the control register just before the surface register.
 	 */
-	I915_WRITE_FW(SPRCTL(pipe), sprctl);
-	I915_WRITE_FW(SPRSURF(pipe),
-		      intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+	intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
+	intel_de_write_fw(dev_priv, SPRSURF(pipe),
+			  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
 
 	ivb_update_gamma(plane_state);
 
@@ -1521,11 +1558,11 @@ ivb_disable_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(SPRCTL(pipe), 0);
+	intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
 	/* Disable the scaler */
 	if (IS_IVYBRIDGE(dev_priv))
-		I915_WRITE_FW(SPRSCALE(pipe), 0);
-	I915_WRITE_FW(SPRSURF(pipe), 0);
+		intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
+	intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1544,7 +1581,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	ret =  I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+	ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
 
 	*pipe = plane->pipe;
 
@@ -1710,10 +1747,8 @@ static void g4x_update_gamma(const struct intel_plane_state *plane_state)
 	/* FIXME these registers are single buffered :( */
 	/* The two end points are implicit (0.0 and 1.0) */
 	for (i = 1; i < 8 - 1; i++)
-		I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
-			      gamma[i] << 16 |
-			      gamma[i] << 8 |
-			      gamma[i]);
+		intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+				  gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
 }
 
 static void ilk_sprite_linear_gamma(u16 gamma[17])
@@ -1741,14 +1776,12 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
 
 	/* FIXME these registers are single buffered :( */
 	for (i = 0; i < 16; i++)
-		I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
-			      gamma[i] << 20 |
-			      gamma[i] << 10 |
-			      gamma[i]);
+		intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+				  gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
 
-	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
-	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
-	I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+	intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+	intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+	intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
 	i++;
 }
 
@@ -1788,28 +1821,30 @@ g4x_update_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
-	I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-	I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
-	I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+	intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+			  plane_state->color_plane[0].stride);
+	intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+	intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+	intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
 
 	if (key->flags) {
-		I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
-		I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
-		I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+		intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
+		intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+				  key->channel_mask);
+		intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
 	}
 
-	I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
-	I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+	intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
+	intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
 
 	/*
 	 * The control register self-arms if the plane was previously
 	 * disabled. Try to make the plane enable atomic by writing
 	 * the control register just before the surface register.
 	 */
-	I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
-	I915_WRITE_FW(DVSSURF(pipe),
-		      intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+	intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
+	intel_de_write_fw(dev_priv, DVSSURF(pipe),
+			  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
 
 	if (IS_G4X(dev_priv))
 		g4x_update_gamma(plane_state);
@@ -1829,10 +1864,10 @@ g4x_disable_plane(struct intel_plane *plane,
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-	I915_WRITE_FW(DVSCNTR(pipe), 0);
+	intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
 	/* Disable the scaler */
-	I915_WRITE_FW(DVSSCALE(pipe), 0);
-	I915_WRITE_FW(DVSSURF(pipe), 0);
+	intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
+	intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
@@ -1851,7 +1886,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
 	if (!wakeref)
 		return false;
 
-	ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+	ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
 
 	*pipe = plane->pipe;
 
@@ -1999,7 +2034,8 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
 	if (IS_CHERRYVIEW(dev_priv) &&
 	    rotation & DRM_MODE_ROTATE_180 &&
 	    rotation & DRM_MODE_REFLECT_X) {
-		DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Cannot rotate and reflect at the same time\n");
 		return -EINVAL;
 	}
 
@@ -2040,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
 	return 0;
 }
 
+static bool intel_format_is_p01x(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_P010:
+	case DRM_FORMAT_P012:
+	case DRM_FORMAT_P016:
+		return true;
+	default:
+		return false;
+	}
+}
+
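
DRM_FORMAT_P010/P012/P016 are standard DRM fourccs, so the predicate the new helper adds can be reproduced essentially verbatim in userspace against libdrm's drm_fourcc.h:

#include <stdbool.h>
#include <stdint.h>
#include <drm/drm_fourcc.h>

static bool format_is_p01x(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
		return true;
	default:
		return false;
	}
}
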
 static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 			      const struct intel_plane_state *plane_state)
 {
@@ -2054,21 +2102,24 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 
 	if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
 	    is_ccs_modifier(fb->modifier)) {
-		DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
-			      rotation);
+		drm_dbg_kms(&dev_priv->drm,
+			    "RC support only with 0/180 degree rotation (%x)\n",
+			    rotation);
 		return -EINVAL;
 	}
 
 	if (rotation & DRM_MODE_REFLECT_X &&
 	    fb->modifier == DRM_FORMAT_MOD_LINEAR) {
-		DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "horizontal flip is not supported with linear surface formats\n");
 		return -EINVAL;
 	}
 
 	if (drm_rotation_90_or_270(rotation)) {
 		if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
 		    fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
-			DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "Y/Yf tiling required for 90/270!\n");
 			return -EINVAL;
 		}
 
@@ -2091,9 +2142,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 		case DRM_FORMAT_Y216:
 		case DRM_FORMAT_XVYU12_16161616:
 		case DRM_FORMAT_XVYU16161616:
-			DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
-				      drm_get_format_name(fb->format->format,
-							  &format_name));
+			drm_dbg_kms(&dev_priv->drm,
+				    "Unsupported pixel format %s for 90/270!\n",
+				    drm_get_format_name(fb->format->format,
+							&format_name));
 			return -EINVAL;
 		default:
 			break;
@@ -2109,7 +2161,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 	     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
 	     fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
-		DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Y/Yf tiling not supported in IF-ID mode\n");
+		return -EINVAL;
+	}
+
+	/* Wa_1606054188:tgl */
+	if (IS_TIGERLAKE(dev_priv) &&
+	    plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+	    intel_format_is_p01x(fb->format->format)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Source color keying not supported with P01x formats\n");
 		return -EINVAL;
 	}
 
@@ -2136,10 +2198,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
 	 */
 	if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
 	    (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
-		DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
-			      crtc_x + crtc_w < 4 ? "end" : "start",
-			      crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
-			      4, pipe_src_w - 4);
+		drm_dbg_kms(&dev_priv->drm,
+			    "requested plane X %s position %d invalid (valid range %d-%d)\n",
+			    crtc_x + crtc_w < 4 ? "end" : "start",
+			    crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+			    4, pipe_src_w - 4);
 		return -ERANGE;
 	}
 
@@ -2968,7 +3031,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 	struct intel_plane *plane;
 	enum drm_plane_type plane_type;
 	unsigned int supported_rotations;
-	unsigned int possible_crtcs;
 	const u64 *modifiers;
 	const u32 *formats;
 	int num_formats;
@@ -3023,10 +3085,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
 	else
 		plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-	possible_crtcs = BIT(pipe);
-
 	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-				       possible_crtcs, plane_funcs,
+				       0, plane_funcs,
 				       formats, num_formats, modifiers,
 				       plane_type,
 				       "plane %d%c", plane_id + 1,
@@ -3077,7 +3137,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 {
 	struct intel_plane *plane;
 	const struct drm_plane_funcs *plane_funcs;
-	unsigned long possible_crtcs;
 	unsigned int supported_rotations;
 	const u64 *modifiers;
 	const u32 *formats;
@@ -3162,10 +3221,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 	plane->id = PLANE_SPRITE0 + sprite;
 	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
 
-	possible_crtcs = BIT(pipe);
-
 	ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
-				       possible_crtcs, plane_funcs,
+				       0, plane_funcs,
 				       formats, num_formats, modifiers,
 				       DRM_PLANE_TYPE_OVERLAY,
 				       "sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 7773169..9b850c1 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -61,7 +61,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
 	lane_mask = intel_uncore_read(uncore,
 				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
 
-	WARN_ON(lane_mask == 0xffffffff);
+	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
 
 	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
 	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -76,7 +76,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
 	pin_mask = intel_uncore_read(uncore,
 				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
 
-	WARN_ON(pin_mask == 0xffffffff);
+	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
 
 	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
 	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -120,7 +120,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
 	struct intel_uncore *uncore = &i915->uncore;
 	u32 val;
 
-	WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+	drm_WARN_ON(&i915->drm,
+		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
 
 	val = intel_uncore_read(uncore,
 				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
@@ -181,8 +182,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
 				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
 
 	if (val == 0xffffffff) {
-		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY in TCCOLD, nothing connected\n",
+			    dig_port->tc_port_name);
 		return mask;
 	}
 
@@ -195,7 +197,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
 		mask |= BIT(TC_PORT_LEGACY);
 
 	/* The sink can be connected only in a single mode. */
-	if (!WARN_ON(hweight32(mask) > 1))
+	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
 		tc_port_fixup_legacy_flag(dig_port, mask);
 
 	return mask;
@@ -210,8 +212,9 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
 	val = intel_uncore_read(uncore,
 				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
 	if (val == 0xffffffff) {
-		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY in TCCOLD, assuming not complete\n",
+			    dig_port->tc_port_name);
 		return false;
 	}
 
@@ -228,8 +231,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
 	val = intel_uncore_read(uncore,
 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
 	if (val == 0xffffffff) {
-		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
-			      dig_port->tc_port_name,
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+			    dig_port->tc_port_name,
-			      enableddisabled(enable));
+			    enableddisabled(enable));
 
 		return false;
@@ -243,8 +247,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
 			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
 
 	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
-		DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY complete clear timed out\n",
+			    dig_port->tc_port_name);
 
 	return true;
 }
@@ -258,8 +263,9 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
 	val = intel_uncore_read(uncore,
 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
 	if (val == 0xffffffff) {
-		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY in TCCOLD, assume safe mode\n",
+			    dig_port->tc_port_name);
 		return true;
 	}
 
@@ -409,16 +415,17 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
 	enum tc_port_mode old_tc_mode = dig_port->tc_mode;
 
 	intel_display_power_flush_work(i915);
-	WARN_ON(intel_display_power_is_enabled(i915,
-					       intel_aux_power_domain(dig_port)));
+	drm_WARN_ON(&i915->drm,
+		    intel_display_power_is_enabled(i915,
+					intel_aux_power_domain(dig_port)));
 
 	icl_tc_phy_disconnect(dig_port);
 	icl_tc_phy_connect(dig_port, required_lanes);
 
-	DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
-		      dig_port->tc_port_name,
-		      tc_port_mode_name(old_tc_mode),
-		      tc_port_mode_name(dig_port->tc_mode));
+	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+		    dig_port->tc_port_name,
+		    tc_port_mode_name(old_tc_mode),
+		    tc_port_mode_name(dig_port->tc_mode));
 }
 
 static void
@@ -503,7 +510,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
 	    intel_tc_port_needs_reset(dig_port))
 		intel_tc_port_reset_mode(dig_port, required_lanes);
 
-	WARN_ON(dig_port->tc_lock_wakeref);
+	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
 	dig_port->tc_lock_wakeref = wakeref;
 }
 
@@ -550,7 +557,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
 	enum port port = dig_port->base.port;
 	enum tc_port tc_port = intel_port_to_tc(i915, port);
 
-	if (WARN_ON(tc_port == PORT_TC_NONE))
+	if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
 		return;
 
 	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
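
The intel_tc.c conversions swap bare WARN_ON() for drm_WARN_ON(&i915->drm, ...) so the resulting splat names the device that tripped it, which matters on multi-GPU systems. An illustrative stand-in for what such a device-qualified assertion does (the real macro lives in include/drm/drm_print.h; this is not its implementation):

#include <stdio.h>

struct demo_dev { const char *name; };	/* hypothetical */

#define demo_warn_on(dev, cond)						\
	({								\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, "%s: WARN: %s\n",		\
				(dev)->name, #cond);			\
		__ret;	/* usable inside if (), like drm_WARN_ON */	\
	})
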
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index c75e0ce..d2e3a3a 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "intel_connector.h"
@@ -907,7 +906,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-	u32 tmp = I915_READ(TV_CTL);
+	u32 tmp = intel_de_read(dev_priv, TV_CTL);
 
 	*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
 
@@ -926,7 +925,8 @@ intel_enable_tv(struct intel_encoder *encoder,
 	intel_wait_for_vblank(dev_priv,
 			      to_intel_crtc(pipe_config->uapi.crtc)->pipe);
 
-	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+	intel_de_write(dev_priv, TV_CTL,
+		       intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
 }
 
 static void
@@ -937,7 +937,8 @@ intel_disable_tv(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+	intel_de_write(dev_priv, TV_CTL,
+		       intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
 }
 
 static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
@@ -1095,11 +1096,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
 
-	tv_ctl = I915_READ(TV_CTL);
-	hctl1 = I915_READ(TV_H_CTL_1);
-	hctl3 = I915_READ(TV_H_CTL_3);
-	vctl1 = I915_READ(TV_V_CTL_1);
-	vctl2 = I915_READ(TV_V_CTL_2);
+	tv_ctl = intel_de_read(dev_priv, TV_CTL);
+	hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
+	hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
+	vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
+	vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
 
 	tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
 	tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1134,17 +1135,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
 		break;
 	}
 
-	tmp = I915_READ(TV_WIN_POS);
+	tmp = intel_de_read(dev_priv, TV_WIN_POS);
 	xpos = tmp >> 16;
 	ypos = tmp & 0xffff;
 
-	tmp = I915_READ(TV_WIN_SIZE);
+	tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
 	xsize = tmp >> 16;
 	ysize = tmp & 0xffff;
 
 	intel_tv_mode_to_mode(&mode, &tv_mode);
 
-	DRM_DEBUG_KMS("TV mode:\n");
+	drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
 	drm_mode_debug_printmodeline(&mode);
 
 	intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1200,7 +1201,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
-	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+	drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
 	pipe_config->port_clock = tv_mode->clock;
@@ -1215,7 +1216,8 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 		extra = adjusted_mode->crtc_vdisplay - vdisplay;
 
 		if (extra < 0) {
-			DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+			drm_dbg_kms(&dev_priv->drm,
+				    "No vertical scaling for >1024 pixel wide modes\n");
 			return -EINVAL;
 		}
 
@@ -1248,7 +1250,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 		tv_conn_state->bypass_vfilter = false;
 	}
 
-	DRM_DEBUG_KMS("TV mode:\n");
+	drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
 	drm_mode_debug_printmodeline(adjusted_mode);
 
 	/*
@@ -1380,16 +1382,16 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
 	vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
 		(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
 
-	I915_WRITE(TV_H_CTL_1, hctl1);
-	I915_WRITE(TV_H_CTL_2, hctl2);
-	I915_WRITE(TV_H_CTL_3, hctl3);
-	I915_WRITE(TV_V_CTL_1, vctl1);
-	I915_WRITE(TV_V_CTL_2, vctl2);
-	I915_WRITE(TV_V_CTL_3, vctl3);
-	I915_WRITE(TV_V_CTL_4, vctl4);
-	I915_WRITE(TV_V_CTL_5, vctl5);
-	I915_WRITE(TV_V_CTL_6, vctl6);
-	I915_WRITE(TV_V_CTL_7, vctl7);
+	intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
+	intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
+	intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
+	intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
+	intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
+	intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
+	intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
+	intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
+	intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
+	intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
 }
 
 static void set_color_conversion(struct drm_i915_private *dev_priv,
@@ -1398,18 +1400,18 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 	if (!color_conversion)
 		return;
 
-	I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
-		   color_conversion->gy);
-	I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
-		   color_conversion->ay);
-	I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
-		   color_conversion->gu);
-	I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
-		   color_conversion->au);
-	I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
-		   color_conversion->gv);
-	I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
-		   color_conversion->av);
+	intel_de_write(dev_priv, TV_CSC_Y,
+		       (color_conversion->ry << 16) | color_conversion->gy);
+	intel_de_write(dev_priv, TV_CSC_Y2,
+		       (color_conversion->by << 16) | color_conversion->ay);
+	intel_de_write(dev_priv, TV_CSC_U,
+		       (color_conversion->ru << 16) | color_conversion->gu);
+	intel_de_write(dev_priv, TV_CSC_U2,
+		       (color_conversion->bu << 16) | color_conversion->au);
+	intel_de_write(dev_priv, TV_CSC_V,
+		       (color_conversion->rv << 16) | color_conversion->gv);
+	intel_de_write(dev_priv, TV_CSC_V2,
+		       (color_conversion->bv << 16) | color_conversion->av);
 }
 
 static void intel_tv_pre_enable(struct intel_encoder *encoder,
@@ -1434,7 +1436,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 	if (!tv_mode)
 		return;	/* can't happen (mode_prepare prevents this) */
 
-	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl = intel_de_read(dev_priv, TV_CTL);
 	tv_ctl &= TV_CTL_SAVE;
 
 	switch (intel_tv->type) {
@@ -1511,21 +1513,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 
 	set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
 
-	I915_WRITE(TV_SC_CTL_1, scctl1);
-	I915_WRITE(TV_SC_CTL_2, scctl2);
-	I915_WRITE(TV_SC_CTL_3, scctl3);
+	intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
+	intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
+	intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
 
 	set_color_conversion(dev_priv, color_conversion);
 
 	if (INTEL_GEN(dev_priv) >= 4)
-		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+		intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
 	else
-		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+		intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
 
 	if (video_levels)
-		I915_WRITE(TV_CLR_LEVEL,
-			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
-			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+		intel_de_write(dev_priv, TV_CLR_LEVEL,
+			       ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
 
 	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
 
@@ -1533,7 +1534,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 	tv_filter_ctl = TV_AUTO_SCALE;
 	if (tv_conn_state->bypass_vfilter)
 		tv_filter_ctl |= TV_V_FILTER_BYPASS;
-	I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+	intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
 
 	xsize = tv_mode->hblank_start - tv_mode->hblank_end;
 	ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1544,20 +1545,25 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
 		  conn_state->tv.margins.right);
 	ysize -= (tv_conn_state->margins.top +
 		  tv_conn_state->margins.bottom);
-	I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
-	I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+	intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
+	intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
 
 	j = 0;
 	for (i = 0; i < 60; i++)
-		I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
+		intel_de_write(dev_priv, TV_H_LUMA(i),
+			       tv_mode->filter_table[j++]);
 	for (i = 0; i < 60; i++)
-		I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
+		intel_de_write(dev_priv, TV_H_CHROMA(i),
+			       tv_mode->filter_table[j++]);
 	for (i = 0; i < 43; i++)
-		I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
+		intel_de_write(dev_priv, TV_V_LUMA(i),
+			       tv_mode->filter_table[j++]);
 	for (i = 0; i < 43; i++)
-		I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
-	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
-	I915_WRITE(TV_CTL, tv_ctl);
+		intel_de_write(dev_priv, TV_V_CHROMA(i),
+			       tv_mode->filter_table[j++]);
+	intel_de_write(dev_priv, TV_DAC,
+		       intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
+	intel_de_write(dev_priv, TV_CTL, tv_ctl);
 }
 
 static int
@@ -1581,8 +1587,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 
-	save_tv_dac = tv_dac = I915_READ(TV_DAC);
-	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+	save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
+	save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
 
 	/* Poll for TV detection */
 	tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1608,15 +1614,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
 			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
 
-	I915_WRITE(TV_CTL, tv_ctl);
-	I915_WRITE(TV_DAC, tv_dac);
-	POSTING_READ(TV_DAC);
+	intel_de_write(dev_priv, TV_CTL, tv_ctl);
+	intel_de_write(dev_priv, TV_DAC, tv_dac);
+	intel_de_posting_read(dev_priv, TV_DAC);
 
 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
 
 	type = -1;
-	tv_dac = I915_READ(TV_DAC);
-	DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+	tv_dac = intel_de_read(dev_priv, TV_DAC);
+	drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
 	/*
 	 *  A B C
 	 *  0 1 1 Composite
@@ -1624,22 +1630,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	 *  0 0 0 Component
 	 */
 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG_KMS("Detected Composite TV connection\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Detected Composite TV connection\n");
 		type = DRM_MODE_CONNECTOR_Composite;
 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Detected S-Video TV connection\n");
 		type = DRM_MODE_CONNECTOR_SVIDEO;
 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG_KMS("Detected Component TV connection\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Detected Component TV connection\n");
 		type = DRM_MODE_CONNECTOR_Component;
 	} else {
-		DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
 		type = -1;
 	}
 
-	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
-	I915_WRITE(TV_CTL, save_tv_ctl);
-	POSTING_READ(TV_CTL);
+	intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
+	intel_de_posting_read(dev_priv, TV_CTL);
 
 	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -1794,7 +1803,7 @@ intel_tv_get_modes(struct drm_connector *connector)
 		 */
 		intel_tv_mode_to_mode(mode, tv_mode);
 		if (count == 0) {
-			DRM_DEBUG_KMS("TV mode:\n");
+			drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
 			drm_mode_debug_printmodeline(mode);
 		}
 		intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1870,11 +1879,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
 	int i, initial_mode = 0;
 	struct drm_connector_state *state;
 
-	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+	if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
 		return;
 
 	if (!intel_bios_is_tv_present(dev_priv)) {
-		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
 		return;
 	}
 
@@ -1882,15 +1891,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
 	 * Sanity check the TV output by checking to see if the
 	 * DAC register holds a value
 	 */
-	save_tv_dac = I915_READ(TV_DAC);
+	save_tv_dac = intel_de_read(dev_priv, TV_DAC);
 
-	I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
-	tv_dac_on = I915_READ(TV_DAC);
+	intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+	tv_dac_on = intel_de_read(dev_priv, TV_DAC);
 
-	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
-	tv_dac_off = I915_READ(TV_DAC);
+	intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	tv_dac_off = intel_de_read(dev_priv, TV_DAC);
 
-	I915_WRITE(TV_DAC, save_tv_dac);
+	intel_de_write(dev_priv, TV_DAC, save_tv_dac);
 
 	/*
 	 * If the register does not hold the state change enable
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4d0c23b..05c7cbe 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -111,7 +111,7 @@ enum bdb_block_id {
 	BDB_LVDS_LFP_DATA_PTRS		= 41,
 	BDB_LVDS_LFP_DATA		= 42,
 	BDB_LVDS_BACKLIGHT		= 43,
-	BDB_LVDS_POWER			= 44,
+	BDB_LFP_POWER			= 44,
 	BDB_MIPI_CONFIG			= 52,
 	BDB_MIPI_SEQUENCE		= 53,
 	BDB_COMPRESSION_PARAMETERS	= 56,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9e6aaa3..95ad87d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
  *         Manasi Navare <manasi.d.navare@intel.com>
  */
 
-#include <drm/i915_drm.h>
-
 #include "i915_drv.h"
 #include "intel_display_types.h"
 #include "intel_dsi.h"
@@ -374,7 +372,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
 		return false;
 
 	/* There's no pipe A DSC engine on ICL */
-	WARN_ON(crtc->pipe == PIPE_A);
+	drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
 
 	return true;
 }
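
Every PPS block below repeats the same dance: write the DSC A register, mirror it to DSC C when dsc.dsc_split is set, or do the equivalent for the two per-pipe ICL engine instances. A hypothetical helper showing how that repetition could be factored (all names invented for illustration, not the driver's API):

#include <stdbool.h>
#include <stdint.h>

struct demo_dsc {
	void (*write)(struct demo_dsc *dsc, int engine, int pps, uint32_t val);
	bool split;	/* mirrors crtc_state->dsc.dsc_split */
};

static void demo_write_pps(struct demo_dsc *dsc, int pps, uint32_t val)
{
	dsc->write(dsc, 0, pps, val);
	if (dsc->split)
		dsc->write(dsc, 1, pps, val);
}
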
@@ -518,119 +516,149 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 		pps_val |= DSC_422_ENABLE;
 	if (vdsc_cfg->vbr_enable)
 		pps_val |= DSC_VBR_ENABLE;
-	DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_1 registers */
 	pps_val = 0;
 	pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
-	DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_2 registers */
 	pps_val = 0;
 	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
 		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
-	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_3 registers */
 	pps_val = 0;
 	pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
 		DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
-	DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_4 registers */
 	pps_val = 0;
 	pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
 		DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
-	DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_5 registers */
 	pps_val = 0;
 	pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
 		DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
-	DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_6 registers */
@@ -639,80 +667,100 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 		DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
 		DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
 		DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
-	DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_7 registers */
 	pps_val = 0;
 	pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
 		DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
-	DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_8 registers */
 	pps_val = 0;
 	pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
 		DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
-	DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_9 registers */
 	pps_val = 0;
 	pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
 		DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
-	DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+			intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9,
+				       pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+				       pps_val);
 	}
 
 	/* Populate PICTURE_PARAMETER_SET_10 registers */
@@ -721,20 +769,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 		DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
 		DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
 		DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
-	DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+			intel_de_write(dev_priv,
+				       DSCC_PICTURE_PARAMETER_SET_10, pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+				       pps_val);
 	}
 
 	/* Populate Picture parameter set 16 */
@@ -744,20 +797,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 				   vdsc_cfg->slice_width) |
 		DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
 					vdsc_cfg->slice_height);
-	DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+	drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+		intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
+			       pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
 		 * VDSC
 		 */
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+			intel_de_write(dev_priv,
+				       DSCC_PICTURE_PARAMETER_SET_16, pps_val);
 	} else {
-		I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe),
+			       pps_val);
 		if (crtc_state->dsc.dsc_split)
-			I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
-				   pps_val);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+				       pps_val);
 	}
 
 	/* Populate the RC_BUF_THRESH registers */
@@ -766,42 +824,50 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 		rc_buf_thresh_dword[i / 4] |=
 			(u32)(vdsc_cfg->rc_buf_thresh[i] <<
 			      BITS_PER_BYTE * (i % 4));
-		DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+		drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
 			 rc_buf_thresh_dword[i / 4]);
 	}
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
-		I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
-		I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
-		I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+		intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+			       rc_buf_thresh_dword[0]);
+		intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+			       rc_buf_thresh_dword[1]);
+		intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+			       rc_buf_thresh_dword[2]);
+		intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+			       rc_buf_thresh_dword[3]);
 		if (crtc_state->dsc.dsc_split) {
-			I915_WRITE(DSCC_RC_BUF_THRESH_0,
-				   rc_buf_thresh_dword[0]);
-			I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
-				   rc_buf_thresh_dword[1]);
-			I915_WRITE(DSCC_RC_BUF_THRESH_1,
-				   rc_buf_thresh_dword[2]);
-			I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
-				   rc_buf_thresh_dword[3]);
+			intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+				       rc_buf_thresh_dword[0]);
+			intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+				       rc_buf_thresh_dword[1]);
+			intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+				       rc_buf_thresh_dword[2]);
+			intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+				       rc_buf_thresh_dword[3]);
 		}
 	} else {
-		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
-			   rc_buf_thresh_dword[0]);
-		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
-			   rc_buf_thresh_dword[1]);
-		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
-			   rc_buf_thresh_dword[2]);
-		I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
-			   rc_buf_thresh_dword[3]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+			       rc_buf_thresh_dword[0]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+			       rc_buf_thresh_dword[1]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+			       rc_buf_thresh_dword[2]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+			       rc_buf_thresh_dword[3]);
 		if (crtc_state->dsc.dsc_split) {
-			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
-				   rc_buf_thresh_dword[0]);
-			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
-				   rc_buf_thresh_dword[1]);
-			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
-				   rc_buf_thresh_dword[2]);
-			I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
-				   rc_buf_thresh_dword[3]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_BUF_THRESH_0(pipe),
+				       rc_buf_thresh_dword[0]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+				       rc_buf_thresh_dword[1]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_BUF_THRESH_1(pipe),
+				       rc_buf_thresh_dword[2]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+				       rc_buf_thresh_dword[3]);
 		}
 	}
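
The loops above and below collapse the DSC rate-control tables into 32-bit register dwords: rc_buf_thresh packs one byte per 8-bit lane of each dword, and the following hunk packs one 16-bit min/max-QP triplet per half dword. A minimal standalone sketch of the byte packing, using hypothetical threshold values:

	u8 thresh[4] = { 0x0e, 0x1c, 0x2a, 0x38 };	/* hypothetical values */
	u32 dw = 0;
	int i;

	for (i = 0; i < 4; i++)		/* same arithmetic as the loop above */
		dw |= (u32)thresh[i] << (BITS_PER_BYTE * (i % 4));
	/* byte i lands at bits 8*(i%4)..8*(i%4)+7, so dw == 0x382a1c0e here */
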
 
@@ -815,78 +881,94 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
 				RC_MAX_QP_SHIFT) |
 			       (vdsc_cfg->rc_range_params[i].range_min_qp <<
 				RC_MIN_QP_SHIFT)) << 16 * (i % 2));
-		DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+		drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
 			 rc_range_params_dword[i / 2]);
 	}
 	if (!is_pipe_dsc(crtc_state)) {
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
-			   rc_range_params_dword[0]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
-			   rc_range_params_dword[1]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
-			   rc_range_params_dword[2]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
-			   rc_range_params_dword[3]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
-			   rc_range_params_dword[4]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
-			   rc_range_params_dword[5]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
-			   rc_range_params_dword[6]);
-		I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
-			   rc_range_params_dword[7]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+			       rc_range_params_dword[0]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+			       rc_range_params_dword[1]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+			       rc_range_params_dword[2]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+			       rc_range_params_dword[3]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+			       rc_range_params_dword[4]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+			       rc_range_params_dword[5]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+			       rc_range_params_dword[6]);
+		intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+			       rc_range_params_dword[7]);
 		if (crtc_state->dsc.dsc_split) {
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
-				   rc_range_params_dword[0]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
-				   rc_range_params_dword[1]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
-				   rc_range_params_dword[2]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
-				   rc_range_params_dword[3]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
-				   rc_range_params_dword[4]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
-				   rc_range_params_dword[5]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
-				   rc_range_params_dword[6]);
-			I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
-				   rc_range_params_dword[7]);
+			intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+				       rc_range_params_dword[0]);
+			intel_de_write(dev_priv,
+				       DSCC_RC_RANGE_PARAMETERS_0_UDW,
+				       rc_range_params_dword[1]);
+			intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+				       rc_range_params_dword[2]);
+			intel_de_write(dev_priv,
+				       DSCC_RC_RANGE_PARAMETERS_1_UDW,
+				       rc_range_params_dword[3]);
+			intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+				       rc_range_params_dword[4]);
+			intel_de_write(dev_priv,
+				       DSCC_RC_RANGE_PARAMETERS_2_UDW,
+				       rc_range_params_dword[5]);
+			intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+				       rc_range_params_dword[6]);
+			intel_de_write(dev_priv,
+				       DSCC_RC_RANGE_PARAMETERS_3_UDW,
+				       rc_range_params_dword[7]);
 		}
 	} else {
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
-			   rc_range_params_dword[0]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
-			   rc_range_params_dword[1]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
-			   rc_range_params_dword[2]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
-			   rc_range_params_dword[3]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
-			   rc_range_params_dword[4]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
-			   rc_range_params_dword[5]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
-			   rc_range_params_dword[6]);
-		I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
-			   rc_range_params_dword[7]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+			       rc_range_params_dword[0]);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+			       rc_range_params_dword[1]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+			       rc_range_params_dword[2]);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+			       rc_range_params_dword[3]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+			       rc_range_params_dword[4]);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+			       rc_range_params_dword[5]);
+		intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+			       rc_range_params_dword[6]);
+		intel_de_write(dev_priv,
+			       ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+			       rc_range_params_dword[7]);
 		if (crtc_state->dsc.dsc_split) {
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
-				   rc_range_params_dword[0]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
-				   rc_range_params_dword[1]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
-				   rc_range_params_dword[2]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
-				   rc_range_params_dword[3]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
-				   rc_range_params_dword[4]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
-				   rc_range_params_dword[5]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
-				   rc_range_params_dword[6]);
-			I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
-				   rc_range_params_dword[7]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+				       rc_range_params_dword[0]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+				       rc_range_params_dword[1]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+				       rc_range_params_dword[2]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+				       rc_range_params_dword[3]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+				       rc_range_params_dword[4]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+				       rc_range_params_dword[5]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+				       rc_range_params_dword[6]);
+			intel_de_write(dev_priv,
+				       ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+				       rc_range_params_dword[7]);
 		}
 	}
 }
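
The function above is representative of the whole series: every I915_WRITE()/I915_READ(), which leaned on an implicit dev_priv being in scope, becomes an explicit intel_de_*() call. As far as this conversion is concerned, the new helpers in intel_de.h are thin wrappers over the uncore MMIO accessors; a rough sketch, not the verbatim header:

	static inline u32
	intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		return intel_uncore_read(&i915->uncore, reg);
	}

	static inline void
	intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
	{
		intel_uncore_write(&i915->uncore, reg, val);
	}

	static inline void
	intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		/* read back to flush posted writes; the value is discarded */
		intel_uncore_posting_read(&i915->uncore, reg);
	}
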
@@ -912,11 +994,11 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
 		return;
 
 	if (!is_pipe_dsc(crtc_state)) {
-		dss_ctl1 = I915_READ(DSS_CTL1);
-		dss_ctl2 = I915_READ(DSS_CTL2);
+		dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+		dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
 	} else {
-		dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
-		dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+		dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+		dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe));
 	}
 
 	crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
@@ -930,9 +1012,10 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
 
 	/* PPS1 */
 	if (!is_pipe_dsc(crtc_state))
-		val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+		val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
 	else
-		val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+		val = intel_de_read(dev_priv,
+				    ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
 	vdsc_cfg->bits_per_pixel = val;
 	crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
 out:
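
The shift by 4 above follows from the DSC spec: the PPS bits_per_pixel field is a 6.4 fixed-point value, so the integer compressed bpp is the register value divided by 16. For example:

	/* an 8 bpp target in 6.4 fixed point: 8 << 4 == 0x80 */
	vdsc_cfg->bits_per_pixel = 0x80;
	crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;	/* 8 */
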
@@ -1013,8 +1096,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
 		dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
 		dss_ctl1_val |= JOINER_ENABLE;
 	}
-	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
-	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
+	intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
 }
 
 void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1035,17 +1118,17 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
 		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
 		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
 	}
-	dss_ctl1_val = I915_READ(dss_ctl1_reg);
+	dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg);
 	if (dss_ctl1_val & JOINER_ENABLE)
 		dss_ctl1_val &= ~JOINER_ENABLE;
-	I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
 
-	dss_ctl2_val = I915_READ(dss_ctl2_reg);
+	dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg);
 	if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
 	    dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
 		dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
 				  RIGHT_BRANCH_VDSC_ENABLE);
-	I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+	intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
 
 	/* Disable Power wells for VDSC/joining */
 	intel_display_power_put_unchecked(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 2ff7293..be33369 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -9,6 +9,7 @@
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
+#include "intel_de.h"
 #include "intel_vga.h"
 
 static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
@@ -36,16 +37,17 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
 	vga_put(pdev, VGA_RSRC_LEGACY_IO);
 	udelay(300);
 
-	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-	POSTING_READ(vga_reg);
+	intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
+	intel_de_posting_read(dev_priv, vga_reg);
 }
 
 void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
 {
 	i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
 
-	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
-		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+	if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Something enabled VGA plane, disabling it\n");
 		intel_vga_disable(dev_priv);
 	}
 }
@@ -98,7 +100,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
 	u16 gmch_ctrl;
 
 	if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
-		DRM_ERROR("failed to read control word\n");
+		drm_err(&i915->drm, "failed to read control word\n");
 		return -EIO;
 	}
 
@@ -111,7 +113,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
 
 	if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
-		DRM_ERROR("failed to write control word\n");
+		drm_err(&i915->drm, "failed to write control word\n");
 		return -EIO;
 	}
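
The logging conversion in this file follows the same idea as the register helpers: DRM_ERROR()/DRM_DEBUG_KMS() print with no indication of which device is talking, while the struct drm_device based macros route through dev_printk(). From drm_print.h they expand to approximately the following (a sketch from memory, not the exact definitions):

	#define drm_err(drm, fmt, ...) \
		dev_err((drm)->dev, "[drm] *ERROR* " fmt, ##__VA_ARGS__)

	#define drm_info(drm, fmt, ...) \
		dev_info((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)

	#define drm_dbg_kms(drm, fmt, ...) \
		drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
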
 
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index daf4fc3..f4c362d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,7 +85,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 
 	if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
 				  mask, 100))
-		DRM_ERROR("DPI FIFOs are not empty\n");
+		drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
 }
 
 static void write_data(struct drm_i915_private *dev_priv,
@@ -100,7 +100,7 @@ static void write_data(struct drm_i915_private *dev_priv,
 		for (j = 0; j < min_t(u32, len - i, 4); j++)
 			val |= *data++ << 8 * j;
 
-		I915_WRITE(reg, val);
+		intel_de_write(dev_priv, reg, val);
 	}
 }
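
write_data() above and read_data() below marshal the DSI generic payload through a 32-bit FIFO register, four bytes per access, least-significant byte first. Worked through for a hypothetical 6-byte payload:

	u8 payload[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	u32 val = 0;
	u32 j;

	for (j = 0; j < 4; j++)		/* first write_data() iteration */
		val |= payload[j] << (8 * j);
	/* val == 0x44332211; the trailing two bytes go out as 0x00006655 */
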
 
@@ -111,7 +111,7 @@ static void read_data(struct drm_i915_private *dev_priv,
 	u32 i, j;
 
 	for (i = 0; i < len; i += 4) {
-		u32 val = I915_READ(reg);
+		u32 val = intel_de_read(dev_priv, reg);
 
 		for (j = 0; j < min_t(u32, len - i, 4); j++)
 			*data++ = val >> 8 * j;
@@ -154,29 +154,34 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 	if (packet.payload_length) {
 		if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
 					    data_mask, 50))
-			DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+			drm_err(&dev_priv->drm,
+				"Timeout waiting for HS/LP DATA FIFO !full\n");
 
 		write_data(dev_priv, data_reg, packet.payload,
 			   packet.payload_length);
 	}
 
 	if (msg->rx_len) {
-		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+		intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+			       GEN_READ_DATA_AVAIL);
 	}
 
 	if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
 				    ctrl_mask, 50)) {
-		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+		drm_err(&dev_priv->drm,
+			"Timeout waiting for HS/LP CTRL FIFO !full\n");
 	}
 
-	I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+	intel_de_write(dev_priv, ctrl_reg,
+		       header[2] << 16 | header[1] << 8 | header[0]);
 
 	/* ->rx_len is set only for reads */
 	if (msg->rx_len) {
 		data_mask = GEN_READ_DATA_AVAIL;
 		if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
 					  data_mask, 50))
-			DRM_ERROR("Timeout waiting for read data.\n");
+			drm_err(&dev_priv->drm,
+				"Timeout waiting for read data.\n");
 
 		read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
 	}
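
The ctrl_reg write above packs the three MIPI DSI packet-header bytes assembled by mipi_dsi_create_packet() earlier in this function (the data ID, i.e. virtual channel plus type, then word count LSB/MSB for long packets, or two parameter bytes for short ones) little-endian into a single register. For a hypothetical generic long write of 6 payload bytes on virtual channel 0:

	u8 header[3] = { MIPI_DSI_GENERIC_LONG_WRITE, 0x06, 0x00 };	/* 0x29, WC = 6 */
	u32 ctrl = header[2] << 16 | header[1] << 8 | header[0];	/* 0x00000629 */
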
@@ -223,17 +228,19 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
 		cmd |= DPI_LP_MODE;
 
 	/* clear bit */
-	I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+	intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
 
 	/* XXX: old code skips write if control unchanged */
-	if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
-		DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
+	if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
+		drm_dbg_kms(&dev_priv->drm,
+			    "Same special packet %02x twice in a row.\n", cmd);
 
-	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+	intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
 
 	mask = SPL_PKT_SENT_INTERRUPT;
 	if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
-		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+		drm_err(&dev_priv->drm,
+			"Video mode command 0x%08x send failed.\n", cmd);
 
 	return 0;
 }
@@ -265,7 +272,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
 	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
 	int ret;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 
 	if (fixed_mode) {
@@ -328,36 +335,37 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
 	 * Power ON MIPI IO first and then write into IO reset and LP wake bits
 	 */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(MIPI_CTRL(port));
-		I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
+		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+		intel_de_write(dev_priv, MIPI_CTRL(port),
+			       tmp | GLK_MIPIIO_ENABLE);
 	}
 
 	/* Put the IO into reset */
-	tmp = I915_READ(MIPI_CTRL(PORT_A));
+	tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
 	tmp &= ~GLK_MIPIIO_RESET_RELEASED;
-	I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+	intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
 
 	/* Program LP Wake */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(MIPI_CTRL(port));
-		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+		if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
 			tmp &= ~GLK_LP_WAKE;
 		else
 			tmp |= GLK_LP_WAKE;
-		I915_WRITE(MIPI_CTRL(port), tmp);
+		intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
 	}
 
 	/* Wait for Pwr ACK */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
 					  GLK_MIPIIO_PORT_POWERED, 20))
-			DRM_ERROR("MIPIO port is powergated\n");
+			drm_err(&dev_priv->drm, "MIPI IO port is powergated\n");
 	}
 
 	/* Check for cold boot scenario */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		cold_boot |=
-			!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
+			!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
 	}
 
 	return cold_boot;
@@ -374,48 +382,49 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
 					  GLK_PHY_STATUS_PORT_READY, 20))
-			DRM_ERROR("PHY is not ON\n");
+			drm_err(&dev_priv->drm, "PHY is not ON\n");
 	}
 
 	/* Get IO out of reset */
-	val = I915_READ(MIPI_CTRL(PORT_A));
-	I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
+	val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+	intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+		       val | GLK_MIPIIO_RESET_RELEASED);
 
 	/* Get IO out of Low power state*/
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
-			val = I915_READ(MIPI_DEVICE_READY(port));
+		if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+			val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 			val &= ~ULPS_STATE_MASK;
 			val |= DEVICE_READY;
-			I915_WRITE(MIPI_DEVICE_READY(port), val);
+			intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 			usleep_range(10, 15);
 		} else {
 			/* Enter ULPS */
-			val = I915_READ(MIPI_DEVICE_READY(port));
+			val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 			val &= ~ULPS_STATE_MASK;
 			val |= (ULPS_STATE_ENTER | DEVICE_READY);
-			I915_WRITE(MIPI_DEVICE_READY(port), val);
+			intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 
 			/* Wait for ULPS active */
 			if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
 						    GLK_ULPS_NOT_ACTIVE, 20))
-				DRM_ERROR("ULPS not active\n");
+				drm_err(&dev_priv->drm, "ULPS not active\n");
 
 			/* Exit ULPS */
-			val = I915_READ(MIPI_DEVICE_READY(port));
+			val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 			val &= ~ULPS_STATE_MASK;
 			val |= (ULPS_STATE_EXIT | DEVICE_READY);
-			I915_WRITE(MIPI_DEVICE_READY(port), val);
+			intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 
 			/* Enter Normal Mode */
-			val = I915_READ(MIPI_DEVICE_READY(port));
+			val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 			val &= ~ULPS_STATE_MASK;
 			val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
-			I915_WRITE(MIPI_DEVICE_READY(port), val);
+			intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 
-			val = I915_READ(MIPI_CTRL(port));
+			val = intel_de_read(dev_priv, MIPI_CTRL(port));
 			val &= ~GLK_LP_WAKE;
-			I915_WRITE(MIPI_CTRL(port), val);
+			intel_de_write(dev_priv, MIPI_CTRL(port), val);
 		}
 	}
 
@@ -423,14 +432,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
 					  GLK_DATA_LANE_STOP_STATE, 20))
-			DRM_ERROR("Date lane not in STOP state\n");
+			drm_err(&dev_priv->drm,
+				"Data lane not in STOP state\n");
 	}
 
 	/* Wait for AFE LATCH */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
 					  AFE_LATCHOUT, 20))
-			DRM_ERROR("D-PHY not entering LP-11 state\n");
+			drm_err(&dev_priv->drm,
+				"D-PHY not entering LP-11 state\n");
 	}
 }
 
@@ -441,23 +452,24 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 	enum port port;
 	u32 val;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	/* Enable MIPI PHY transparent latch */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		val = I915_READ(BXT_MIPI_PORT_CTRL(port));
-		I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+		val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+		intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
+			       val | LP_OUTPUT_HOLD);
 		usleep_range(2000, 2500);
 	}
 
 	/* Clear ULPS and set device ready */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		val = I915_READ(MIPI_DEVICE_READY(port));
+		val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 		val &= ~ULPS_STATE_MASK;
-		I915_WRITE(MIPI_DEVICE_READY(port), val);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 		usleep_range(2000, 2500);
 		val |= DEVICE_READY;
-		I915_WRITE(MIPI_DEVICE_READY(port), val);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 	}
 }
 
@@ -468,7 +480,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 	enum port port;
 	u32 val;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	vlv_flisdsi_get(dev_priv);
 	/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -481,21 +493,25 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 
 	for_each_dsi_port(port, intel_dsi->ports) {
 
-		I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       ULPS_STATE_ENTER);
 		usleep_range(2500, 3000);
 
 		/* Enable MIPI PHY transparent latch
 		 * Common bit for both MIPI Port A & MIPI Port C
 		 * No similar bit in MIPI Port C reg
 		 */
-		val = I915_READ(MIPI_PORT_CTRL(PORT_A));
-		I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+		val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
+		intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
+			       val | LP_OUTPUT_HOLD);
 		usleep_range(1000, 1500);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       ULPS_STATE_EXIT);
 		usleep_range(2500, 3000);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       DEVICE_READY);
 		usleep_range(2500, 3000);
 	}
 }
@@ -521,24 +537,25 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
 
 	/* Enter ULPS */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		val = I915_READ(MIPI_DEVICE_READY(port));
+		val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
 		val &= ~ULPS_STATE_MASK;
 		val |= (ULPS_STATE_ENTER | DEVICE_READY);
-		I915_WRITE(MIPI_DEVICE_READY(port), val);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
 	}
 
 	/* Wait for MIPI PHY status bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
 					    GLK_PHY_STATUS_PORT_READY, 20))
-			DRM_ERROR("PHY is not turning OFF\n");
+			drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
 	}
 
 	/* Wait for Pwr ACK bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
 					    GLK_MIPIIO_PORT_POWERED, 20))
-			DRM_ERROR("MIPI IO Port is not powergated\n");
+			drm_err(&dev_priv->drm,
+				"MIPI IO Port is not powergated\n");
 	}
 }
 
@@ -550,22 +567,22 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
 	u32 tmp;
 
 	/* Put the IO into reset */
-	tmp = I915_READ(MIPI_CTRL(PORT_A));
+	tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
 	tmp &= ~GLK_MIPIIO_RESET_RELEASED;
-	I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+	intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
 
 	/* Wait for MIPI PHY status bit to unset */
 	for_each_dsi_port(port, intel_dsi->ports) {
 		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
 					    GLK_PHY_STATUS_PORT_READY, 20))
-			DRM_ERROR("PHY is not turning OFF\n");
+			drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
 	}
 
 	/* Clear MIPI mode */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		tmp = I915_READ(MIPI_CTRL(port));
+		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
 		tmp &= ~GLK_MIPIIO_ENABLE;
-		I915_WRITE(MIPI_CTRL(port), tmp);
+		intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
 	}
 }
 
@@ -581,23 +598,23 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 	for_each_dsi_port(port, intel_dsi->ports) {
 		/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
 		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
 		u32 val;
 
-		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
-							ULPS_STATE_ENTER);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       DEVICE_READY | ULPS_STATE_ENTER);
 		usleep_range(2000, 2500);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
-							ULPS_STATE_EXIT);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       DEVICE_READY | ULPS_STATE_EXIT);
 		usleep_range(2000, 2500);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
-							ULPS_STATE_ENTER);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+			       DEVICE_READY | ULPS_STATE_ENTER);
 		usleep_range(2000, 2500);
 
 		/*
@@ -607,14 +624,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
 		if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
 		    intel_de_wait_for_clear(dev_priv, port_ctrl,
 					    AFE_LATCHOUT, 30))
-			DRM_ERROR("DSI LP not going Low\n");
+			drm_err(&dev_priv->drm, "DSI LP not going Low\n");
 
 		/* Disable MIPI PHY transparent latch */
-		val = I915_READ(port_ctrl);
-		I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+		val = intel_de_read(dev_priv, port_ctrl);
+		intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
 		usleep_range(1000, 1500);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
 		usleep_range(2000, 2500);
 	}
 }
@@ -631,18 +648,20 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
 		u32 temp;
 		if (IS_GEN9_LP(dev_priv)) {
 			for_each_dsi_port(port, intel_dsi->ports) {
-				temp = I915_READ(MIPI_CTRL(port));
+				temp = intel_de_read(dev_priv,
+						     MIPI_CTRL(port));
 				temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
 					intel_dsi->pixel_overlap <<
 					BXT_PIXEL_OVERLAP_CNT_SHIFT;
-				I915_WRITE(MIPI_CTRL(port), temp);
+				intel_de_write(dev_priv, MIPI_CTRL(port),
+					       temp);
 			}
 		} else {
-			temp = I915_READ(VLV_CHICKEN_3);
+			temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
 			temp &= ~PIXEL_OVERLAP_CNT_MASK |
 					intel_dsi->pixel_overlap <<
 					PIXEL_OVERLAP_CNT_SHIFT;
-			I915_WRITE(VLV_CHICKEN_3, temp);
+			intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
 		}
 	}
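
One pre-existing wart worth flagging in intel_dsi_port_enable() above, untouched by this mechanical conversion: C precedence makes "temp &= ~MASK | x << SHIFT" parse as "temp &= (~MASK | (x << SHIFT))", which can only ever clear overlap-count bits, never set them. The presumable intent, in both the BXT and the VLV_CHICKEN_3 branches, is:

	temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK;
	temp |= intel_dsi->pixel_overlap << BXT_PIXEL_OVERLAP_CNT_SHIFT;
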
 
@@ -651,7 +670,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		u32 temp;
 
-		temp = I915_READ(port_ctrl);
+		temp = intel_de_read(dev_priv, port_ctrl);
 
 		temp &= ~LANE_CONFIGURATION_MASK;
 		temp &= ~DUAL_LINK_MODE_MASK;
@@ -671,8 +690,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
 			temp |= DITHERING_ENABLE;
 
 		/* assert ip_tg_enable signal */
-		I915_WRITE(port_ctrl, temp | DPI_ENABLE);
-		POSTING_READ(port_ctrl);
+		intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
+		intel_de_posting_read(dev_priv, port_ctrl);
 	}
 }
 
@@ -689,9 +708,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 		u32 temp;
 
 		/* de-assert ip_tg_enable signal */
-		temp = I915_READ(port_ctrl);
-		I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
-		POSTING_READ(port_ctrl);
+		temp = intel_de_read(dev_priv, port_ctrl);
+		intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+		intel_de_posting_read(dev_priv, port_ctrl);
 	}
 }
 
@@ -753,7 +772,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	u32 val;
 	bool glk_cold_boot = false;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
@@ -771,22 +790,22 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 
 	if (IS_BROXTON(dev_priv)) {
 		/* Add MIPI IO reset programming for modeset */
-		val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-		I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
-					val | MIPIO_RST_CTRL);
+		val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+		intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+			       val | MIPIO_RST_CTRL);
 
 		/* Power up DSI regulator */
-		I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
-		I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+		intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+		intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
 	}
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 val;
 
 		/* Disable DPOunit clock gating, can stall pipe */
-		val = I915_READ(DSPCLK_GATE_D);
+		val = intel_de_read(dev_priv, DSPCLK_GATE_D);
 		val |= DPOUNIT_CLOCK_GATE_DISABLE;
-		I915_WRITE(DSPCLK_GATE_D, val);
+		intel_de_write(dev_priv, DSPCLK_GATE_D, val);
 	}
 
 	if (!IS_GEMINILAKE(dev_priv))
@@ -820,7 +839,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	 * recommendation, port should be enabled before plane & pipe */
 	if (is_cmd_mode(intel_dsi)) {
 		for_each_dsi_port(port, intel_dsi->ports)
-			I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+			intel_de_write(dev_priv,
+				       MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
 		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
 		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 	} else {
@@ -838,6 +858,15 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
 }
 
+static void bxt_dsi_enable(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state)
+{
+	WARN_ON(crtc_state->has_pch_encoder);
+
+	intel_crtc_vblank_on(crtc_state);
+}
+
 /*
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
@@ -886,7 +915,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 	enum port port;
 	u32 val;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	if (IS_GEN9_LP(dev_priv)) {
 		intel_crtc_vblank_off(old_crtc_state);
@@ -917,13 +946,14 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 
 	if (IS_BROXTON(dev_priv)) {
 		/* Power down DSI regulator to save power */
-		I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
-		I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
+		intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+		intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+			       HS_IO_CTRL_SELECT);
 
 		/* Add MIPI IO reset programming for modeset */
-		val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-		I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
-				val & ~MIPIO_RST_CTRL);
+		val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+		intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+			       val & ~MIPIO_RST_CTRL);
 	}
 
 	if (IS_GEN9_LP(dev_priv)) {
@@ -933,9 +963,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 
 		vlv_dsi_pll_disable(encoder);
 
-		val = I915_READ(DSPCLK_GATE_D);
+		val = intel_de_read(dev_priv, DSPCLK_GATE_D);
 		val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
-		I915_WRITE(DSPCLK_GATE_D, val);
+		intel_de_write(dev_priv, DSPCLK_GATE_D, val);
 	}
 
 	/* Assert reset */
@@ -960,7 +990,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 	enum port port;
 	bool active = false;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	wakeref = intel_display_power_get_if_enabled(dev_priv,
 						     encoder->power_domain);
@@ -979,7 +1009,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 	for_each_dsi_port(port, intel_dsi->ports) {
 		i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
-		bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
+		bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
 
 		/*
 		 * Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -988,26 +1018,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 		 */
 		if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 		    port == PORT_C)
-			enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+			enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
 
 		/* Try command mode if video mode not enabled */
 		if (!enabled) {
-			u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+			u32 tmp = intel_de_read(dev_priv,
+						MIPI_DSI_FUNC_PRG(port));
 			enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
 		}
 
 		if (!enabled)
 			continue;
 
-		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+		if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
 			continue;
 
 		if (IS_GEN9_LP(dev_priv)) {
-			u32 tmp = I915_READ(MIPI_CTRL(port));
+			u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
 			tmp &= BXT_PIPE_SELECT_MASK;
 			tmp >>= BXT_PIPE_SELECT_SHIFT;
 
-			if (WARN_ON(tmp > PIPE_C))
+			if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
 				continue;
 
 			*pipe = tmp;
@@ -1051,11 +1082,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	 * encoder->get_hw_state() returns true.
 	 */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+		if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
 			break;
 	}
 
-	fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+	fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
 	bpp = mipi_dsi_pixel_format_to_bpp(
 			pixel_format_from_register_bits(fmt));
 
@@ -1067,21 +1098,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 
 	/* In terms of pixels */
 	adjusted_mode->crtc_hdisplay =
-				I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+				intel_de_read(dev_priv,
+				              BXT_MIPI_TRANS_HACTIVE(port));
 	adjusted_mode->crtc_vdisplay =
-				I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+				intel_de_read(dev_priv,
+				              BXT_MIPI_TRANS_VACTIVE(port));
 	adjusted_mode->crtc_vtotal =
-				I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+				intel_de_read(dev_priv,
+				              BXT_MIPI_TRANS_VTOTAL(port));
 
 	hactive = adjusted_mode->crtc_hdisplay;
-	hfp = I915_READ(MIPI_HFP_COUNT(port));
+	hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
 
 	/*
 	 * Meaningful for video mode non-burst sync pulse mode only,
 	 * can be zero for non-burst sync events and burst modes
 	 */
-	hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
-	hbp = I915_READ(MIPI_HBP_COUNT(port));
+	hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
+	hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
 
 	/* horizontal values are in terms of high speed byte clock */
 	hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1098,9 +1132,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
 	}
 
 	/* vertical values are in terms of lines */
-	vfp = I915_READ(MIPI_VFP_COUNT(port));
-	vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
-	vbp = I915_READ(MIPI_VBP_COUNT(port));
+	vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
+	vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+	vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
 
 	adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
 	adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1191,7 +1225,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 pclk;
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 
@@ -1268,26 +1302,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
 			 * vactive, as they are calculated per channel basis,
 			 * whereas these values should be based on resolution.
 			 */
-			I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
-				   adjusted_mode->crtc_hdisplay);
-			I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
-				   adjusted_mode->crtc_vdisplay);
-			I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
-				   adjusted_mode->crtc_vtotal);
+			intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+				       adjusted_mode->crtc_hdisplay);
+			intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+				       adjusted_mode->crtc_vdisplay);
+			intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+				       adjusted_mode->crtc_vtotal);
 		}
 
-		I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
-		I915_WRITE(MIPI_HFP_COUNT(port), hfp);
+		intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+			       hactive);
+		intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
 
 		/* meaningful for video mode non-burst sync pulse mode only,
 		 * can be zero for non-burst sync events and burst modes */
-		I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
-		I915_WRITE(MIPI_HBP_COUNT(port), hbp);
+		intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+			       hsync);
+		intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
 
 		/* vertical values are in terms of lines */
-		I915_WRITE(MIPI_VFP_COUNT(port), vfp);
-		I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
-		I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+		intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
+		intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+			       vsync);
+		intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
 	}
 }
 
@@ -1322,7 +1359,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 	u32 val, tmp;
 	u16 mode_hdisplay;
 
-	DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
+	drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
 
 	mode_hdisplay = adjusted_mode->crtc_hdisplay;
 
@@ -1338,35 +1375,35 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			 * escape clock divider, 20MHz, shared for A and C.
 			 * device ready must be off when doing this! txclkesc?
 			 */
-			tmp = I915_READ(MIPI_CTRL(PORT_A));
+			tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
 			tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
-			I915_WRITE(MIPI_CTRL(PORT_A), tmp |
-					ESCAPE_CLOCK_DIVIDER_1);
+			intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+				       tmp | ESCAPE_CLOCK_DIVIDER_1);
 
 			/* read request priority is per pipe */
-			tmp = I915_READ(MIPI_CTRL(port));
+			tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
 			tmp &= ~READ_REQUEST_PRIORITY_MASK;
-			I915_WRITE(MIPI_CTRL(port), tmp |
-					READ_REQUEST_PRIORITY_HIGH);
+			intel_de_write(dev_priv, MIPI_CTRL(port),
+				       tmp | READ_REQUEST_PRIORITY_HIGH);
 		} else if (IS_GEN9_LP(dev_priv)) {
 			enum pipe pipe = intel_crtc->pipe;
 
-			tmp = I915_READ(MIPI_CTRL(port));
+			tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
 			tmp &= ~BXT_PIPE_SELECT_MASK;
 
 			tmp |= BXT_PIPE_SELECT(pipe);
-			I915_WRITE(MIPI_CTRL(port), tmp);
+			intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
 		}
 
 		/* XXX: why here, why like this? handling in irq handler?! */
-		I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
-		I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+		intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
+		intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
 
-		I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+		intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+			       intel_dsi->dphy_reg);
 
-		I915_WRITE(MIPI_DPI_RESOLUTION(port),
-			adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
-			mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+		intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+			       adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
+			       mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
 	}
 
 	set_dsi_timings(encoder, adjusted_mode);
@@ -1393,7 +1430,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+		intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
 
 		/* timeouts for recovery. one frame IIUC. if counter expires,
 		 * EOT and stop state. */
@@ -1414,28 +1451,24 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 
 		if (is_vid_mode(intel_dsi) &&
 			intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
-			I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
-				txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
-					    intel_dsi->lane_count,
-					    intel_dsi->burst_mode_ratio) + 1);
+			intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+				       txbyteclkhs(adjusted_mode->crtc_htotal,
+						   bpp, intel_dsi->lane_count,
+						   intel_dsi->burst_mode_ratio) + 1);
 		} else {
-			I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
-				txbyteclkhs(adjusted_mode->crtc_vtotal *
-					    adjusted_mode->crtc_htotal,
-					    bpp, intel_dsi->lane_count,
-					    intel_dsi->burst_mode_ratio) + 1);
+			intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+				       txbyteclkhs(adjusted_mode->crtc_vtotal *
+						   adjusted_mode->crtc_htotal,
+						   bpp, intel_dsi->lane_count,
+						   intel_dsi->burst_mode_ratio) + 1);
 		}
-		I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
-		I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
-						intel_dsi->turn_arnd_val);
-		I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
-						intel_dsi->rst_timer_val);
+		intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+			       intel_dsi->lp_rx_timeout);
+		intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+			       intel_dsi->turn_arnd_val);
+		intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+			       intel_dsi->rst_timer_val);
 
 		/* dphy stuff */
 
 		/* in terms of low power clock */
-		I915_WRITE(MIPI_INIT_COUNT(port),
-				txclkesc(intel_dsi->escape_clk_div, 100));
+		intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+			       txclkesc(intel_dsi->escape_clk_div, 100));
 
 		if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
 			/*
@@ -1444,24 +1477,25 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			 * getting used. So write the other port
 			 * if not in dual link mode.
 			 */
-			I915_WRITE(MIPI_INIT_COUNT(port ==
-						PORT_A ? PORT_C : PORT_A),
-					intel_dsi->init_count);
+			intel_de_write(dev_priv,
+				       MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+				       intel_dsi->init_count);
 		}
 
 		/* recovery disables */
-		I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+		intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
 
 		/* in terms of low power clock */
-		I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+		intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+			       intel_dsi->init_count);
 
 		/* in terms of txbyteclkhs. actual high to low switch +
 		 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
 		 *
 		 * XXX: write MIPI_STOP_STATE_STALL?
 		 */
-		I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
-						intel_dsi->hs_to_lp_count);
+		intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+			       intel_dsi->hs_to_lp_count);
 
 		/* XXX: low power clock equivalence in terms of byte clock.
 		 * the number of byte clocks occupied in one low power clock.
@@ -1469,14 +1503,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 		 * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
 		 * ) / 105.???
 		 */
-		I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+		intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+			       intel_dsi->lp_byte_clk);
 
 		if (IS_GEMINILAKE(dev_priv)) {
-			I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
-					intel_dsi->lp_byte_clk);
+			intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+				       intel_dsi->lp_byte_clk);
 			/* Shadow of DPHY reg */
-			I915_WRITE(MIPI_CLK_LANE_TIMING(port),
-					intel_dsi->dphy_reg);
+			intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+				       intel_dsi->dphy_reg);
 		}
 
 		/* the bw essential for transmitting 16 long packets containing
@@ -1484,21 +1519,18 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 		 * this register in terms of byte clocks. based on dsi transfer
 		 * rate and the number of lanes configured the time taken to
 		 * transmit 16 long packets in a dsi stream varies. */
-		I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+		intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+			       intel_dsi->bw_timer);
 
-		I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
-		intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
-		intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+		intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+			       intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
+			       intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
 
 		if (is_vid_mode(intel_dsi))
 			/* Some panels might have resolution which is not a
 			 * multiple of 64 like 1366 x 768. Enable RANDOM
 			 * resolution support for such panels by default */
-			I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
-				intel_dsi->video_frmt_cfg_bits |
-				intel_dsi->video_mode_format |
-				IP_TG_CONFIG |
-				RANDOM_DPI_DISPLAY_RESOLUTION);
+			intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port),
+				       intel_dsi->video_frmt_cfg_bits |
+				       intel_dsi->video_mode_format |
+				       IP_TG_CONFIG |
+				       RANDOM_DPI_DISPLAY_RESOLUTION);
 	}
 }
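
The MIPI_HS_TX_TIMEOUT values above are in units of the high-speed byte clock; txbyteclkhs() converts a pixel count to byte clocks across the configured lanes, scaled by the burst-mode ratio (stored times 100). A sketch of the local helper, assuming its usual definition in this file:

	static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
			       u16 burst_mode_ratio)
	{
		/* pixels * bpp bits -> bytes, x burst ratio / 100, per lane */
		return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
						 8 * 100), lane_count);
	}
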
 
@@ -1514,19 +1546,19 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
 
 	for_each_dsi_port(port, intel_dsi->ports) {
 		/* Panel commands can be sent when clock is in LP11 */
-		I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
 
 		if (IS_GEN9_LP(dev_priv))
 			bxt_dsi_reset_clocks(encoder, port);
 		else
 			vlv_dsi_reset_clocks(encoder, port);
-		I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+		intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
 
-		val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+		val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
 		val &= ~VID_MODE_FORMAT_MASK;
-		I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+		intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
 
-		I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+		intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
 	}
 }
 
@@ -1559,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
 };
 
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_encoder *encoder = connector->encoder;
-	enum intel_display_power_domain power_domain;
-	enum drm_panel_orientation orientation;
-	struct intel_plane *plane;
-	struct intel_crtc *crtc;
-	intel_wakeref_t wakeref;
-	enum pipe pipe;
-	u32 val;
-
-	if (!encoder->get_hw_state(encoder, &pipe))
-		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
-	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-	plane = to_intel_plane(crtc->base.primary);
-
-	power_domain = POWER_DOMAIN_PIPE(pipe);
-	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
-	if (!wakeref)
-		return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
-	val = I915_READ(DSPCNTR(plane->i9xx_plane));
-
-	if (!(val & DISPLAY_PLANE_ENABLE))
-		orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-	else if (val & DISPPLANE_ROTATE_180)
-		orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
-	else
-		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
-	intel_display_power_put(dev_priv, power_domain, wakeref);
-
-	return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	enum drm_panel_orientation orientation;
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		orientation = vlv_dsi_get_hw_panel_orientation(connector);
-		if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
-			return orientation;
-	}
-
-	return intel_dsi_get_panel_orientation(connector);
-}
-
 static void vlv_dsi_add_properties(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1628,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
 
 		connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
 
-		connector->base.display_info.panel_orientation =
-			vlv_dsi_get_panel_orientation(connector);
-		drm_connector_init_panel_orientation_property(
+		drm_connector_set_panel_orientation_with_quirk(
 				&connector->base,
+				intel_dsi_get_panel_orientation(connector),
 				connector->panel.fixed_mode->hdisplay,
 				connector->panel.fixed_mode->vdisplay);
 	}
@@ -1703,7 +1681,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
 	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
 
 	if (prepare_cnt > PREPARE_CNT_MAX) {
-		DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+		drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+			    prepare_cnt);
 		prepare_cnt = PREPARE_CNT_MAX;
 	}
 
@@ -1723,7 +1702,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
 		exit_zero_cnt += 1;
 
 	if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
-		DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+		drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+			    exit_zero_cnt);
 		exit_zero_cnt = EXIT_ZERO_CNT_MAX;
 	}
 
@@ -1733,7 +1713,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
 				* ui_den, ui_num * mul);
 
 	if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
-		DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+		drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+			    clk_zero_cnt);
 		clk_zero_cnt = CLK_ZERO_CNT_MAX;
 	}
 
@@ -1742,7 +1723,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
 	trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
 
 	if (trail_cnt > TRAIL_CNT_MAX) {
-		DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+		drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+			    trail_cnt);
 		trail_cnt = TRAIL_CNT_MAX;
 	}
 
@@ -1817,7 +1799,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	enum port port;
 	enum pipe pipe;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	/* There is no detection method for MIPI so rely on VBT */
 	if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1849,6 +1831,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
+	if (IS_GEN9_LP(dev_priv))
+		intel_encoder->enable = bxt_dsi_enable;
 	intel_encoder->disable = intel_dsi_disable;
 	intel_encoder->post_disable = intel_dsi_post_disable;
 	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1894,18 +1878,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	}
 
 	if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
-		DRM_DEBUG_KMS("no device found\n");
+		drm_dbg_kms(&dev_priv->drm, "no device found\n");
 		goto err;
 	}
 
 	/* Use clock read-back from current hw-state for fastboot */
 	current_mode = intel_encoder_current_mode(intel_encoder);
 	if (current_mode) {
-		DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
-			      intel_dsi->pclk, current_mode->clock);
+		drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+			    intel_dsi->pclk, current_mode->clock);
 		if (intel_fuzzy_clock_check(intel_dsi->pclk,
 					    current_mode->clock)) {
-			DRM_DEBUG_KMS("Using GOP pclk\n");
+			drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
 			intel_dsi->pclk = current_mode->clock;
 		}
 
@@ -1933,7 +1917,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev->mode_config.mutex);
 
 	if (!fixed_mode) {
-		DRM_DEBUG_KMS("no fixed mode\n");
+		drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
 		goto err_cleanup_connector;
 	}
 
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 6b89e67..d0a5143 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -64,7 +64,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
 
 	/* target_dsi_clk is expected in kHz */
 	if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
-		DRM_ERROR("DSI CLK Out of Range\n");
+		drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
 		return -ECHRNG;
 	}
 
@@ -126,7 +126,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
 
 	ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
 	if (ret) {
-		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+		drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
 		return ret;
 	}
 
@@ -138,8 +138,8 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
 
 	config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
 
-	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
-		      config->dsi_pll.div, config->dsi_pll.ctrl);
+	drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+		    config->dsi_pll.div, config->dsi_pll.ctrl);
 
 	return 0;
 }
@@ -149,7 +149,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	vlv_cck_get(dev_priv);
 
@@ -169,12 +169,12 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
 						DSI_PLL_LOCK, 20)) {
 
 		vlv_cck_put(dev_priv);
-		DRM_ERROR("DSI PLL lock failed\n");
+		drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
 		return;
 	}
 	vlv_cck_put(dev_priv);
 
-	DRM_DEBUG_KMS("DSI PLL locked\n");
+	drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
 }
 
 void vlv_dsi_pll_disable(struct intel_encoder *encoder)
@@ -182,7 +182,7 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 tmp;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	vlv_cck_get(dev_priv);
 
@@ -201,7 +201,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 	u32 mask;
 
 	mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
-	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
 	enabled = (val & mask) == mask;
 
 	if (!enabled)
@@ -215,15 +215,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 	 * times, and since accessing DSI registers with invalid dividers
 	 * causes a system hang.
 	 */
-	val = I915_READ(BXT_DSI_PLL_CTL);
+	val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
 	if (IS_GEMINILAKE(dev_priv)) {
 		if (!(val & BXT_DSIA_16X_MASK)) {
-			DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+			drm_dbg(&dev_priv->drm,
+				"Invalid PLL divider (%08x)\n", val);
 			enabled = false;
 		}
 	} else {
 		if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
-			DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+			drm_dbg(&dev_priv->drm,
+				"Invalid PLL divider (%08x)\n", val);
 			enabled = false;
 		}
 	}
@@ -236,11 +238,11 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
-	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
 	val &= ~BXT_DSI_PLL_DO_ENABLE;
-	I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+	intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
 
 	/*
 	 * PLL lock should deassert within 200us.
@@ -248,7 +250,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
 	 */
 	if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
 				    BXT_DSI_PLL_LOCKED, 1))
-		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+		drm_err(&dev_priv->drm,
+			"Timeout waiting for PLL lock deassertion\n");
 }
 
 u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
@@ -263,7 +266,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
 	int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
 	int i;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	vlv_cck_get(dev_priv);
 	pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -292,7 +295,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
 	p--;
 
 	if (!p) {
-		DRM_ERROR("wrong P1 divisor\n");
+		drm_err(&dev_priv->drm, "wrong P1 divisor\n");
 		return 0;
 	}
 
@@ -302,7 +305,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
 	}
 
 	if (i == ARRAY_SIZE(lfsr_converts)) {
-		DRM_ERROR("wrong m_seed programmed\n");
+		drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
 		return 0;
 	}
 
@@ -325,7 +328,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
 
-	config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
+	config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
 
 	dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
 
@@ -333,7 +336,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
 
 	pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
 
-	DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
+	drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
 	return pclk;
 }
 
@@ -343,11 +346,10 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-	temp = I915_READ(MIPI_CTRL(port));
+	temp = intel_de_read(dev_priv, MIPI_CTRL(port));
 	temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
-	I915_WRITE(MIPI_CTRL(port), temp |
-			intel_dsi->escape_clk_div <<
-			ESCAPE_CLOCK_DIVIDER_SHIFT);
+	intel_de_write(dev_priv, MIPI_CTRL(port),
+		       temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
 }
 
 static void glk_dsi_program_esc_clock(struct drm_device *dev,
@@ -393,8 +395,10 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
 	else
 		txesc2_div = 10;
 
-	I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
-	I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+	intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+		       (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+	intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+		       (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
 }
 
 /* Program BXT Mipi clocks and dividers */
@@ -412,7 +416,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
 	u32 mipi_8by3_divider;
 
 	/* Clear old configurations */
-	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+	tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
 	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
 	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
 	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -448,7 +452,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
 	tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
 	tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
 
-	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+	intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
 }
 
 int bxt_dsi_pll_compute(struct intel_encoder *encoder,
@@ -478,10 +482,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
 	}
 
 	if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
-		DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
+		drm_err(&dev_priv->drm,
+			"Can't get a suitable ratio from DSI PLL ratios\n");
 		return -ECHRNG;
 	} else
-		DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n");
+		drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
 
 	/*
 	 * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -507,11 +512,11 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 	enum port port;
 	u32 val;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");
 
 	/* Configure PLL vales */
-	I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
-	POSTING_READ(BXT_DSI_PLL_CTL);
+	intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+	intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
 
 	/* Program TX, RX, Dphy clocks */
 	if (IS_BROXTON(dev_priv)) {
@@ -522,18 +527,19 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
 	}
 
 	/* Enable DSI PLL */
-	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
 	val |= BXT_DSI_PLL_DO_ENABLE;
-	I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+	intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
 
 	/* Timeout and fail if PLL not locked */
 	if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
 				  BXT_DSI_PLL_LOCKED, 1)) {
-		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for DSI PLL to lock\n");
 		return;
 	}
 
-	DRM_DEBUG_KMS("DSI PLL locked\n");
+	drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
 }
 
 void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -544,20 +550,20 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 
 	/* Clear old configurations */
 	if (IS_BROXTON(dev_priv)) {
-		tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+		tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
 		tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
 		tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
 		tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
 		tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
-		I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+		intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
 	} else {
-		tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
+		tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
 		tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
-		I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
+		intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
 
-		tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
+		tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
 		tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
-		I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
+		intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
 	}
-	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+	intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa..0598e53 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
 					   0);
 out_request:
 	if (unlikely(err)) {
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
 		err = 0;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 151a1e8..68326ad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,14 +67,11 @@
 #include <linux/log2.h>
 #include <linux/nospec.h>
 
-#include <drm/i915_drm.h>
-
 #include "gt/gen6_ppgtt.h"
 #include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
 #include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
-#include "gt/intel_lrc_reg.h"
 #include "gt/intel_ring.h"
 
 #include "i915_gem_context.h"
@@ -245,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 		if (!e->engines[count])
 			continue;
 
-		RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
 		intel_context_put(e->engines[count]);
 	}
 	kfree(e);
@@ -258,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
 
 static void free_engines_rcu(struct rcu_head *rcu)
 {
-	free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+	struct i915_gem_engines *engines =
+		container_of(rcu, struct i915_gem_engines, rcu);
+
+	i915_sw_fence_fini(&engines->fence);
+	free_engines(engines);
+}
+
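+/*
+ * Notifier for an engine set's idle fence: on FENCE_COMPLETE the set is
+ * unlinked from its context's stale list and the context reference is
+ * dropped; on FENCE_FREE the actual free is deferred to an RCU grace period.
+ */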
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct i915_gem_engines *engines =
+		container_of(fence, typeof(*engines), fence);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		if (!list_empty(&engines->link)) {
+			struct i915_gem_context *ctx = engines->ctx;
+			unsigned long flags;
+
+			spin_lock_irqsave(&ctx->stale.lock, flags);
+			list_del(&engines->link);
+			spin_unlock_irqrestore(&ctx->stale.lock, flags);
+		}
+		i915_gem_context_put(engines->ctx);
+		break;
+
+	case FENCE_FREE:
+		init_rcu_head(&engines->rcu);
+		call_rcu(&engines->rcu, free_engines_rcu);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
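+/* Allocate a zeroed engine set with its idle-tracking fence initialised. */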
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+	struct i915_gem_engines *e;
+
+	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+	if (!e)
+		return NULL;
+
+	i915_sw_fence_init(&e->fence, engines_notify);
+	return e;
 }
 
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -268,11 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 	struct i915_gem_engines *e;
 	enum intel_engine_id id;
 
-	e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+	e = alloc_engines(I915_NUM_ENGINES);
 	if (!e)
 		return ERR_PTR(-ENOMEM);
 
-	init_rcu_head(&e->rcu);
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
@@ -306,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	list_del(&ctx->link);
 	spin_unlock(&ctx->i915->gem.contexts.lock);
 
-	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);
 
 	if (ctx->timeline)
@@ -421,7 +459,7 @@ static struct intel_engine_cs *__active_engine(struct i915_request *rq)
 	}
 
 	engine = NULL;
-	if (i915_request_is_active(rq) && !rq->fence.error)
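+	/* a request already cancelled with -EIO is not considered active */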
+	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
 		engine = rq->engine;
 
 	spin_unlock_irq(&locked->active.lock);
@@ -452,7 +490,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	return engine;
 }
 
-static void kill_context(struct i915_gem_context *ctx)
+static void kill_engines(struct i915_gem_engines *engines)
 {
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
@@ -464,7 +502,7 @@ static void kill_context(struct i915_gem_context *ctx)
 	 * However, we only care about pending requests, so only include
 	 * engines on which there are incomplete requests.
 	 */
-	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+	for_each_gem_engine(ce, engines, it) {
 		struct intel_engine_cs *engine;
 
 		if (intel_context_set_banned(ce))
@@ -486,10 +524,84 @@ static void kill_context(struct i915_gem_context *ctx)
 			 * the context from the GPU, we have to resort to a full
 			 * reset. We hope the collateral damage is worth it.
 			 */
-			__reset_context(ctx, engine);
+			__reset_context(engines->ctx, engine);
 	}
 }
 
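+/*
+ * Ban every stale engine set of a closed context. i915_sw_fence_await()
+ * keeps each set alive while the lock is dropped around kill_engines();
+ * a set whose fence has already completed is simply unlinked.
+ */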
+static void kill_stale_engines(struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines *pos, *next;
+
+	spin_lock_irq(&ctx->stale.lock);
+	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
+		if (!i915_sw_fence_await(&pos->fence)) {
+			list_del_init(&pos->link);
+			continue;
+		}
+
+		spin_unlock_irq(&ctx->stale.lock);
+
+		kill_engines(pos);
+
+		spin_lock_irq(&ctx->stale.lock);
+		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+		list_safe_reset_next(pos, next, link);
+		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
+
+		i915_sw_fence_complete(&pos->fence);
+	}
+	spin_unlock_irq(&ctx->stale.lock);
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+	kill_stale_engines(ctx);
+}
+
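+/*
+ * Keep an engine set alive until all of its engines are idle: an await is
+ * taken on each engine's last request, and the set is parked on the
+ * context's stale list so a later context_close() can still ban it.
+ */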
+static void engines_idle_release(struct i915_gem_context *ctx,
+				 struct i915_gem_engines *engines)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+
+	INIT_LIST_HEAD(&engines->link);
+
+	engines->ctx = i915_gem_context_get(ctx);
+
+	for_each_gem_engine(ce, engines, it) {
+		struct dma_fence *fence;
+		int err = 0;
+
+		/* serialises with execbuf */
+		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+		if (!intel_context_pin_if_active(ce))
+			continue;
+
+		fence = i915_active_fence_get(&ce->timeline->last_request);
+		if (fence) {
+			err = i915_sw_fence_await_dma_fence(&engines->fence,
+							    fence, 0,
+							    GFP_KERNEL);
+			dma_fence_put(fence);
+		}
+		intel_context_unpin(ce);
+		if (err < 0)
+			goto kill;
+	}
+
+	spin_lock_irq(&ctx->stale.lock);
+	if (!i915_gem_context_is_closed(ctx))
+		list_add_tail(&engines->link, &ctx->stale.engines);
+	spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+	if (list_empty(&engines->link)) /* raced, already closed */
+		kill_engines(engines);
+
+	i915_sw_fence_commit(&engines->fence);
+}
+
 static void set_closed_name(struct i915_gem_context *ctx)
 {
 	char *s;
@@ -511,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
 {
 	struct i915_address_space *vm;
 
+	/* Flush any concurrent set_engines() */
+	mutex_lock(&ctx->engines_mutex);
+	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
 	i915_gem_context_set_closed(ctx);
-	set_closed_name(ctx);
+	mutex_unlock(&ctx->engines_mutex);
 
 	mutex_lock(&ctx->mutex);
 
+	set_closed_name(ctx);
+
 	vm = i915_gem_context_vm(ctx);
 	if (vm)
 		i915_vm_close(vm);
@@ -604,6 +721,9 @@ __create_context(struct drm_i915_private *i915)
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 	mutex_init(&ctx->mutex);
 
+	spin_lock_init(&ctx->stale.lock);
+	INIT_LIST_HEAD(&ctx->stale.engines);
+
 	mutex_init(&ctx->engines_mutex);
 	e = default_engines(ctx);
 	if (IS_ERR(e)) {
@@ -637,23 +757,30 @@ __create_context(struct drm_i915_private *i915)
 	return ERR_PTR(err);
 }
 
-static void
+static int
 context_apply_all(struct i915_gem_context *ctx,
-		  void (*fn)(struct intel_context *ce, void *data),
+		  int (*fn)(struct intel_context *ce, void *data),
 		  void *data)
 {
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
+	int err = 0;
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
-		fn(ce, data);
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		err = fn(ce, data);
+		if (err)
+			break;
+	}
 	i915_gem_context_unlock_engines(ctx);
+
+	return err;
 }
 
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
 {
 	i915_vm_put(ce->vm);
 	ce->vm = i915_vm_get(vm);
+	return 0;
 }
 
 static struct i915_address_space *
@@ -691,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
 		intel_timeline_put(old);
 }
 
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
 {
 	__set_timeline(&ce->timeline, timeline);
+	return 0;
 }
 
 static void __assign_timeline(struct i915_gem_context *ctx,
@@ -724,8 +852,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 
 		ppgtt = i915_ppgtt_create(&i915->gt);
 		if (IS_ERR(ppgtt)) {
-			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
-					 PTR_ERR(ppgtt));
+			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
+				PTR_ERR(ppgtt));
 			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
@@ -767,20 +895,15 @@ static void init_contexts(struct i915_gem_contexts *gc)
 void i915_gem_init__contexts(struct drm_i915_private *i915)
 {
 	init_contexts(&i915->gem.contexts);
-	DRM_DEBUG_DRIVER("%s context support initialized\n",
-			 DRIVER_CAPS(i915)->has_logical_contexts ?
-			 "logical" : "fake");
+	drm_dbg(&i915->drm, "%s context support initialized\n",
+		DRIVER_CAPS(i915)->has_logical_contexts ?
+		"logical" : "fake");
 }
 
 void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 {
 	flush_work(&i915->gem.contexts.free_work);
-}
-
-static int vm_idr_cleanup(int id, void *p, void *data)
-{
-	i915_vm_put(p);
-	return 0;
+	rcu_barrier(); /* and flush the left over RCU frees */
 }
 
 static int gem_context_register(struct i915_gem_context *ctx,
@@ -820,8 +943,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 
 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
 
-	mutex_init(&file_priv->vm_idr_lock);
-	idr_init_base(&file_priv->vm_idr, 1);
+	/* 0 reserved for invalid/unassigned ppgtt */
+	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
 
 	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx)) {
@@ -839,9 +962,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 err_ctx:
 	context_close(ctx);
 err:
-	idr_destroy(&file_priv->vm_idr);
+	xa_destroy(&file_priv->vm_xa);
 	xa_destroy(&file_priv->context_xa);
-	mutex_destroy(&file_priv->vm_idr_lock);
 	return err;
 }
 
@@ -849,6 +971,7 @@ void i915_gem_context_close(struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_private *i915 = file_priv->dev_priv;
+	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
 	unsigned long idx;
 
@@ -856,9 +979,9 @@ void i915_gem_context_close(struct drm_file *file)
 		context_close(ctx);
 	xa_destroy(&file_priv->context_xa);
 
-	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
-	idr_destroy(&file_priv->vm_idr);
-	mutex_destroy(&file_priv->vm_idr_lock);
+	xa_for_each(&file_priv->vm_xa, idx, vm)
+		i915_vm_put(vm);
+	xa_destroy(&file_priv->vm_xa);
 
 	contexts_flush_free(&i915->gem.contexts);
 }
@@ -870,6 +993,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_vm_control *args = data;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_ppgtt *ppgtt;
+	u32 id;
 	int err;
 
 	if (!HAS_FULL_PPGTT(i915))
@@ -892,23 +1016,15 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 			goto err_put;
 	}
 
-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
+		       xa_limit_32b, GFP_KERNEL);
 	if (err)
 		goto err_put;
 
-	err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
-	if (err < 0)
-		goto err_unlock;
-
-	GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
-
-	mutex_unlock(&file_priv->vm_idr_lock);
-
-	args->vm_id = err;
+	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+	args->vm_id = id;
 	return 0;
 
-err_unlock:
-	mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
 	i915_vm_put(&ppgtt->vm);
 	return err;
@@ -920,8 +1036,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_vm_control *args = data;
 	struct i915_address_space *vm;
-	int err;
-	u32 id;
 
 	if (args->flags)
 		return -EINVAL;
@@ -929,17 +1043,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 	if (args->extensions)
 		return -EINVAL;
 
-	id = args->vm_id;
-	if (!id)
-		return -ENOENT;
-
-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
-	if (err)
-		return err;
-
-	vm = idr_remove(&file_priv->vm_idr, id);
-
-	mutex_unlock(&file_priv->vm_idr_lock);
+	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
 	if (!vm)
 		return -ENOENT;
 
@@ -965,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
 	kfree(cb);
 }
 
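+/*
+ * Grab a reference, via its fence, on the context's current engine set.
+ * ctx->engines is re-checked after the await is taken, so a set that is
+ * concurrently replaced by set_engines() is released and the lookup retried.
+ */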
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines *engines;
+
+	rcu_read_lock();
+	do {
+		engines = rcu_dereference(ctx->engines);
+		if (unlikely(!engines))
+			break;
+
+		if (unlikely(!i915_sw_fence_await(&engines->fence)))
+			continue;
+
+		if (likely(engines == rcu_access_pointer(ctx->engines)))
+			break;
+
+		i915_sw_fence_complete(&engines->fence);
+	} while (1);
+	rcu_read_unlock();
+
+	return engines;
+}
+
 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
@@ -975,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 {
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
+	struct i915_gem_engines *e;
 	struct intel_context *ce;
 	int err = 0;
 
@@ -991,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		return err;
 	}
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	e = __context_engines_await(ctx);
+	if (!e) {
+		i915_active_release(&cb->base);
+		return -ENOENT;
+	}
+
+	for_each_gem_engine(ce, e, it) {
 		struct i915_request *rq;
 
 		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1022,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		if (err)
 			break;
 	}
-	i915_gem_context_unlock_engines(ctx);
+	i915_sw_fence_complete(&e->fence);
 
 	cb->task = err ? NULL : task; /* caller needs to unwind instead */
 	cb->data = data;
@@ -1037,7 +1172,8 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 		     struct drm_i915_gem_context_param *args)
 {
 	struct i915_address_space *vm;
-	int ret;
+	int err;
+	u32 id;
 
 	if (!rcu_access_pointer(ctx->vm))
 		return -ENODEV;
@@ -1045,27 +1181,22 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 	rcu_read_lock();
 	vm = context_get_vm_rcu(ctx);
 	rcu_read_unlock();
+	if (!vm)
+		return -ENODEV;
 
-	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
-	if (ret)
+	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+	if (err)
 		goto err_put;
 
-	ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
-	GEM_BUG_ON(!ret);
-	if (ret < 0)
-		goto err_unlock;
-
 	i915_vm_open(vm);
 
+	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+	args->value = id;
 	args->size = 0;
-	args->value = ret;
 
-	ret = 0;
-err_unlock:
-	mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
 	i915_vm_put(vm);
-	return ret;
+	return err;
 }
 
 static void set_ppgtt_barrier(void *data)
@@ -1167,7 +1298,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 		return -ENOENT;
 
 	rcu_read_lock();
-	vm = idr_find(&file_priv->vm_idr, args->value);
+	vm = xa_load(&file_priv->vm_xa, args->value);
 	if (vm && !kref_get_unless_zero(&vm->ref))
 		vm = NULL;
 	rcu_read_unlock();
@@ -1213,90 +1344,64 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	return err;
 }
 
-static int gen8_emit_rpcs_config(struct i915_request *rq,
-				 struct intel_context *ce,
-				 struct intel_sseu sseu)
+static int __apply_ringsize(struct intel_context *ce, void *sz)
 {
-	u64 offset;
-	u32 *cs;
+	return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
 
-	cs = intel_ring_begin(rq, 4);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+static int set_ringsize(struct i915_gem_context *ctx,
+			struct drm_i915_gem_context_param *args)
+{
+	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+		return -ENODEV;
 
-	offset = i915_ggtt_offset(ce->state) +
-		 LRC_STATE_PN * PAGE_SIZE +
-		 CTX_R_PWR_CLK_STATE * 4;
+	if (args->size)
+		return -EINVAL;
 
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = lower_32_bits(offset);
-	*cs++ = upper_32_bits(offset);
-	*cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+		return -EINVAL;
 
-	intel_ring_advance(rq, cs);
+	if (args->value < I915_GTT_PAGE_SIZE)
+		return -EINVAL;
 
+	if (args->value > 128 * I915_GTT_PAGE_SIZE)
+		return -EINVAL;
+
+	return context_apply_all(ctx,
+				 __apply_ringsize,
+				 __intel_context_ring_size(args->value));
+}
+
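+/*
+ * context_apply_all() stops at the first non-zero return, so returning the
+ * ring size as a positive value reports the first engine's size only.
+ */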
+static int __get_ringsize(struct intel_context *ce, void *arg)
+{
+	long sz;
+
+	sz = intel_context_get_ring_size(ce);
+	GEM_BUG_ON(sz > INT_MAX);
+
+	return sz; /* stop on first engine */
+}
+
+static int get_ringsize(struct i915_gem_context *ctx,
+			struct drm_i915_gem_context_param *args)
+{
+	int sz;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+		return -ENODEV;
+
+	if (args->size)
+		return -EINVAL;
+
+	sz = context_apply_all(ctx, __get_ringsize, NULL);
+	if (sz < 0)
+		return sz;
+
+	args->value = sz;
 	return 0;
 }
 
 static int
-gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
-{
-	struct i915_request *rq;
-	int ret;
-
-	lockdep_assert_held(&ce->pin_mutex);
-
-	/*
-	 * If the context is not idle, we have to submit an ordered request to
-	 * modify its context image via the kernel context (writing to our own
-	 * image, or into the registers directory, does not stick). Pristine
-	 * and idle contexts will be configured on pinning.
-	 */
-	if (!intel_context_pin_if_active(ce))
-		return 0;
-
-	rq = intel_engine_create_kernel_request(ce->engine);
-	if (IS_ERR(rq)) {
-		ret = PTR_ERR(rq);
-		goto out_unpin;
-	}
-
-	/* Serialise with the remote context */
-	ret = intel_context_prepare_remote_request(ce, rq);
-	if (ret == 0)
-		ret = gen8_emit_rpcs_config(rq, ce, sseu);
-
-	i915_request_add(rq);
-out_unpin:
-	intel_context_unpin(ce);
-	return ret;
-}
-
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
-	int ret;
-
-	GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
-
-	ret = intel_context_lock_pinned(ce);
-	if (ret)
-		return ret;
-
-	/* Nothing to do if unmodified. */
-	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
-		goto unlock;
-
-	ret = gen8_modify_rpcs(ce, sseu);
-	if (!ret)
-		ce->sseu = sseu;
-
-unlock:
-	intel_context_unlock_pinned(ce);
-	return ret;
-}
-
-static int
 user_to_context_sseu(struct drm_i915_private *i915,
 		     const struct drm_i915_gem_context_param_sseu *user,
 		     struct intel_sseu *context)
@@ -1460,6 +1565,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 	struct i915_context_engines_load_balance __user *ext =
 		container_of_user(base, typeof(*ext), base);
 	const struct set_engines *set = data;
+	struct drm_i915_private *i915 = set->ctx->i915;
 	struct intel_engine_cs *stack[16];
 	struct intel_engine_cs **siblings;
 	struct intel_context *ce;
@@ -1467,24 +1573,25 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 	unsigned int n;
 	int err;
 
-	if (!HAS_EXECLISTS(set->ctx->i915))
+	if (!HAS_EXECLISTS(i915))
 		return -ENODEV;
 
-	if (USES_GUC_SUBMISSION(set->ctx->i915))
+	if (intel_uc_uses_guc_submission(&i915->gt.uc))
+		return -ENODEV; /* not implemented yet */
 
 	if (get_user(idx, &ext->engine_index))
 		return -EFAULT;
 
 	if (idx >= set->engines->num_engines) {
-		DRM_DEBUG("Invalid placement value, %d >= %d\n",
-			  idx, set->engines->num_engines);
+		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+			idx, set->engines->num_engines);
 		return -EINVAL;
 	}
 
 	idx = array_index_nospec(idx, set->engines->num_engines);
 	if (set->engines->engines[idx]) {
-		DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+		drm_dbg(&i915->drm,
+			"Invalid placement[%d], already occupied\n", idx);
 		return -EEXIST;
 	}
 
@@ -1516,12 +1623,13 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 			goto out_siblings;
 		}
 
-		siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+		siblings[n] = intel_engine_lookup_user(i915,
 						       ci.engine_class,
 						       ci.engine_instance);
 		if (!siblings[n]) {
-			DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
-				  n, ci.engine_class, ci.engine_instance);
+			drm_dbg(&i915->drm,
+				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
+				n, ci.engine_class, ci.engine_instance);
 			err = -EINVAL;
 			goto out_siblings;
 		}
@@ -1554,6 +1662,7 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 	struct i915_context_engines_bond __user *ext =
 		container_of_user(base, typeof(*ext), base);
 	const struct set_engines *set = data;
+	struct drm_i915_private *i915 = set->ctx->i915;
 	struct i915_engine_class_instance ci;
 	struct intel_engine_cs *virtual;
 	struct intel_engine_cs *master;
@@ -1564,14 +1673,15 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 		return -EFAULT;
 
 	if (idx >= set->engines->num_engines) {
-		DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
-			  idx, set->engines->num_engines);
+		drm_dbg(&i915->drm,
+			"Invalid index for virtual engine: %d >= %d\n",
+			idx, set->engines->num_engines);
 		return -EINVAL;
 	}
 
 	idx = array_index_nospec(idx, set->engines->num_engines);
 	if (!set->engines->engines[idx]) {
-		DRM_DEBUG("Invalid engine at %d\n", idx);
+		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
 		return -EINVAL;
 	}
 	virtual = set->engines->engines[idx]->engine;
@@ -1589,11 +1699,12 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
 		return -EFAULT;
 
-	master = intel_engine_lookup_user(set->ctx->i915,
+	master = intel_engine_lookup_user(i915,
 					  ci.engine_class, ci.engine_instance);
 	if (!master) {
-		DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
-			  ci.engine_class, ci.engine_instance);
+		drm_dbg(&i915->drm,
+			"Unrecognised master engine: { class:%u, instance:%u }\n",
+			ci.engine_class, ci.engine_instance);
 		return -EINVAL;
 	}
 
@@ -1606,12 +1717,13 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
 			return -EFAULT;
 
-		bond = intel_engine_lookup_user(set->ctx->i915,
+		bond = intel_engine_lookup_user(i915,
 						ci.engine_class,
 						ci.engine_instance);
 		if (!bond) {
-			DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
-				  n, ci.engine_class, ci.engine_instance);
+			drm_dbg(&i915->drm,
+				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+				n, ci.engine_class, ci.engine_instance);
 			return -EINVAL;
 		}
 
@@ -1640,6 +1752,7 @@ static int
 set_engines(struct i915_gem_context *ctx,
 	    const struct drm_i915_gem_context_param *args)
 {
+	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_context_param_engines __user *user =
 		u64_to_user_ptr(args->value);
 	struct set_engines set = { .ctx = ctx };
@@ -1661,8 +1774,8 @@ set_engines(struct i915_gem_context *ctx,
 	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
 	if (args->size < sizeof(*user) ||
 	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
-		DRM_DEBUG("Invalid size for engine array: %d\n",
-			  args->size);
+		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+			args->size);
 		return -EINVAL;
 	}
 
@@ -1671,13 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
 	 * first 64 engines defined here.
 	 */
 	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
-	set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
-			      GFP_KERNEL);
+	set.engines = alloc_engines(num_engines);
 	if (!set.engines)
 		return -ENOMEM;
 
-	init_rcu_head(&set.engines->rcu);
 	for (n = 0; n < num_engines; n++) {
 		struct i915_engine_class_instance ci;
 		struct intel_engine_cs *engine;
@@ -1698,8 +1808,9 @@ set_engines(struct i915_gem_context *ctx,
 						  ci.engine_class,
 						  ci.engine_instance);
 		if (!engine) {
-			DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
-				  n, ci.engine_class, ci.engine_instance);
+			drm_dbg(&i915->drm,
+				"Invalid engine[%d]: { class:%d, instance:%d }\n",
+				n, ci.engine_class, ci.engine_instance);
 			__free_engines(set.engines, n);
 			return -ENOENT;
 		}
@@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
 
 replace:
 	mutex_lock(&ctx->engines_mutex);
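+	/* the context may have been closed in the meantime; bail if so */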
+	if (i915_gem_context_is_closed(ctx)) {
+		mutex_unlock(&ctx->engines_mutex);
+		free_engines(set.engines);
+		return -ENOENT;
+	}
 	if (args->size)
 		i915_gem_context_set_user_engines(ctx);
 	else
@@ -1736,7 +1852,8 @@ set_engines(struct i915_gem_context *ctx,
 	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
 	mutex_unlock(&ctx->engines_mutex);
 
-	call_rcu(&set.engines->rcu, free_engines_rcu);
+	/* Keep track of old engine sets for kill_context() */
+	engines_idle_release(ctx, set.engines);
 
 	return 0;
 }
@@ -1747,11 +1864,10 @@ __copy_engines(struct i915_gem_engines *e)
 	struct i915_gem_engines *copy;
 	unsigned int n;
 
-	copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+	copy = alloc_engines(e->num_engines);
 	if (!copy)
 		return ERR_PTR(-ENOMEM);
 
-	init_rcu_head(&copy->rcu);
 	for (n = 0; n < e->num_engines; n++) {
 		if (e->engines[n])
 			copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
 	return __context_set_persistence(ctx, args->value);
 }
 
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 
 	if (!intel_engine_has_semaphores(ce->engine))
-		return;
+		return 0;
 
 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
 		intel_context_set_use_semaphores(ce);
 	else
 		intel_context_clear_use_semaphores(ce);
+
+	return 0;
 }
 
 static int set_priority(struct i915_gem_context *ctx,
@@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
 		ret = set_persistence(ctx, args);
 		break;
 
+	case I915_CONTEXT_PARAM_RINGSIZE:
+		ret = set_ringsize(ctx, args);
+		break;
+
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
@@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
 	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
 }
 
+static int copy_ring_size(struct intel_context *dst,
+			  struct intel_context *src)
+{
+	long sz;
+
+	sz = intel_context_get_ring_size(src);
+	if (sz < 0)
+		return sz;
+
+	return intel_context_set_ring_size(dst, sz);
+}
+
 static int clone_engines(struct i915_gem_context *dst,
 			 struct i915_gem_context *src)
 {
@@ -1991,11 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
 	bool user_engines;
 	unsigned long n;
 
-	clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+	clone = alloc_engines(e->num_engines);
 	if (!clone)
 		goto err_unlock;
 
-	init_rcu_head(&clone->rcu);
 	for (n = 0; n < e->num_engines; n++) {
 		struct intel_engine_cs *engine;
 
@@ -2025,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
 		}
 
 		intel_context_set_gem(clone->engines[n], dst);
+
+		/* Copy across the preferred ringsize */
+		if (copy_ring_size(clone->engines[n], e->engines[n])) {
+			__free_engines(clone, n + 1);
+			goto err_unlock;
+		}
 	}
 	clone->num_engines = n;
 
@@ -2032,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
 	i915_gem_context_unlock_engines(src);
 
 	/* Serialised by constructor */
-	free_engines(__context_engines_static(dst));
-	RCU_INIT_POINTER(dst->engines, clone);
+	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
 	if (user_engines)
 		i915_gem_context_set_user_engines(dst);
 	else
@@ -2213,8 +2351,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 
 	ext_data.fpriv = file->driver_priv;
 	if (client_is_banned(ext_data.fpriv)) {
-		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-			  current->comm, task_pid_nr(current));
+		drm_dbg(&i915->drm,
+			"client %s[%d] banned from creating ctx\n",
+			current->comm, task_pid_nr(current));
 		return -EIO;
 	}
 
@@ -2236,7 +2375,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		goto err_ctx;
 
 	args->ctx_id = id;
-	DRM_DEBUG("HW context %d created\n", args->ctx_id);
+	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
 
 	return 0;
 
@@ -2386,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_persistent(ctx);
 		break;
 
+	case I915_CONTEXT_PARAM_RINGSIZE:
+		ret = get_ringsize(ctx, args);
+		break;
+
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
@@ -2459,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
 	const struct i915_gem_engines *e = it->engines;
 	struct intel_context *ctx;
 
+	if (unlikely(!e))
+		return NULL;
+
 	do {
 		if (it->idx >= e->num_engines)
 			return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a3..f1d884d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
 static inline struct intel_context *
 i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
 {
-	struct intel_context *ce = ERR_PTR(-EINVAL);
+	struct intel_context *ce;
 
 	rcu_read_lock(); {
 		struct i915_gem_engines *e = rcu_dereference(ctx->engines);
-		if (likely(idx < e->num_engines && e->engines[idx]))
+		if (unlikely(!e)) /* context was closed! */
+			ce = ERR_PTR(-ENOENT);
+		else if (likely(idx < e->num_engines && e->engines[idx]))
 			ce = intel_context_get(e->engines[idx]);
+		else
+			ce = ERR_PTR(-EINVAL);
 	} rcu_read_unlock();
 
 	return ce;
@@ -207,7 +211,6 @@ static inline void
 i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
 			   struct i915_gem_engines *engines)
 {
-	GEM_BUG_ON(!engines);
 	it->engines = engines;
 	it->idx = 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 017ca80..28760bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -20,6 +20,7 @@
 #include "gt/intel_context_types.h"
 
 #include "i915_scheduler.h"
+#include "i915_sw_fence.h"
 
 struct pid;
 
@@ -30,7 +31,12 @@ struct intel_timeline;
 struct intel_ring;
 
 struct i915_gem_engines {
-	struct rcu_head rcu;
+	union {
+		struct list_head link; /* entry in ctx->stale.engines */
+		struct rcu_head rcu; /* deferred free via free_engines_rcu() */
+	};
+	struct i915_sw_fence fence; /* signalled once all engines are idle */
+	struct i915_gem_context *ctx; /* owning context, put when idle */
 	unsigned int num_engines;
 	struct intel_context *engines[];
 };
@@ -173,6 +179,11 @@ struct i915_gem_context {
 	 * context in messages.
 	 */
 	char name[TASK_COMM_LEN + 8];
+
+	struct {
+		spinlock_t lock;
+		struct list_head engines; /* replaced engine sets awaiting idle */
+	} stale;
 };
 
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 372b57c..7db5a79 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -48,7 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		src = sg_next(src);
 	}
 
-	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+	if (!dma_map_sg_attrs(attachment->dev,
+			      st->sgl, st->nents, dir,
+			      DMA_ATTR_SKIP_CPU_SYNC)) {
 		ret = -ENOMEM;
 		goto err_free_sg;
 	}
@@ -71,7 +73,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
-	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	dma_unmap_sg_attrs(attachment->dev,
+			   sg->sgl, sg->nents, dir,
+			   DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sg);
 	kfree(sg);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7643a30..36d0695 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
 #include <linux/uaccess.h>
 
 #include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_frontbuffer.h"
 
@@ -28,6 +27,19 @@
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 
+struct eb_vma {
+	struct i915_vma *vma;
+	unsigned int flags;
+
+	/** This vma's place in the execbuf reservation list */
+	struct drm_i915_gem_exec_object2 *exec;
+	struct list_head bind_link;
+	struct list_head reloc_link;
+
+	struct hlist_node node;
+	u32 handle;
+};
+
 enum {
 	FORCE_CPU_RELOC = 1,
 	FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
 };
 
-#define __EXEC_OBJECT_HAS_REF		BIT(31)
-#define __EXEC_OBJECT_HAS_PIN		BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN		BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
-#define __EXEC_VALIDATED	BIT(30)
-#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
+#define __EXEC_INTERNAL_FLAGS	(~0u << 31)
 #define UPDATE			PIN_OFFSET_FIXED
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -220,15 +230,14 @@ struct i915_execbuffer {
 	struct drm_file *file; /** per-file lookup tables and limits */
 	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
-	struct i915_vma **vma;
-	unsigned int *flags;
+	struct eb_vma *vma;
 
 	struct intel_engine_cs *engine; /** engine to queue the request to */
 	struct intel_context *context; /* logical state for the request */
 	struct i915_gem_context *gem_context; /** caller's context */
 
 	struct i915_request *request; /** our request to build */
-	struct i915_vma *batch; /** identity of the batch obj/vma */
+	struct eb_vma *batch; /** identity of the batch obj/vma */
 	struct i915_vma *trampoline; /** trampoline used for chaining */
 
 	/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
 	struct hlist_head *buckets; /** ht for relocation handles */
 };
 
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
 static inline bool
 eb_pin_vma(struct i915_execbuffer *eb,
 	   const struct drm_i915_gem_exec_object2 *entry,
-	   struct i915_vma *vma)
+	   struct eb_vma *ev)
 {
-	unsigned int exec_flags = *vma->exec_flags;
+	struct i915_vma *vma = ev->vma;
 	u64 pin_flags;
 
 	if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
 		pin_flags = entry->offset & PIN_OFFSET_MASK;
 
 	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
-	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
 		pin_flags |= PIN_GLOBAL;
 
 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
 		return false;
 
-	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		if (unlikely(i915_vma_pin_fence(vma))) {
 			i915_vma_unpin(vma);
 			return false;
 		}
 
 		if (vma->fence)
-			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
-	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
-	return !eb_vma_misplaced(entry, vma, exec_flags);
+	ev->flags |= __EXEC_OBJECT_HAS_PIN;
+	return !eb_vma_misplaced(entry, vma, ev->flags);
 }
 
 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
 }
 
 static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
 {
-	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
 		return;
 
-	__eb_unreserve_vma(vma, *flags);
-	*flags &= ~__EXEC_OBJECT_RESERVED;
+	__eb_unreserve_vma(ev->vma, ev->flags);
+	ev->flags &= ~__EXEC_OBJECT_RESERVED;
 }
 
 static int
@@ -442,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	} else {
 		entry->pad_to_size = 0;
 	}
-
-	if (unlikely(vma->exec_flags)) {
-		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
-			  entry->handle, (int)(entry - eb->exec));
-		return -EINVAL;
-	}
-
 	/*
 	 * From drm_mm perspective address space is continuous,
 	 * so from this point we're always using non-canonical
@@ -471,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	return 0;
 }
 
-static int
+static void
 eb_add_vma(struct i915_execbuffer *eb,
 	   unsigned int i, unsigned batch_idx,
 	   struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
-	int err;
+	struct eb_vma *ev = &eb->vma[i];
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 
-	if (!(eb->args->flags & __EXEC_VALIDATED)) {
-		err = eb_validate_vma(eb, entry, vma);
-		if (unlikely(err))
-			return err;
-	}
+	ev->vma = i915_vma_get(vma);
+	ev->exec = entry;
+	ev->flags = entry->flags;
 
 	if (eb->lut_size > 0) {
-		vma->exec_handle = entry->handle;
-		hlist_add_head(&vma->exec_node,
+		ev->handle = entry->handle;
+		hlist_add_head(&ev->node,
 			       &eb->buckets[hash_32(entry->handle,
 						    eb->lut_size)]);
 	}
 
 	if (entry->relocation_count)
-		list_add_tail(&vma->reloc_link, &eb->relocs);
-
-	/*
-	 * Stash a pointer from the vma to execobj, so we can query its flags,
-	 * size, alignment etc as provided by the user. Also we stash a pointer
-	 * to the vma inside the execobj so that we can use a direct lookup
-	 * to find the right target VMA when doing relocations.
-	 */
-	eb->vma[i] = vma;
-	eb->flags[i] = entry->flags;
-	vma->exec_flags = &eb->flags[i];
+		list_add_tail(&ev->reloc_link, &eb->relocs);
 
 	/*
 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -518,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
 	 */
 	if (i == batch_idx) {
 		if (entry->relocation_count &&
-		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
-			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+		    !(ev->flags & EXEC_OBJECT_PINNED))
+			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
 		if (eb->reloc_cache.has_fence)
-			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
 
-		eb->batch = vma;
+		eb->batch = ev;
 	}
 
-	err = 0;
-	if (eb_pin_vma(eb, entry, vma)) {
+	if (eb_pin_vma(eb, entry, ev)) {
 		if (entry->offset != vma->node.start) {
 			entry->offset = vma->node.start | UPDATE;
 			eb->args->flags |= __EXEC_HAS_RELOC;
 		}
 	} else {
-		eb_unreserve_vma(vma, vma->exec_flags);
-
-		list_add_tail(&vma->exec_link, &eb->unbound);
-		if (drm_mm_node_allocated(&vma->node))
-			err = i915_vma_unbind(vma);
-		if (unlikely(err))
-			vma->exec_flags = NULL;
+		eb_unreserve_vma(ev);
+		list_add_tail(&ev->bind_link, &eb->unbound);
 	}
-	return err;
 }
 
 static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -562,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
 }
 
 static int eb_reserve_vma(const struct i915_execbuffer *eb,
-			  struct i915_vma *vma)
+			  struct eb_vma *ev,
+			  u64 pin_flags)
 {
-	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
-	unsigned int exec_flags = *vma->exec_flags;
-	u64 pin_flags;
+	struct drm_i915_gem_exec_object2 *entry = ev->exec;
+	unsigned int exec_flags = ev->flags;
+	struct i915_vma *vma = ev->vma;
 	int err;
 
-	pin_flags = PIN_USER | PIN_NONBLOCK;
 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
 		pin_flags |= PIN_GLOBAL;
 
@@ -583,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
 		pin_flags |= PIN_MAPPABLE;
 
-	if (exec_flags & EXEC_OBJECT_PINNED) {
+	if (exec_flags & EXEC_OBJECT_PINNED)
 		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
-		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
-	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
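+	/* unbind first if the vma is already bound but now misplaced */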
+	if (drm_mm_node_allocated(&vma->node) &&
+	    eb_vma_misplaced(entry, vma, ev->flags)) {
+		err = i915_vma_unbind(vma);
+		if (err)
+			return err;
 	}
 
 	err = i915_vma_pin(vma,
@@ -612,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
-	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
-	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+	ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
 
 	return 0;
 }
@@ -621,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 static int eb_reserve(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
+	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
 	struct list_head last;
-	struct i915_vma *vma;
+	struct eb_vma *ev;
 	unsigned int i, pass;
-	int err;
+	int err = 0;
 
 	/*
 	 * Attempt to pin all of the buffers into the GTT.
@@ -640,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 
+	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+		return -EINTR;
+
 	pass = 0;
-	err = 0;
 	do {
-		list_for_each_entry(vma, &eb->unbound, exec_link) {
-			err = eb_reserve_vma(eb, vma);
+		list_for_each_entry(ev, &eb->unbound, bind_link) {
+			err = eb_reserve_vma(eb, ev, pin_flags);
 			if (err)
 				break;
 		}
-		if (err != -ENOSPC)
-			return err;
+		if (!(err == -ENOSPC || err == -EAGAIN))
+			break;
 
 		/* Resort *all* the objects into priority order */
 		INIT_LIST_HEAD(&eb->unbound);
 		INIT_LIST_HEAD(&last);
 		for (i = 0; i < count; i++) {
-			unsigned int flags = eb->flags[i];
-			struct i915_vma *vma = eb->vma[i];
+			unsigned int flags;
 
+			ev = &eb->vma[i];
+			flags = ev->flags;
 			if (flags & EXEC_OBJECT_PINNED &&
 			    flags & __EXEC_OBJECT_HAS_PIN)
 				continue;
 
-			eb_unreserve_vma(vma, &eb->flags[i]);
+			eb_unreserve_vma(ev);
 
 			if (flags & EXEC_OBJECT_PINNED)
 				/* Pinned must have their slot */
-				list_add(&vma->exec_link, &eb->unbound);
+				list_add(&ev->bind_link, &eb->unbound);
 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
 				/* Map require the lowest 256MiB (aperture) */
-				list_add_tail(&vma->exec_link, &eb->unbound);
+				list_add_tail(&ev->bind_link, &eb->unbound);
 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
 				/* Prioritise 4GiB region for restricted bo */
-				list_add(&vma->exec_link, &last);
+				list_add(&ev->bind_link, &last);
 			else
-				list_add_tail(&vma->exec_link, &last);
+				list_add_tail(&ev->bind_link, &last);
 		}
 		list_splice_tail(&last, &eb->unbound);
 
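+		/* -EAGAIN: flush pending userptr invalidation work, then retry */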
+		if (err == -EAGAIN) {
+			mutex_unlock(&eb->i915->drm.struct_mutex);
+			flush_workqueue(eb->i915->mm.userptr_wq);
+			mutex_lock(&eb->i915->drm.struct_mutex);
+			continue;
+		}
+
 		switch (pass++) {
 		case 0:
 			break;
@@ -688,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
 			err = i915_gem_evict_vm(eb->context->vm);
 			mutex_unlock(&eb->context->vm->mutex);
 			if (err)
-				return err;
+				goto unlock;
 			break;
 
 		default:
-			return -ENOSPC;
+			err = -ENOSPC;
+			goto unlock;
 		}
+
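+		/* later passes drop PIN_NONBLOCK, allowing the pin to block/evict */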
+		pin_flags = PIN_USER;
 	} while (1);
+
+unlock:
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+	return err;
 }
 
 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
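
The reworked eb_reserve() above escalates across passes: pass 0 simply resorts the objects and retries, pass 1 idles the GTT and evicts everything, and any later pass gives up with -ENOSPC; -EAGAIN instead restarts after flushing the userptr workqueue, with PIN_NONBLOCK dropped so subsequent pins may wait. Below is a minimal user-space sketch of that escalation pattern; every name in it is invented for illustration, none is driver API.

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the driver's reservation steps. */
    static int try_reserve(int pass) { return pass < 2 ? -ENOSPC : 0; }
    static void resort_objects(void) { puts("resort into priority order"); }
    static void evict_everything(void) { puts("idle the GTT and evict"); }

    static int reserve_with_escalation(void)
    {
    	int pass = 0;

    	for (;;) {
    		int err = try_reserve(pass);

    		if (err != -ENOSPC)
    			return err; /* success, or a hard failure */

    		resort_objects();
    		switch (pass++) {
    		case 0: /* cheap: retry after the resort */
    			break;
    		case 1: /* expensive: make room first */
    			evict_everything();
    			break;
    		default: /* still no space: give up */
    			return -ENOSPC;
    		}
    	}
    }

    int main(void) { return reserve_with_escalation(); }
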
@@ -731,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i, batch;
 	int err;
 
+	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+		return -ENOENT;
+
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
 	batch = eb_batch_index(eb);
 
-	mutex_lock(&eb->gem_context->mutex);
-	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
-		err = -ENOENT;
-		goto err_ctx;
-	}
-
 	for (i = 0; i < eb->buffer_count; i++) {
 		u32 handle = eb->exec[i].handle;
 		struct i915_lut_handle *lut;
@@ -786,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		i915_gem_object_unlock(obj);
 
 add_vma:
-		err = eb_add_vma(eb, i, batch, vma);
+		err = eb_validate_vma(eb, &eb->exec[i], vma);
 		if (unlikely(err))
 			goto err_vma;
 
-		GEM_BUG_ON(vma != eb->vma[i]);
-		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
-		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
-			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+		eb_add_vma(eb, i, batch, vma);
 	}
 
-	mutex_unlock(&eb->gem_context->mutex);
-
-	eb->args->flags |= __EXEC_VALIDATED;
-	return eb_reserve(eb);
+	return 0;
 
 err_obj:
 	i915_gem_object_put(obj);
 err_vma:
-	eb->vma[i] = NULL;
-err_ctx:
-	mutex_unlock(&eb->gem_context->mutex);
+	eb->vma[i].vma = NULL;
 	return err;
 }
 
-static struct i915_vma *
+static struct eb_vma *
 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
 {
 	if (eb->lut_size < 0) {
 		if (handle >= -eb->lut_size)
 			return NULL;
-		return eb->vma[handle];
+		return &eb->vma[handle];
 	} else {
 		struct hlist_head *head;
-		struct i915_vma *vma;
+		struct eb_vma *ev;
 
 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
-		hlist_for_each_entry(vma, head, exec_node) {
-			if (vma->exec_handle == handle)
-				return vma;
+		hlist_for_each_entry(ev, head, node) {
+			if (ev->handle == handle)
+				return ev;
 		}
 		return NULL;
 	}
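
eb_get_vma() retains the hybrid lookup: for small buffer counts, lut_size stores the negated count and the handle indexes the array directly; larger counts go through a hash table keyed on the handle. A self-contained sketch of the same scheme follows; the types are invented and the power-of-two mask stands in for the driver's hash_32().

    #include <stddef.h>

    struct ev { unsigned long handle; struct ev *next; };

    struct lut {
    	int lut_size;        /* < 0: direct index, negated count */
    	struct ev *linear;   /* used when lut_size < 0 */
    	struct ev **buckets; /* 1 << lut_size hash chains otherwise */
    };

    static struct ev *lut_get(const struct lut *t, unsigned long handle)
    {
    	if (t->lut_size < 0) {
    		if (handle >= (unsigned long)-t->lut_size)
    			return NULL;
    		return &t->linear[handle];
    	}

    	for (struct ev *ev = t->buckets[handle & ((1ul << t->lut_size) - 1)];
    	     ev; ev = ev->next) {
    		if (ev->handle == handle)
    			return ev;
    	}
    	return NULL;
    }

    int main(void)
    {
    	struct ev linear[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
    	const struct lut t = { .lut_size = -4, .linear = linear };

    	return lut_get(&t, 2) == &linear[2] ? 0 : 1;
    }
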
@@ -836,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
 	unsigned int i;
 
 	for (i = 0; i < count; i++) {
-		struct i915_vma *vma = eb->vma[i];
-		unsigned int flags = eb->flags[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		if (!vma)
 			break;
 
-		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
-		vma->exec_flags = NULL;
-		eb->vma[i] = NULL;
+		eb->vma[i].vma = NULL;
 
-		if (flags & __EXEC_OBJECT_HAS_PIN)
-			__eb_unreserve_vma(vma, flags);
+		if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+			__eb_unreserve_vma(vma, ev->flags);
 
-		if (flags & __EXEC_OBJECT_HAS_REF)
-			i915_vma_put(vma);
+		i915_vma_put(vma);
 	}
 }
 
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
-	eb_release_vmas(eb);
-	if (eb->lut_size > 0)
-		memset(eb->buckets, 0,
-		       sizeof(struct hlist_head) << eb->lut_size);
-}
-
 static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1197,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	goto out_pool;
 
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1328,10 +1310,11 @@ relocate_entry(struct i915_vma *vma,
 
 static u64
 eb_relocate_entry(struct i915_execbuffer *eb,
-		  struct i915_vma *vma,
+		  struct eb_vma *ev,
 		  const struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct i915_vma *target;
+	struct drm_i915_private *i915 = eb->i915;
+	struct eb_vma *target;
 	int err;
 
 	/* we already hold a reference to all valid objects */
@@ -1341,7 +1324,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-		DRM_DEBUG("reloc with multiple write domains: "
+		drm_dbg(&i915->drm, "reloc with multiple write domains: "
 			  "target %d offset %d "
 			  "read %08x write %08x",
 			  reloc->target_handle,
@@ -1352,7 +1335,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	}
 	if (unlikely((reloc->write_domain | reloc->read_domains)
 		     & ~I915_GEM_GPU_DOMAINS)) {
-		DRM_DEBUG("reloc with read/write non-GPU domains: "
+		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
 			  "target %d offset %d "
 			  "read %08x write %08x",
 			  reloc->target_handle,
@@ -1363,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	}
 
 	if (reloc->write_domain) {
-		*target->exec_flags |= EXEC_OBJECT_WRITE;
+		target->flags |= EXEC_OBJECT_WRITE;
 
 		/*
 		 * Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1373,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		 */
 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
 		    IS_GEN(eb->i915, 6)) {
-			err = i915_vma_bind(target, target->obj->cache_level,
+			err = i915_vma_bind(target->vma,
+					    target->vma->obj->cache_level,
 					    PIN_GLOBAL, NULL);
 			if (WARN_ONCE(err,
 				      "Unexpected failure to bind target VMA!"))
@@ -1386,21 +1370,21 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	 * more work needs to be done.
 	 */
 	if (!DBG_FORCE_RELOC &&
-	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+	    gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
 		return 0;
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
-		DRM_DEBUG("Relocation beyond object bounds: "
+		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
 			  "target %d offset %d size %d.\n",
 			  reloc->target_handle,
 			  (int)reloc->offset,
-			  (int)vma->size);
+			  (int)ev->vma->size);
 		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
-		DRM_DEBUG("Relocation not 4-byte aligned: "
+		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
 			  "target %d offset %d.\n",
 			  reloc->target_handle,
 			  (int)reloc->offset);
@@ -1415,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	 * do relocations we are already stalling, disable the user's opt
 	 * out of our synchronisation.
 	 */
-	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+	ev->flags &= ~EXEC_OBJECT_ASYNC;
 
 	/* and update the user's relocation entry */
-	return relocate_entry(vma, reloc, eb, target);
+	return relocate_entry(ev->vma, reloc, eb, target->vma);
 }
 
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *urelocs;
-	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
 	unsigned int remain;
 
 	urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1456,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 		 * we would try to acquire the struct mutex again. Obviously
 		 * this is bad and so lockdep complains vehemently.
 		 */
-		pagefault_disable();
-		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
-		pagefault_enable();
+		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
 		if (unlikely(copied)) {
 			remain = -EFAULT;
 			goto out;
@@ -1466,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 
 		remain -= count;
 		do {
-			u64 offset = eb_relocate_entry(eb, vma, r);
+			u64 offset = eb_relocate_entry(eb, ev, r);
 
 			if (likely(offset == 0)) {
 			} else if ((s64)offset < 0) {
@@ -1508,281 +1490,34 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
 	return remain;
 }
 
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
-	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
-	struct drm_i915_gem_relocation_entry *relocs =
-		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < entry->relocation_count; i++) {
-		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
-		if ((s64)offset < 0) {
-			err = (int)offset;
-			goto err;
-		}
-	}
-	err = 0;
-err:
-	reloc_cache_reset(&eb->reloc_cache);
-	return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
-	const char __user *addr, *end;
-	unsigned long size;
-	char __maybe_unused c;
-
-	size = entry->relocation_count;
-	if (size == 0)
-		return 0;
-
-	if (size > N_RELOC(ULONG_MAX))
-		return -EINVAL;
-
-	addr = u64_to_user_ptr(entry->relocs_ptr);
-	size *= sizeof(struct drm_i915_gem_relocation_entry);
-	if (!access_ok(addr, size))
-		return -EFAULT;
-
-	end = addr + size;
-	for (; addr < end; addr += PAGE_SIZE) {
-		int err = __get_user(c, addr);
-		if (err)
-			return err;
-	}
-	return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
-{
-	struct drm_i915_gem_relocation_entry *relocs;
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < count; i++) {
-		const unsigned int nreloc = eb->exec[i].relocation_count;
-		struct drm_i915_gem_relocation_entry __user *urelocs;
-		unsigned long size;
-		unsigned long copied;
-
-		if (nreloc == 0)
-			continue;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			goto err;
-
-		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
-		size = nreloc * sizeof(*relocs);
-
-		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
-		if (!relocs) {
-			err = -ENOMEM;
-			goto err;
-		}
-
-		/* copy_from_user is limited to < 4GiB */
-		copied = 0;
-		do {
-			unsigned int len =
-				min_t(u64, BIT_ULL(31), size - copied);
-
-			if (__copy_from_user((char *)relocs + copied,
-					     (char __user *)urelocs + copied,
-					     len))
-				goto end;
-
-			copied += len;
-		} while (copied < size);
-
-		/*
-		 * As we do not update the known relocation offsets after
-		 * relocating (due to the complexities in lock handling),
-		 * we need to mark them as invalid now so that we force the
-		 * relocation processing next time. Just in case the target
-		 * object is evicted and then rebound into its old
-		 * presumed_offset before the next execbuffer - if that
-		 * happened we would make the mistake of assuming that the
-		 * relocations were valid.
-		 */
-		if (!user_access_begin(urelocs, size))
-			goto end;
-
-		for (copied = 0; copied < nreloc; copied++)
-			unsafe_put_user(-1,
-					&urelocs[copied].presumed_offset,
-					end_user);
-		user_access_end();
-
-		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
-	}
-
-	return 0;
-
-end_user:
-	user_access_end();
-end:
-	kvfree(relocs);
-	err = -EFAULT;
-err:
-	while (i--) {
-		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
-		if (eb->exec[i].relocation_count)
-			kvfree(relocs);
-	}
-	return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-
-	if (unlikely(i915_modparams.prefault_disable))
-		return 0;
-
-	for (i = 0; i < count; i++) {
-		int err;
-
-		err = check_relocations(&eb->exec[i]);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
-	struct drm_device *dev = &eb->i915->drm;
-	bool have_copy = false;
-	struct i915_vma *vma;
-	int err = 0;
-
-repeat:
-	if (signal_pending(current)) {
-		err = -ERESTARTSYS;
-		goto out;
-	}
-
-	/* We may process another execbuffer during the unlock... */
-	eb_reset_vmas(eb);
-	mutex_unlock(&dev->struct_mutex);
-
-	/*
-	 * We take 3 passes through the slowpath.
-	 *
-	 * 1 - we try to just prefault all the user relocation entries and
-	 * then attempt to reuse the atomic pagefault disabled fast path again.
-	 *
-	 * 2 - we copy the user entries to a local buffer here outside of the
-	 * local and allow ourselves to wait upon any rendering before
-	 * relocations
-	 *
-	 * 3 - we already have a local copy of the relocation entries, but
-	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
-	 */
-	if (!err) {
-		err = eb_prefault_relocations(eb);
-	} else if (!have_copy) {
-		err = eb_copy_relocations(eb);
-		have_copy = err == 0;
-	} else {
-		cond_resched();
-		err = 0;
-	}
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
-		goto out;
-	}
-
-	/* A frequent cause for EAGAIN are currently unavailable client pages */
-	flush_workqueue(eb->i915->mm.userptr_wq);
-
-	err = i915_mutex_lock_interruptible(dev);
-	if (err) {
-		mutex_lock(&dev->struct_mutex);
-		goto out;
-	}
-
-	/* reacquire the objects */
-	err = eb_lookup_vmas(eb);
-	if (err)
-		goto err;
-
-	GEM_BUG_ON(!eb->batch);
-
-	list_for_each_entry(vma, &eb->relocs, reloc_link) {
-		if (!have_copy) {
-			pagefault_disable();
-			err = eb_relocate_vma(eb, vma);
-			pagefault_enable();
-			if (err)
-				goto repeat;
-		} else {
-			err = eb_relocate_vma_slow(eb, vma);
-			if (err)
-				goto err;
-		}
-	}
-
-	/*
-	 * Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
-	 */
-
-err:
-	if (err == -EAGAIN)
-		goto repeat;
-
-out:
-	if (have_copy) {
-		const unsigned int count = eb->buffer_count;
-		unsigned int i;
-
-		for (i = 0; i < count; i++) {
-			const struct drm_i915_gem_exec_object2 *entry =
-				&eb->exec[i];
-			struct drm_i915_gem_relocation_entry *relocs;
-
-			if (!entry->relocation_count)
-				continue;
-
-			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
-			kvfree(relocs);
-		}
-	}
-
-	return err;
-}
-
 static int eb_relocate(struct i915_execbuffer *eb)
 {
-	if (eb_lookup_vmas(eb))
-		goto slow;
+	int err;
+
+	mutex_lock(&eb->gem_context->mutex);
+	err = eb_lookup_vmas(eb);
+	mutex_unlock(&eb->gem_context->mutex);
+	if (err)
+		return err;
+
+	if (!list_empty(&eb->unbound)) {
+		err = eb_reserve(eb);
+		if (err)
+			return err;
+	}
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (eb->args->flags & __EXEC_HAS_RELOC) {
-		struct i915_vma *vma;
+		struct eb_vma *ev;
 
-		list_for_each_entry(vma, &eb->relocs, reloc_link) {
-			if (eb_relocate_vma(eb, vma))
-				goto slow;
+		list_for_each_entry(ev, &eb->relocs, reloc_link) {
+			err = eb_relocate_vma(eb, ev);
+			if (err)
+				return err;
 		}
 	}
 
 	return 0;
-
-slow:
-	return eb_relocate_slow(eb);
 }
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1795,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	ww_acquire_init(&acquire, &reservation_ww_class);
 
 	for (i = 0; i < count; i++) {
-		struct i915_vma *vma = eb->vma[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
-		if (!err)
-			continue;
-
-		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
 		if (err == -EDEADLK) {
 			GEM_BUG_ON(i == 0);
 			do {
 				int j = i - 1;
 
-				ww_mutex_unlock(&eb->vma[j]->resv->lock);
+				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
 
-				swap(eb->flags[i], eb->flags[j]);
 				swap(eb->vma[i],  eb->vma[j]);
-				eb->vma[i]->exec_flags = &eb->flags[i];
 			} while (--i);
-			GEM_BUG_ON(vma != eb->vma[0]);
-			vma->exec_flags = &eb->flags[0];
 
 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
 							       &acquire);
@@ -1826,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	ww_acquire_done(&acquire);
 
 	while (i--) {
-		unsigned int flags = eb->flags[i];
-		struct i915_vma *vma = eb->vma[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
+		unsigned int flags = ev->flags;
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		assert_vma_held(vma);
@@ -1871,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		i915_vma_unlock(vma);
 
 		__eb_unreserve_vma(vma, flags);
-		vma->exec_flags = NULL;
+		i915_vma_put(vma);
 
-		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
-			i915_vma_put(vma);
+		ev->vma = NULL;
 	}
 	ww_acquire_fini(&acquire);
 
@@ -1888,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	return 0;
 
 err_skip:
-	i915_request_skip(eb->request, err);
+	i915_request_set_error_once(eb->request, err);
 	return err;
 }
 
@@ -1922,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	int i;
 
 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
-		DRM_DEBUG("sol reset is gen7/rcs only\n");
+		drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
@@ -2009,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	if (!pw)
 		return -ENOMEM;
 
-	err = i915_active_acquire(&eb->batch->active);
+	err = i915_active_acquire(&eb->batch->vma->active);
 	if (err)
 		goto err_free;
 
@@ -2026,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	dma_fence_work_init(&pw->base, &eb_parse_ops);
 
 	pw->engine = eb->engine;
-	pw->batch = eb->batch;
+	pw->batch = eb->batch->vma;
 	pw->batch_offset = eb->batch_start_offset;
 	pw->batch_length = eb->batch_len;
 	pw->shadow = shadow;
@@ -2068,7 +1795,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 err_shadow:
 	i915_active_release(&shadow->active);
 err_batch:
-	i915_active_release(&eb->batch->active);
+	i915_active_release(&eb->batch->vma->active);
 err_free:
 	kfree(pw);
 	return err;
@@ -2076,6 +1803,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 
 static int eb_parse(struct i915_execbuffer *eb)
 {
+	struct drm_i915_private *i915 = eb->i915;
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *shadow, *trampoline;
 	unsigned int len;
@@ -2091,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb)
 		 * post-scan tampering
 		 */
 		if (!eb->context->vm->has_read_only) {
-			DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+			drm_dbg(&i915->drm,
+				"Cannot prevent post-scan tampering without RO capable vm\n");
 			return -EINVAL;
 		}
 	} else {
@@ -2129,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (err)
 		goto err_trampoline;
 
-	eb->vma[eb->buffer_count] = i915_vma_get(shadow);
-	eb->flags[eb->buffer_count] =
-		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
-	shadow->exec_flags = &eb->flags[eb->buffer_count];
-	eb->buffer_count++;
+	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+	eb->batch = &eb->vma[eb->buffer_count++];
 
 	eb->trampoline = trampoline;
 	eb->batch_start_offset = 0;
-	eb->batch = shadow;
 
 	shadow->private = pool;
 	return 0;
@@ -2164,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 {
 	int err;
 
@@ -2191,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
 	}
 
 	err = eb->engine->emit_bb_start(eb->request,
-					eb->batch->node.start +
+					batch->node.start +
 					eb->batch_start_offset,
 					eb->batch_len,
 					eb->batch_flags);
@@ -2326,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	intel_context_timeline_unlock(tl);
 
 	if (rq) {
-		if (i915_request_wait(rq,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT) < 0) {
-			i915_request_put(rq);
-			err = -EINTR;
+		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+		long timeout;
+
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (nonblock)
+			timeout = 0;
+
+		timeout = i915_request_wait(rq,
+					    I915_WAIT_INTERRUPTIBLE,
+					    timeout);
+		i915_request_put(rq);
+
+		if (timeout < 0) {
+			err = nonblock ? -EWOULDBLOCK : timeout;
 			goto err_exit;
 		}
-
-		i915_request_put(rq);
 	}
 
 	eb->engine = ce->engine;
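
__eb_pin_engine() now honours O_NONBLOCK on the drm file: a non-blocking client waits with a zero timeout, so a still-full ring surfaces as -EWOULDBLOCK rather than stalling the ioctl, and the request reference is dropped on every path. A hedged sketch of the timeout selection, where wait_for_ring() merely stands in for i915_request_wait():

    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>

    #define MAX_SCHEDULE_TIMEOUT ((long)(~0UL >> 1))

    /* Stand-in: remaining time on success, -ETIME if it timed out. */
    static long wait_for_ring(long timeout)
    {
    	return timeout ? timeout : -ETIME;
    }

    static int pin_and_wait(int f_flags)
    {
    	bool nonblock = f_flags & O_NONBLOCK;
    	long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT;

    	timeout = wait_for_ring(timeout);
    	if (timeout < 0)
    		return nonblock ? -EWOULDBLOCK : (int)timeout;
    	return 0;
    }

    int main(void)
    {
    	return pin_and_wait(O_NONBLOCK) == -EWOULDBLOCK ? 0 : 1;
    }
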
@@ -2372,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 
 	if (user_ring_id != I915_EXEC_BSD &&
 	    (args->flags & I915_EXEC_BSD_MASK)) {
-		DRM_DEBUG("execbuf with non bsd ring but with invalid "
-			  "bsd dispatch flags: %d\n", (int)(args->flags));
+		drm_dbg(&i915->drm,
+			"execbuf with non bsd ring but with invalid "
+			"bsd dispatch flags: %d\n", (int)(args->flags));
 		return -1;
 	}
 
@@ -2387,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
 			bsd_idx--;
 		} else {
-			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
-				  bsd_idx);
+			drm_dbg(&i915->drm,
+				"execbuf with unknown bsd ring: %u\n",
+				bsd_idx);
 			return -1;
 		}
 
@@ -2396,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 	}
 
 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
-		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
+			user_ring_id);
 		return -1;
 	}
 
@@ -2556,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
 	}
 }
 
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &tl->requests, link)
+		if (rq == end || !i915_request_retire(rq))
+			break;
+}
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+	struct i915_request *rq = eb->request;
+	struct intel_timeline * const tl = i915_request_timeline(rq);
+	struct i915_sched_attr attr = {};
+	struct i915_request *prev;
+
+	lockdep_assert_held(&tl->mutex);
+	lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+	trace_i915_request_add(rq);
+
+	prev = __i915_request_commit(rq);
+
+	/* Check that the context wasn't destroyed before submission */
+	if (likely(!intel_context_is_closed(eb->context))) {
+		attr = eb->gem_context->sched;
+
+		/*
+		 * Boost actual workloads past semaphores!
+		 *
+		 * With semaphores we spin on one engine waiting for another,
+		 * simply to reduce the latency of starting our work when
+		 * the signaler completes. However, if there is any other
+		 * work that we could be doing on this engine instead, that
+		 * is better utilisation and will reduce the overall duration
+		 * of the current work. To avoid PI boosting a semaphore
+		 * far in the distant past over useful work, we keep a history
+		 * of any semaphore use along our dependency chain.
+		 */
+		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+			attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+		/*
+		 * Boost priorities to new clients (new request flows).
+		 *
+		 * Allow interactive/synchronous clients to jump ahead of
+		 * the bulk clients. (FQ_CODEL)
+		 */
+		if (list_empty(&rq->sched.signalers_list))
+			attr.priority |= I915_PRIORITY_WAIT;
+	} else {
+		/* Serialise with context_close via the add_to_timeline */
+		i915_request_set_error_once(rq, -ENOENT);
+		__i915_request_skip(rq);
+	}
+
+	local_bh_disable();
+	__i915_request_queue(rq, &attr);
+	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+	/* Try to clean up the client's timeline after submitting the request */
+	if (prev)
+		retire_requests(tl, prev);
+
+	mutex_unlock(&tl->mutex);
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_file *file,
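
eb_request_add() absorbs the request-commit logic so execbuf can manage the timeline lock itself, and applies two scheduling nudges before queuing: a request whose dependency chain touched semaphores is denied the semaphore hint, while a request with no signalers (fresh, independent work) gets the interactive FQ_CODEL-style bump. A sketch of just that priority decision, with invented flag and field names:

    #include <stdbool.h>

    enum {
    	PRIO_NOSEMAPHORE = 1 << 0, /* don't spin on semaphores for us */
    	PRIO_WAIT        = 1 << 1, /* interactive: jump the bulk queue */
    };

    struct request {
    	bool semaphore_chain; /* any semaphore along our dependencies */
    	bool has_signalers;   /* we wait on other requests */
    };

    static int boost_priority(const struct request *rq, int prio)
    {
    	/* Don't let semaphore spinners outrank real work. */
    	if (!rq->semaphore_chain)
    		prio |= PRIO_NOSEMAPHORE;

    	/* New, independent work jumps the bulk queue. */
    	if (!rq->has_signalers)
    		prio |= PRIO_WAIT;

    	return prio;
    }

    int main(void)
    {
    	const struct request fresh = { false, false };

    	return boost_priority(&fresh, 0) ==
    	       (PRIO_NOSEMAPHORE | PRIO_WAIT) ? 0 : 1;
    }
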
@@ -2568,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	struct dma_fence *in_fence = NULL;
 	struct dma_fence *exec_fence = NULL;
 	struct sync_file *out_fence = NULL;
+	struct i915_vma *batch;
 	int out_fence_fd = -1;
 	int err;
 
@@ -2582,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		args->flags |= __EXEC_HAS_RELOC;
 
 	eb.exec = exec;
-	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
-	eb.vma[0] = NULL;
-	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+	eb.vma[0].vma = NULL;
 
 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 	reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2652,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_context;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto err_engine;
-
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -2669,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
-	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
-		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
+		drm_dbg(&i915->drm,
+			"Attempting to use self-modifying batch buffer\n");
 		err = -EINVAL;
 		goto err_vma;
 	}
-	if (eb.batch_start_offset > eb.batch->size ||
-	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
-		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+
+	if (range_overflows_t(u64,
+			      eb.batch_start_offset, eb.batch_len,
+			      eb.batch->vma->size)) {
+		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
 		err = -EINVAL;
 		goto err_vma;
 	}
 
 	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->size - eb.batch_start_offset;
+		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
 
 	err = eb_parse(&eb);
 	if (err)
@@ -2692,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
+	batch = eb.batch->vma;
 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
 		struct i915_vma *vma;
 
@@ -2705,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto err_vma;
+			goto err_parse;
 		}
 
-		eb.batch = vma;
+		batch = vma;
 	}
 
 	/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2758,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
-	eb.request->batch = eb.batch;
-	if (eb.batch->private)
-		intel_engine_pool_mark_active(eb.batch->private, eb.request);
+	eb.request->batch = batch;
+	if (batch->private)
+		intel_engine_pool_mark_active(batch->private, eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
-	err = eb_submit(&eb);
+	err = eb_submit(&eb, batch);
 err_request:
 	add_to_client(eb.request, file);
 	i915_request_get(eb.request);
-	i915_request_add(eb.request);
+	eb_request_add(&eb);
 
 	if (fences)
 		signal_fence_array(&eb, fences);
@@ -2786,16 +2589,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
-		i915_vma_unpin(eb.batch);
-	if (eb.batch->private)
-		intel_engine_pool_put(eb.batch->private);
+		i915_vma_unpin(batch);
+err_parse:
+	if (batch->private)
+		intel_engine_pool_put(batch->private);
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
-	mutex_unlock(&dev->struct_mutex);
-err_engine:
 	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
@@ -2813,9 +2615,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
 static size_t eb_element_size(void)
 {
-	return (sizeof(struct drm_i915_gem_exec_object2) +
-		sizeof(struct i915_vma *) +
-		sizeof(unsigned int));
+	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
 }
 
 static bool check_buffer_count(size_t count)
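
eb_element_size() shrinks because the separate vma pointer and flags word collapsed into struct eb_vma, but execbuf still carves both arrays out of one allocation of count + 1 elements, pointing eb.vma just past the exec entries (see the eb.vma assignment earlier in this diff). A stand-alone sketch of that shared-slab layout, with stand-in types:

    #include <stdlib.h>

    struct exec_entry { unsigned long long handle, offset; };
    struct eb_vma { void *vma; unsigned int flags; };

    int main(void)
    {
    	const unsigned int count = 8;
    	/* One slab: (count + 1) slots of each array, laid end to end. */
    	const size_t elem = sizeof(struct exec_entry) +
    			    sizeof(struct eb_vma);
    	struct exec_entry *exec = calloc(count + 1, elem);
    	struct eb_vma *vma;

    	if (!exec)
    		return 1;

    	vma = (struct eb_vma *)(exec + count + 1);
    	vma[0].vma = NULL; /* sentinel, mirroring the execbuf code */

    	free(exec);
    	return 0;
    }
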
@@ -2839,6 +2639,7 @@ int
 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer *args = data;
 	struct drm_i915_gem_execbuffer2 exec2;
 	struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2848,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	int err;
 
 	if (!check_buffer_count(count)) {
-		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -2873,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
+		drm_dbg(&i915->drm,
+			"Failed to allocate exec list for %d buffers\n",
+			args->buffer_count);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -ENOMEM;
@@ -2883,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 			     u64_to_user_ptr(args->buffers_ptr),
 			     sizeof(*exec_list) * count);
 	if (err) {
-		DRM_DEBUG("copy %d exec entries failed %d\n",
-			  args->buffer_count, err);
+		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
+			args->buffer_count, err);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -EFAULT;
@@ -2931,6 +2733,7 @@ int
 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list;
 	struct drm_syncobj **fences = NULL;
@@ -2938,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	int err;
 
 	if (!check_buffer_count(count)) {
-		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -2950,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
-			  count);
+		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
+			count);
 		return -ENOMEM;
 	}
 	if (copy_from_user(exec2_list,
 			   u64_to_user_ptr(args->buffers_ptr),
 			   sizeof(*exec2_list) * count)) {
-		DRM_DEBUG("copy %zd exec entries failed\n", count);
+		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
 		kvfree(exec2_list);
 		return -EFAULT;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e4..cbbff81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_drv.h"
 #include "i915_gem.h"
 #include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0b6a442..b39c24d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -613,8 +613,7 @@ __assign_mmap_offset(struct drm_file *file,
 	if (!obj)
 		return -ENOENT;
 
-	if (mmap_type == I915_MMAP_TYPE_GTT &&
-	    i915_gem_object_never_bind_ggtt(obj)) {
+	if (i915_gem_object_never_mmap(obj)) {
 		err = -ENODEV;
 		goto out;
 	}
@@ -776,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
 	struct file *file;
 
 	rcu_read_lock();
-	file = i915->gem.mmap_singleton;
+	file = READ_ONCE(i915->gem.mmap_singleton);
 	if (file && !get_file_rcu(file))
 		file = NULL;
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 9c86f2d..2faa481 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
 #include <drm/drm_file.h>
 #include <drm/drm_device.h>
 
-#include <drm/i915_drm.h>
-
 #include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
 #include "i915_gem_gtt.h"
@@ -194,9 +192,9 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
 }
 
 static inline bool
-i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
+	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
 }
 
 static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8..e0079215 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 					0);
 out_request:
 	if (unlikely(err))
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
 
 	i915_request_add(rq);
 out_batch:
@@ -196,6 +196,17 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	return err;
 }
 
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+	u32 height = size >> PAGE_SHIFT;
+
+	if (!IS_GEN(i915, 11))
+		return false;
+
+	return height % 4 == 3 && height <= 8;
+}
+
 struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 					 struct i915_vma *src,
 					 struct i915_vma *dst)
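
wa_1209644611_applies() above skips the Gen9+ fast-copy blit on Gen11 when the copy height in pages is 3 modulo 4 and no more than 8, in practice 3 or 7 pages. The predicate is easy to sanity-check outside the driver; a PAGE_SHIFT of 12 is assumed here:

    #include <assert.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12

    /* Mirror of the predicate, minus the IS_GEN(i915, 11) gate. */
    static bool wa_1209644611_height(unsigned int size)
    {
    	unsigned int height = size >> PAGE_SHIFT;

    	return height % 4 == 3 && height <= 8;
    }

    int main(void)
    {
    	assert(wa_1209644611_height(3u << PAGE_SHIFT));   /* 3 pages */
    	assert(wa_1209644611_height(7u << PAGE_SHIFT));   /* 7 pages */
    	assert(!wa_1209644611_height(4u << PAGE_SHIFT));  /* 4 % 4 == 0 */
    	assert(!wa_1209644611_height(11u << PAGE_SHIFT)); /* 11 > 8 */
    	return 0;
    }
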
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
 		size = min_t(u64, rem, block_size);
 		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
 
-		if (INTEL_GEN(i915) >= 9) {
+		if (INTEL_GEN(i915) >= 9 &&
+		    !wa_1209644611_applies(i915, size)) {
 			*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
 			*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
 			*cmd++ = 0;
@@ -385,7 +397,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 	drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
 out_request:
 	if (unlikely(err))
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
 
 	i915_request_add(rq);
 out_batch:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c2174da..a0b10bcd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,7 +34,7 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
 #define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
 #define I915_GEM_OBJECT_IS_PROXY	BIT(3)
-#define I915_GEM_OBJECT_NO_GGTT		BIT(4)
+#define I915_GEM_OBJECT_NO_MMAP		BIT(4)
 #define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)
 
 	/* Interface between the GEM object and its backing storage.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 54aca5c..24f4cad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -83,10 +83,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
-		DRM_DEBUG("Attempting to obtain a purgeable object\n");
+		drm_dbg(&i915->drm,
+			"Attempting to obtain a purgeable object\n");
 		return -EFAULT;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b07bb40..698e224 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	/* Perma-pin (until release) the physical set of pages */
 	__i915_gem_object_pin_pages(obj);
 
-	if (!IS_ERR_OR_NULL(pages)) {
+	if (!IS_ERR_OR_NULL(pages))
 		i915_gem_shmem_ops.put_pages(obj, pages);
-		i915_gem_object_release_memory_region(obj);
-	}
+
+	i915_gem_object_release_memory_region(obj);
+
 	mutex_unlock(&obj->mm.lock);
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index c8264eb..3d21516 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -85,7 +85,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
 			i915_gem_object_lock(obj);
-			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+			drm_WARN_ON(&i915->drm,
+			    i915_gem_object_set_to_gtt_domain(obj, false));
 			i915_gem_object_unlock(obj);
 			i915_gem_object_put(obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index a2a980d..5d5d7ee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -148,7 +148,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 		last_pfn = page_to_pfn(page);
 
 		/* Check that the i965g/gm workaround works. */
-		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+		drm_WARN_ON(&i915->drm,
+			    (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
 	if (sg) { /* loop terminated early; short sg table */
 		sg_page_sizes |= sg->length;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 59b387a..03e5eb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
 
 #include "i915_trace.h"
 
@@ -401,19 +400,22 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
 	i915->mm.shrinker.batch = 4096;
-	WARN_ON(register_shrinker(&i915->mm.shrinker));
+	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
 
 	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
+	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
 
 	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
-	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
+	drm_WARN_ON(&i915->drm,
+		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
 }
 
 void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 {
-	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
-	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+	drm_WARN_ON(&i915->drm,
+		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+	drm_WARN_ON(&i915->drm,
+		    unregister_oom_notifier(&i915->mm.oom_notifier));
 	unregister_shrinker(&i915->mm.shrinker);
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 451f307..5557dfa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
 #include "gem/i915_gem_region.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
 
 /*
  * The BIOS typically reserves some of the system's memory for the exclusive
@@ -110,8 +111,11 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
 
 		if (stolen[0].start != stolen[1].start ||
 		    stolen[0].end != stolen[1].end) {
-			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
-			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
+			drm_dbg(&i915->drm,
+				"GTT within stolen memory at %pR\n",
+				&ggtt_res);
+			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
+				dsm);
 		}
 	}
 
@@ -142,8 +146,9 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
 		 * range. Apparently this works.
 		 */
 		if (!r && !IS_GEN(i915, 3)) {
-			DRM_ERROR("conflict detected with stolen region: %pR\n",
-				  dsm);
+			drm_err(&i915->drm,
+				"conflict detected with stolen region: %pR\n",
+				dsm);
 
 			return -EBUSY;
 		}
@@ -171,8 +176,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
 					ELK_STOLEN_RESERVED);
 	resource_size_t stolen_top = i915->dsm.end + 1;
 
-	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
-			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);
+	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
+		IS_GM45(i915) ? "CTG" : "ELK", reg_val);
 
 	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
 		return;
@@ -181,14 +186,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
 	 * Whether ILK really reuses the ELK register for this is unclear.
 	 * Let's see if we catch anyone with this supposedly enabled on ILK.
 	 */
-	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
-	     reg_val);
+	drm_WARN(&i915->drm, IS_GEN(i915, 5),
+		 "ILK stolen reserved found? 0x%08x\n",
+		 reg_val);
 
 	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
 		return;
 
 	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
-	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+	drm_WARN_ON(&i915->drm,
+		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
 
 	*size = stolen_top - *base;
 }
@@ -200,7 +207,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
 {
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
@@ -234,7 +241,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 	resource_size_t stolen_top = i915->dsm.end + 1;
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
@@ -262,7 +269,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
 {
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
@@ -289,7 +296,7 @@ static void chv_get_stolen_reserved(struct drm_i915_private *i915,
 {
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
@@ -323,7 +330,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
 	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
 	resource_size_t stolen_top = i915->dsm.end + 1;
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
 
 	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
 		return;
@@ -342,7 +349,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
 {
 	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
 
-	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
 
 	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
 
@@ -453,8 +460,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
 	 * it likely means we failed to read the registers correctly.
 	 */
 	if (!reserved_base) {
-		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
-			  &reserved_base, &reserved_size);
+		drm_err(&i915->drm,
+			"inconsistent reservation %pa + %pa; ignoring\n",
+			&reserved_base, &reserved_size);
 		reserved_base = stolen_top;
 		reserved_size = 0;
 	}
@@ -463,8 +471,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
 		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
 
 	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
-		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
-			  &i915->dsm_reserved, &i915->dsm);
+		drm_err(&i915->drm,
+			"Stolen reserved area %pR outside stolen memory %pR\n",
+			&i915->dsm_reserved, &i915->dsm);
 		return 0;
 	}
 
@@ -472,9 +481,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
 	 * memory, so just consider the start. */
 	reserved_total = stolen_top - reserved_base;
 
-	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
-			 (u64)resource_size(&i915->dsm) >> 10,
-			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
+	drm_dbg(&i915->drm,
+		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
+		(u64)resource_size(&i915->dsm) >> 10,
+		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
 
 	i915->stolen_usable_size =
 		resource_size(&i915->dsm) - reserved_total;
@@ -677,26 +687,24 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 					       resource_size_t stolen_offset,
-					       resource_size_t gtt_offset,
 					       resource_size_t size)
 {
 	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
-	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
-	struct i915_vma *vma;
 	int ret;
 
 	if (!drm_mm_initialized(&i915->mm.stolen))
 		return ERR_PTR(-ENODEV);
 
-	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
-			 &stolen_offset, &gtt_offset, &size);
+	drm_dbg(&i915->drm,
+		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+		&stolen_offset, &size);
 
 	/* KISS and expect everything to be page-aligned */
-	if (WARN_ON(size == 0) ||
-	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
-	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+	if (GEM_WARN_ON(size == 0) ||
+	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
 		return ERR_PTR(-EINVAL);
 
 	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -709,68 +717,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
 	mutex_unlock(&i915->mm.stolen_lock);
 	if (ret) {
-		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
-		kfree(stolen);
-		return ERR_PTR(ret);
+		obj = ERR_PTR(ret);
+		goto err_free;
 	}
 
 	obj = __i915_gem_object_create_stolen(mem, stolen);
-	if (IS_ERR(obj)) {
-		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
-		i915_gem_stolen_remove_node(i915, stolen);
-		kfree(stolen);
-		return obj;
-	}
+	if (IS_ERR(obj))
+		goto err_stolen;
 
-	/* Some objects just need physical mem from stolen space */
-	if (gtt_offset == I915_GTT_OFFSET_NONE)
-		return obj;
-
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
-	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto err_pages;
-	}
-
-	/* To simplify the initialisation sequence between KMS and GTT,
-	 * we allow construction of the stolen object prior to
-	 * setting up the GTT space. The actual reservation will occur
-	 * later.
-	 */
-	mutex_lock(&ggtt->vm.mutex);
-	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
-				   size, gtt_offset, obj->cache_level,
-				   0);
-	if (ret) {
-		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
-		mutex_unlock(&ggtt->vm.mutex);
-		goto err_pages;
-	}
-
-	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
-	GEM_BUG_ON(vma->pages);
-	vma->pages = obj->mm.pages;
-	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
-
-	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
-	__i915_vma_set_map_and_fenceable(vma);
-
-	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
-	mutex_unlock(&ggtt->vm.mutex);
-
-	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
-	atomic_inc(&obj->bind_count);
-
+	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 	return obj;
 
-err_pages:
-	i915_gem_object_unpin_pages(obj);
-err:
-	i915_gem_object_put(obj);
-	return ERR_PTR(ret);
+err_stolen:
+	i915_gem_stolen_remove_node(i915, stolen);
+err_free:
+	kfree(stolen);
+	return obj;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index c104062..e15c0ad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -28,7 +28,6 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
 					       resource_size_t stolen_offset,
-					       resource_size_t gtt_offset,
 					       resource_size_t size);
 
 #endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a..37f77ae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
 
 #include <linux/string.h>
 #include <linux/bitops.h>
-#include <drm/i915_drm.h>
 
 #include "i915_drv.h"
 #include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 580319b..7ffd7af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
 #include <linux/swap.h>
 #include <linux/sched/mm.h>
 
-#include <drm/i915_drm.h>
-
 #include "i915_drv.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
@@ -704,7 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 		 I915_GEM_OBJECT_IS_SHRINKABLE |
-		 I915_GEM_OBJECT_NO_GGTT |
+		 I915_GEM_OBJECT_NO_MMAP |
 		 I915_GEM_OBJECT_ASYNC_CANCEL,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
@@ -770,6 +768,23 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 			    I915_USERPTR_UNSYNCHRONIZED))
 		return -EINVAL;
 
+	/*
+	 * XXX: There is a prevalence of the assumption that we fit the
+	 * object's page count inside a 32bit _signed_ variable. Let's document
+	 * this and catch if we ever need to fix it. In the meantime, if you do
+	 * spot such a local variable, please consider fixing!
+	 *
+	 * Aside from our own locals (for which we have no excuse!):
+	 * - sg_table embeds unsigned int for num_pages
+	 * - get_user_pages*() mixed ints with longs
+	 */
+
+	if (args->user_size >> PAGE_SHIFT > INT_MAX)
+		return -E2BIG;
+
+	if (overflows_type(args->user_size, obj->base.size))
+		return -E2BIG;
+
 	if (!args->user_size)
 		return -EINVAL;
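
The new userptr checks reject any size whose page count does not fit in a signed 32-bit int, and overflows_type() additionally catches truncation when user_size is stored into obj->base.size. Both are easy to demonstrate in isolation; the 4 KiB PAGE_SHIFT and the deliberately narrow field below are assumptions of the sketch, not the driver's types:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
    	/* INT_MAX + 1 pages: one page past the documented limit. */
    	uint64_t user_size = ((uint64_t)INT_MAX + 1) << PAGE_SHIFT;
    	uint32_t narrow;

    	if (user_size >> PAGE_SHIFT > INT_MAX)
    		puts("rejected: page count overflows a signed int");

    	/* overflows_type(): does the assignment truncate the value? */
    	narrow = (uint32_t)user_size;
    	if ((uint64_t)narrow != user_size)
    		puts("rejected: size truncated on assignment");

    	return 0;
    }
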
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 9311250..2d0fd50 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1208,107 +1208,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	return err;
 }
 
-static int igt_ppgtt_exhaust_huge(void *arg)
-{
-	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *i915 = ctx->i915;
-	unsigned long supported = INTEL_INFO(i915)->page_sizes;
-	static unsigned int pages[ARRAY_SIZE(page_sizes)];
-	struct drm_i915_gem_object *obj;
-	unsigned int size_mask;
-	unsigned int page_mask;
-	int n, i;
-	int err = -ENODEV;
-
-	if (supported == I915_GTT_PAGE_SIZE_4K)
-		return 0;
-
-	/*
-	 * Sanity check creating objects with a varying mix of page sizes --
-	 * ensuring that our writes lands in the right place.
-	 */
-
-	n = 0;
-	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
-		pages[n++] = BIT(i);
-
-	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
-		unsigned int size = 0;
-
-		for (i = 0; i < n; i++) {
-			if (size_mask & BIT(i))
-				size |= pages[i];
-		}
-
-		/*
-		 * For our page mask we want to enumerate all the page-size
-		 * combinations which will fit into our chosen object size.
-		 */
-		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
-			unsigned int page_sizes = 0;
-
-			for (i = 0; i < n; i++) {
-				if (page_mask & BIT(i))
-					page_sizes |= pages[i];
-			}
-
-			/*
-			 * Ensure that we can actually fill the given object
-			 * with our chosen page mask.
-			 */
-			if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
-				continue;
-
-			obj = huge_pages_object(i915, size, page_sizes);
-			if (IS_ERR(obj)) {
-				err = PTR_ERR(obj);
-				goto out_device;
-			}
-
-			err = i915_gem_object_pin_pages(obj);
-			if (err) {
-				i915_gem_object_put(obj);
-
-				if (err == -ENOMEM) {
-					pr_info("unable to get pages, size=%u, pages=%u\n",
-						size, page_sizes);
-					err = 0;
-					break;
-				}
-
-				pr_err("pin_pages failed, size=%u, pages=%u\n",
-				       size_mask, page_mask);
-
-				goto out_device;
-			}
-
-			/* Force the page-size for the gtt insertion */
-			obj->mm.page_sizes.sg = page_sizes;
-
-			err = igt_write_huge(ctx, obj);
-			if (err) {
-				pr_err("exhaust write-huge failed with size=%u\n",
-				       size);
-				goto out_unpin;
-			}
-
-			i915_gem_object_unpin_pages(obj);
-			__i915_gem_object_put_pages(obj);
-			i915_gem_object_put(obj);
-		}
-	}
-
-	goto out_device;
-
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-	i915_gem_object_put(obj);
-out_device:
-	mkwrite_device_info(i915)->page_sizes = supported;
-
-	return err;
-}
-
 typedef struct drm_i915_gem_object *
 (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
 
@@ -1900,7 +1799,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_shrink_thp),
 		SUBTEST(igt_ppgtt_pin_update),
 		SUBTEST(igt_tmpfs_fallback),
-		SUBTEST(igt_ppgtt_exhaust_huge),
 		SUBTEST(igt_ppgtt_smoke_huge),
 		SUBTEST(igt_ppgtt_sanity_check),
 	};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7fc4686..54b86cf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	return 0;
 
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_batch:
@@ -1465,9 +1465,12 @@ static int igt_ctx_readonly(void *arg)
 
 static int check_scratch(struct i915_address_space *vm, u64 offset)
 {
-	struct drm_mm_node *node =
-		__drm_mm_interval_first(&vm->mm,
-					offset, offset + sizeof(u32) - 1);
+	struct drm_mm_node *node;
+
+	mutex_lock(&vm->mutex);
+	node = __drm_mm_interval_first(&vm->mm,
+				       offset, offset + sizeof(u32) - 1);
+	mutex_unlock(&vm->mutex);
 	if (!node || node->start > offset)
 		return 0;
 
@@ -1492,6 +1495,10 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 
 	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
 
+	err = check_scratch(ctx_vm(ctx), offset);
+	if (err)
+		return err;
+
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -1528,10 +1535,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto out_vm;
 
-	err = check_scratch(vm, offset);
-	if (err)
-		goto err_unpin;
-
 	rq = igt_request_alloc(ctx, engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
@@ -1556,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 
 	goto out_vm;
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1575,72 +1578,103 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
-	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
 	const u32 result = 0x100;
 	struct i915_request *rq;
 	struct i915_vma *vma;
+	unsigned int flags;
 	u32 *cmd;
 	int err;
 
 	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
 
+	err = check_scratch(ctx_vm(ctx), offset);
+	if (err)
+		return err;
+
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto out;
-	}
-
-	memset(cmd, POISON_INUSE, PAGE_SIZE);
 	if (INTEL_GEN(i915) >= 8) {
+		const u32 GPR0 = engine->mmio_base + 0x600;
+
+		vm = i915_gem_context_get_vm_rcu(ctx);
+		vma = i915_vma_instance(obj, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto out_vm;
+		}
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+		if (err)
+			goto out_vm;
+
+		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(cmd)) {
+			err = PTR_ERR(cmd);
+			goto out;
+		}
+
+		memset(cmd, POISON_INUSE, PAGE_SIZE);
 		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
-		*cmd++ = RCS_GPR0;
+		*cmd++ = GPR0;
 		*cmd++ = lower_32_bits(offset);
 		*cmd++ = upper_32_bits(offset);
 		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
-		*cmd++ = RCS_GPR0;
+		*cmd++ = GPR0;
 		*cmd++ = result;
 		*cmd++ = 0;
-	} else {
-		*cmd++ = MI_LOAD_REGISTER_MEM;
-		*cmd++ = RCS_GPR0;
-		*cmd++ = offset;
-		*cmd++ = MI_STORE_REGISTER_MEM;
-		*cmd++ = RCS_GPR0;
-		*cmd++ = result;
-	}
-	*cmd = MI_BATCH_BUFFER_END;
+		*cmd = MI_BATCH_BUFFER_END;
 
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
+		i915_gem_object_flush_map(obj);
+		i915_gem_object_unpin_map(obj);
+
+		flags = 0;
+	} else {
+		const u32 reg = engine->mmio_base + 0x420;
+
+		/* hsw: register access (even to 3DPRIM!) is protected */
+		vm = i915_vm_get(&engine->gt->ggtt->vm);
+		vma = i915_vma_instance(obj, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto out_vm;
+		}
+
+		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+		if (err)
+			goto out_vm;
+
+		cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(cmd)) {
+			err = PTR_ERR(cmd);
+			goto out;
+		}
+
+		memset(cmd, POISON_INUSE, PAGE_SIZE);
+		*cmd++ = MI_LOAD_REGISTER_MEM;
+		*cmd++ = reg;
+		*cmd++ = offset;
+		*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+		*cmd++ = reg;
+		*cmd++ = vma->node.start + result;
+		*cmd = MI_BATCH_BUFFER_END;
+
+		i915_gem_object_flush_map(obj);
+		i915_gem_object_unpin_map(obj);
+
+		flags = I915_DISPATCH_SECURE;
+	}
 
 	intel_gt_chipset_flush(engine->gt);
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	vma = i915_vma_instance(obj, vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto out_vm;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
-	if (err)
-		goto out_vm;
-
-	err = check_scratch(vm, offset);
-	if (err)
-		goto err_unpin;
-
 	rq = igt_request_alloc(ctx, engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
 	}
 
-	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
 	if (err)
 		goto err_request;
 
@@ -1674,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 
 	goto out_vm;
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1686,6 +1720,39 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 	return err;
 }
 
+static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
+{
+	struct i915_address_space *vm;
+	struct page *page;
+	u32 *vaddr;
+	int err = 0;
+
+	vm = ctx_vm(ctx);
+	if (!vm)
+		return -ENODEV;
+
+	page = vm->scratch[0].base.page;
+	if (!page) {
+		pr_err("No scratch page!\n");
+		return -EINVAL;
+	}
+
+	vaddr = kmap(page);
+	if (!vaddr) {
+		pr_err("No (mappable) scratch page!\n");
+		return -EINVAL;
+	}
+
+	memcpy(out, vaddr, sizeof(*out));
+	if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
+		pr_err("Inconsistent initial state of scratch page!\n");
+		err = -EINVAL;
+	}
+	kunmap(page);
+
+	return err;
+}
+
 static int igt_vm_isolation(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -1696,6 +1763,7 @@ static int igt_vm_isolation(void *arg)
 	I915_RND_STATE(prng);
 	struct file *file;
 	u64 vm_total;
+	u32 expected;
 	int err;
 
 	if (INTEL_GEN(i915) < 7)
@@ -1730,9 +1798,17 @@ static int igt_vm_isolation(void *arg)
 	if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
 		goto out_file;
 
+	/* Read the initial state of the scratch page */
+	err = check_scratch_page(ctx_a, &expected);
+	if (err)
+		goto out_file;
+
+	err = check_scratch_page(ctx_b, &expected);
+	if (err)
+		goto out_file;
+
 	vm_total = ctx_vm(ctx_a)->total;
 	GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
-	vm_total -= I915_GTT_PAGE_SIZE;
 
 	count = 0;
 	num_engines = 0;
@@ -1743,14 +1819,18 @@ static int igt_vm_isolation(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;
 
+		/* Not all engines have their own GPR! */
+		if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
+			continue;
+
 		while (!__igt_timeout(end_time, NULL)) {
 			u32 value = 0xc5c5c5c5;
 			u64 offset;
 
-			div64_u64_rem(i915_prandom_u64_state(&prng),
-				      vm_total, &offset);
-			offset = round_down(offset, alignof_dword);
-			offset += I915_GTT_PAGE_SIZE;
+			/* Leave enough space at offset 0 for the batch */
+			offset = igt_random_offset(&prng,
+						   I915_GTT_PAGE_SIZE, vm_total,
+						   sizeof(u32), alignof_dword);
 
 			err = write_to_scratch(ctx_a, engine,
 					       offset, 0xdeadbeef);
@@ -1760,7 +1840,7 @@ static int igt_vm_isolation(void *arg)
 			if (err)
 				goto out_file;
 
-			if (value) {
+			if (value != expected) {
 				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
 				       engine->name, value,
 				       upper_32_bits(offset),
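
For reference, a standalone sketch of the offset selection the isolation test switches to here (igt_random_offset() in the tree): draw a random offset in [start, end - len], align it down, and so leave the low pages free for the batch. The RNG and bounds handling below are simplified assumptions, not the helper's actual body.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint64_t random_offset(uint64_t start, uint64_t end,
                                  uint64_t len, uint64_t align)
    {
            uint64_t r = ((uint64_t)rand() << 31) | (uint64_t)rand();
            uint64_t off = r % (end - start - len + 1);

            off &= ~(align - 1);    /* align is assumed a power of two */
            return start + off;     /* start is assumed already aligned */
    }

    int main(void)
    {
            /* keep [0, 4096) free for the batch; dword-sized reads */
            printf("0x%llx\n", (unsigned long long)
                   random_offset(4096, 1ull << 32, 4, 4));
            return 0;
    }
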
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe..31549ad 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
 	struct intel_context *ce;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
+	u64 total, max;
 	int err;
 
 	ctx = thread->ctx;
@@ -225,27 +226,32 @@ static int igt_fill_blt_thread(void *arg)
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
+	/*
+	 * If we have a tiny shared address space, like for the GGTT,
+	 * then we can't be too greedy.
+	 */
+	max = ce->vm->total;
+	if (i915_is_ggtt(ce->vm) || thread->ctx)
+		max = div_u64(max, thread->n_cpus);
+	max >>= 4;
+
+	total = PAGE_SIZE;
 	do {
-		const u32 max_block_size = S16_MAX * PAGE_SIZE;
+		/* Aim to keep the runtime under reasonable bounds! */
+		const u32 max_phys_size = SZ_64K;
 		u32 val = prandom_u32_state(prng);
-		u64 total = ce->vm->total;
 		u32 phys_sz;
 		u32 sz;
 		u32 *vaddr;
 		u32 i;
 
-		/*
-		 * If we have a tiny shared address space, like for the GGTT
-		 * then we can't be too greedy.
-		 */
-		if (i915_is_ggtt(ce->vm))
-			total = div64_u64(total, thread->n_cpus);
-
-		sz = min_t(u64, total >> 4, prandom_u32_state(prng));
-		phys_sz = sz % (max_block_size + 1);
+		total = min(total, max);
+		sz = i915_prandom_u32_max_state(total, prng) + 1;
+		phys_sz = sz % max_phys_size + 1;
 
 		sz = round_up(sz, PAGE_SIZE);
 		phys_sz = round_up(phys_sz, PAGE_SIZE);
+		phys_sz = min(phys_sz, sz);
 
 		pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
 			 phys_sz, sz, val);
@@ -276,13 +282,14 @@ static int igt_fill_blt_thread(void *arg)
 		if (err)
 			goto err_unpin;
 
-		i915_gem_object_lock(obj);
-		err = i915_gem_object_set_to_cpu_domain(obj, false);
-		i915_gem_object_unlock(obj);
+		err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			goto err_unpin;
 
-		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
 			if (vaddr[i] != val) {
 				pr_err("vaddr[%u]=%x, expected=%x\n", i,
 				       vaddr[i], val);
@@ -293,6 +300,8 @@ static int igt_fill_blt_thread(void *arg)
 
 		i915_gem_object_unpin_map(obj);
 		i915_gem_object_put(obj);
+
+		total <<= 1;
 	} while (!time_after(jiffies, end));
 
 	goto err_flush;
@@ -319,6 +328,7 @@ static int igt_copy_blt_thread(void *arg)
 	struct intel_context *ce;
 	unsigned int prio;
 	IGT_TIMEOUT(end);
+	u64 total, max;
 	int err;
 
 	ctx = thread->ctx;
@@ -334,23 +344,32 @@ static int igt_copy_blt_thread(void *arg)
 	ce = i915_gem_context_get_engine(ctx, BCS0);
 	GEM_BUG_ON(IS_ERR(ce));
 
+	/*
+	 * If we have a tiny shared address space, like for the GGTT,
+	 * then we can't be too greedy.
+	 */
+	max = ce->vm->total;
+	if (i915_is_ggtt(ce->vm) || thread->ctx)
+		max = div_u64(max, thread->n_cpus);
+	max >>= 4;
+
+	total = PAGE_SIZE;
 	do {
-		const u32 max_block_size = S16_MAX * PAGE_SIZE;
+		/* Aim to keep the runtime under reasonable bounds! */
+		const u32 max_phys_size = SZ_64K;
 		u32 val = prandom_u32_state(prng);
-		u64 total = ce->vm->total;
 		u32 phys_sz;
 		u32 sz;
 		u32 *vaddr;
 		u32 i;
 
-		if (i915_is_ggtt(ce->vm))
-			total = div64_u64(total, thread->n_cpus);
-
-		sz = min_t(u64, total >> 4, prandom_u32_state(prng));
-		phys_sz = sz % (max_block_size + 1);
+		total = min(total, max);
+		sz = i915_prandom_u32_max_state(total, prng) + 1;
+		phys_sz = sz % max_phys_size + 1;
 
 		sz = round_up(sz, PAGE_SIZE);
 		phys_sz = round_up(phys_sz, PAGE_SIZE);
+		phys_sz = min(phys_sz, sz);
 
 		pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
 			 phys_sz, sz, val);
@@ -397,13 +416,14 @@ static int igt_copy_blt_thread(void *arg)
 		if (err)
 			goto err_unpin;
 
-		i915_gem_object_lock(dst);
-		err = i915_gem_object_set_to_cpu_domain(dst, false);
-		i915_gem_object_unlock(dst);
+		err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			goto err_unpin;
 
-		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+			if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
 			if (vaddr[i] != val) {
 				pr_err("vaddr[%u]=%x, expected=%x\n", i,
 				       vaddr[i], val);
@@ -416,6 +436,8 @@ static int igt_copy_blt_thread(void *arg)
 
 		i915_gem_object_put(src);
 		i915_gem_object_put(dst);
+
+		total <<= 1;
 	} while (!time_after(jiffies, end));
 
 	goto err_flush;
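
The reworked blitter loops above bound the selftest runtime by growing the object size geometrically instead of sampling uniformly from the whole address space on every pass. A minimal standalone model of that arithmetic; the cap and RNG are stand-ins, not the driver's values.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SZ  4096u
    #define MAX_PHYS (64u * 1024)   /* mirrors max_phys_size above */

    static uint32_t round_up_page(uint32_t v)
    {
            return (v + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
    }

    int main(void)
    {
            uint64_t max = 1ull << 30;      /* stand-in for the vm-derived cap */
            uint64_t total = PAGE_SZ;

            for (int pass = 0; pass < 8; pass++) {
                    uint32_t sz, phys;

                    total = total < max ? total : max;
                    sz = (uint32_t)(rand() % total) + 1;    /* [1, total] */
                    phys = sz % MAX_PHYS + 1;

                    sz = round_up_page(sz);
                    phys = round_up_page(phys);
                    phys = phys < sz ? phys : sz;   /* never back more than mapped */

                    printf("pass %d: sz=%u phys=%u\n", pass, sz, phys);
                    total <<= 1;    /* geometric growth keeps runtime bounded */
            }
            return 0;
    }
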
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da2..772d8cb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 	return 0;
 
 skip_request:
-	i915_request_skip(rq, err);
+	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_batch:
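
The i915_request_skip() -> i915_request_set_error_once() rename throughout these error paths suggests first-error-wins semantics: only the first error reported against a request sticks, and later reports become no-ops. A hedged userspace model with C11 atomics — the kernel helper is likely a cmpxchg on the fence error, which this only approximates.

    #include <stdatomic.h>
    #include <stdio.h>

    static void set_error_once(atomic_int *error, int err)
    {
            int expected = 0;

            /* succeeds only while no error has been recorded yet */
            atomic_compare_exchange_strong(error, &expected, err);
    }

    int main(void)
    {
            atomic_int error = 0;

            set_error_once(&error, -5);     /* first error (-EIO) wins */
            set_error_once(&error, -22);    /* later -EINVAL is dropped */
            printf("%d\n", atomic_load(&error));    /* prints -5 */
            return 0;
    }
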
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 384143a..e7e3c62 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
 
+	spin_lock_init(&ctx->stale.lock);
+	INIT_LIST_HEAD(&ctx->stale.engines);
+
 	i915_gem_context_set_persistence(ctx);
 
 	mutex_init(&ctx->engines_mutex);
@@ -37,7 +40,7 @@ mock_context(struct drm_i915_private *i915,
 	if (name) {
 		struct i915_ppgtt *ppgtt;
 
-		strncpy(ctx->name, name, sizeof(ctx->name));
+		strncpy(ctx->name, name, sizeof(ctx->name) - 1);
 
 		ppgtt = mock_ppgtt(i915, name);
 		if (!ppgtt)
@@ -83,6 +86,8 @@ live_context(struct drm_i915_private *i915, struct file *file)
 	if (IS_ERR(ctx))
 		return ctx;
 
+	i915_gem_context_set_no_error_capture(ctx);
+
 	err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
 	if (err < 0)
 		goto err_ctx;
@@ -105,6 +110,7 @@ kernel_context(struct drm_i915_private *i915)
 
 	i915_gem_context_clear_bannable(ctx);
 	i915_gem_context_set_persistence(ctx);
+	i915_gem_context_set_no_error_capture(ctx);
 
 	return ctx;
 }
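
The strncpy() fix above is the classic termination pitfall: when the source fills the destination, strncpy() writes no NUL at all. Copying sizeof() - 1 bytes into zeroed memory keeps the terminator intact. A standalone demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[8] = { 0 };   /* zeroed, like freshly allocated ctx memory */

            strncpy(name, "0123456789", sizeof(name) - 1);
            printf("%s\n", name);   /* "0123456", safely NUL-terminated */
            return 0;
    }
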
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 0000000..de595b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+	const void *data;
+	u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+	struct i915_vma *vma;
+	u32 offset;
+	u32 *start;
+	u32 *end;
+	u32 max_items;
+};
+
+struct batch_vals {
+	u32 max_primitives;
+	u32 max_urb_entries;
+	u32 cmd_size;
+	u32 state_size;
+	u32 state_start;
+	u32 batch_size;
+	u32 surface_height;
+	u32 surface_width;
+	u32 scratch_size;
+	u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+	if (IS_HASWELL(i915)) {
+		bv->max_primitives = 280;
+		bv->max_urb_entries = MAX_URB_ENTRIES;
+		bv->surface_height = 16 * 16;
+		bv->surface_width = 32 * 2 * 16;
+	} else {
+		bv->max_primitives = 128;
+		bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+		bv->surface_height = 16 * 8;
+		bv->surface_width = 32 * 16;
+	}
+	bv->cmd_size = bv->max_primitives * 4096;
+	bv->state_size = STATE_SIZE;
+	bv->state_start = bv->cmd_size;
+	bv->batch_size = bv->cmd_size + bv->state_size;
+	bv->scratch_size = bv->surface_height * bv->surface_width;
+	bv->max_size = bv->batch_size + bv->scratch_size;
+}
+
+static void batch_init(struct batch_chunk *bc,
+		       struct i915_vma *vma,
+		       u32 *start, u32 offset, u32 max_bytes)
+{
+	bc->vma = vma;
+	bc->offset = offset;
+	bc->start = start + bc->offset / sizeof(*bc->start);
+	bc->end = bc->start;
+	bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+	return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+	return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+	GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+	*bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+	u32 *map;
+
+	if (align) {
+		u32 *end = PTR_ALIGN(bc->end, align);
+
+		memset32(bc->end, 0, end - bc->end);
+		bc->end = end;
+	}
+
+	map = bc->end;
+	bc->end += items;
+
+	return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+	GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+	return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+			const u32 dst_offset,
+			const struct batch_vals *bv)
+{
+	u32 surface_h = bv->surface_height;
+	u32 surface_w = bv->surface_width;
+	u32 *cs = batch_alloc_items(state, 32, 8);
+	u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+	*cs++ = SURFACE_2D << 29 |
+		(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+		(RENDER_CACHE_READ_WRITE << 8);
+
+	*cs++ = batch_addr(state) + dst_offset;
+
+	*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+	*cs++ = surface_w;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+	(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+	*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+	batch_advance(state, cs);
+
+	return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+			const struct batch_vals *bv)
+{
+	u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+	u32 *cs = batch_alloc_items(state, 32, 8);
+	u32 offset = batch_offset(state, cs);
+
+	*cs++ = surface_start - state->offset;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	batch_advance(state, cs);
+
+	return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+		      const u32 *data,
+		      const u32 size)
+{
+	return batch_offset(state,
+			    memcpy(batch_alloc_bytes(state, 64, size),
+				   data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+			       const struct batch_vals *bv,
+			       const struct cb_kernel *kernel,
+			       unsigned int count)
+{
+	u32 kernel_offset =
+		gen7_fill_kernel_data(state, kernel->data, kernel->size);
+	u32 binding_table = gen7_fill_binding_table(state, bv);
+	u32 *cs = batch_alloc_items(state, 32, 8 * count);
+	u32 offset = batch_offset(state, cs);
+
+	*cs++ = kernel_offset;
+	*cs++ = (1 << 7) | (1 << 13);
+	*cs++ = 0;
+	*cs++ = (binding_table - state->offset) | 1;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	/* 1 - 63: dummy IDDs */
+	memset32(cs, 0x00, (count - 1) * 8);
+	batch_advance(state, cs + (count - 1) * 8);
+
+	return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+			     u32 surface_state_base)
+{
+	u32 *cs = batch_alloc_items(batch, 0, 12);
+
+	*cs++ = STATE_BASE_ADDRESS | (12 - 2);
+	/* general */
+	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+	/* surface */
+	*cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+	/* dynamic */
+	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+	/* indirect */
+	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+	/* instruction */
+	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+	/* general/dynamic/indirect/instruction access Bound */
+	*cs++ = 0;
+	*cs++ = BASE_ADDRESS_MODIFY;
+	*cs++ = 0;
+	*cs++ = BASE_ADDRESS_MODIFY;
+	*cs++ = 0;
+	*cs++ = 0;
+	batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+		    const struct batch_vals *bv,
+		    u32 urb_size, u32 curbe_size,
+		    u32 mode)
+{
+	u32 urb_entries = bv->max_urb_entries;
+	u32 threads = bv->max_primitives - 1;
+	u32 *cs = batch_alloc_items(batch, 32, 8);
+
+	*cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+	/* scratch buffer */
+	*cs++ = 0;
+
+	/* number of threads & urb entries for GPGPU vs Media Mode */
+	*cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+	*cs++ = 0;
+
+	/* urb entry size & curbe size in 256-bit units */
+	*cs++ = urb_size << 16 | curbe_size;
+
+	/* scoreboard */
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+				    const u32 interface_descriptor,
+				    unsigned int count)
+{
+	u32 *cs = batch_alloc_items(batch, 8, 4);
+
+	*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+	*cs++ = 0;
+	*cs++ = count * 8 * sizeof(*cs);
+
+	/*
+	 * interface descriptor address - it is relative to the dynamic
+	 * state base address
+	 */
+	*cs++ = interface_descriptor;
+	batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+		       unsigned int media_object_index)
+{
+	unsigned int x_offset = (media_object_index % 16) * 64;
+	unsigned int y_offset = (media_object_index / 16) * 16;
+	unsigned int inline_data_size;
+	unsigned int media_batch_size;
+	unsigned int i;
+	u32 *cs;
+
+	inline_data_size = 112 * 8;
+	media_batch_size = inline_data_size + 6;
+
+	cs = batch_alloc_items(batch, 8, media_batch_size);
+
+	*cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+	/* interface descriptor offset */
+	*cs++ = 0;
+
+	/* without indirect data */
+	*cs++ = 0;
+	*cs++ = 0;
+
+	/* scoreboard */
+	*cs++ = 0;
+	*cs++ = 0;
+
+	/* inline */
+	*cs++ = (y_offset << 16) | (x_offset);
+	*cs++ = 0;
+	*cs++ = GT3_INLINE_DATA_DELAYS;
+	for (i = 3; i < inline_data_size; i++)
+		*cs++ = 0;
+
+	batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+	u32 *cs = batch_alloc_items(batch, 0, 5);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+		PIPE_CONTROL_GLOBAL_GTT_IVB;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+		       u32 *start,
+		       const struct batch_vals *bv)
+{
+	struct drm_i915_private *i915 = vma->vm->i915;
+	unsigned int desc_count = 64;
+	const u32 urb_size = 112;
+	struct batch_chunk cmds, state;
+	u32 interface_descriptor;
+	unsigned int i;
+
+	batch_init(&cmds, vma, start, 0, bv->cmd_size);
+	batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+	interface_descriptor =
+		gen7_fill_interface_descriptor(&state, bv,
+					       IS_HASWELL(i915) ?
+					       &cb_kernel_hsw :
+					       &cb_kernel_ivb,
+					       desc_count);
+	gen7_emit_pipeline_flush(&cmds);
+	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+	batch_add(&cmds, MI_NOOP);
+	gen7_emit_state_base_address(&cmds, interface_descriptor);
+	gen7_emit_pipeline_flush(&cmds);
+
+	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+	gen7_emit_interface_descriptor_load(&cmds,
+					    interface_descriptor,
+					    desc_count);
+
+	for (i = 0; i < bv->max_primitives; i++)
+		gen7_emit_media_object(&cmds, i);
+
+	batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+			    struct i915_vma * const vma)
+{
+	struct batch_vals bv;
+	u32 *batch;
+
+	batch_get_defaults(engine->i915, &bv);
+	if (!vma)
+		return bv.max_size;
+
+	GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+	i915_gem_object_flush_map(vma->obj);
+	i915_gem_object_unpin_map(vma->obj);
+
+	return 0;
+}
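
The batch_chunk helpers in this new file are a bump allocator over a pinned buffer: batch_alloc_items() pads the write pointer up to the requested alignment with zero dwords (zero conveniently doubles as MI_NOOP) and then reserves space for the caller. A self-contained userspace model of that align-and-pad behaviour; anything beyond it is assumption.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct chunk {
            uint32_t buf[64];
            uint32_t *end;  /* current write pointer */
    };

    static uint32_t *alloc_items(struct chunk *c, unsigned int align_bytes,
                                 unsigned int items)
    {
            if (align_bytes) {
                    uintptr_t up = ((uintptr_t)c->end + align_bytes - 1) &
                                   ~((uintptr_t)align_bytes - 1);

                    /* zero padding doubles as MI_NOOP filler */
                    memset(c->end, 0, up - (uintptr_t)c->end);
                    c->end = (uint32_t *)up;
            }

            uint32_t *map = c->end;

            c->end += items;
            return map;
    }

    int main(void)
    {
            struct chunk c;
            uint32_t *cs;

            c.end = c.buf;
            *c.end++ = 0xdeadbeef;  /* leave the pointer unaligned */

            cs = alloc_items(&c, 32, 4);
            printf("32-byte aligned: %d\n", (uintptr_t)cs % 32 == 0);
            return 0;
    }
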
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 0000000..bb10074
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+			    struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d..94e746a 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
 #include "gen8_ppgtt.h"
 #include "i915_scatterlist.h"
 #include "i915_trace.h"
+#include "i915_pvinfo.h"
 #include "i915_vgpu.h"
 #include "intel_gt.h"
 #include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
 	return pde;
 }
 
+static u64 gen8_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
+{
+	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+	if (unlikely(flags & PTE_READ_ONLY))
+		pte &= ~_PAGE_RW;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		pte |= PPAT_UNCACHED;
+		break;
+	case I915_CACHE_WT:
+		pte |= PPAT_DISPLAY_ELLC;
+		break;
+	default:
+		pte |= PPAT_CACHED;
+		break;
+	}
+
+	return pte;
+}
+
 static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 {
 	struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 	ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
 	ppgtt->vm.clear_range = gen8_ppgtt_clear;
 
+	ppgtt->vm.pte_encode = gen8_pte_encode;
+
 	if (intel_vgpu_active(gt->i915))
 		gen8_ppgtt_notify_vgt(ppgtt, true);
 
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 0000000..b47f9d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+	0x00000001, 0x26020128, 0x00000024, 0x00000000,
+	0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+	0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+	0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+	0x00600001, 0x20600061, 0x00000000, 0x00000000,
+	0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+	0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+	0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+	0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+	0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+	0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+	0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+	0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+	0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+	0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+	0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+	0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+	0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+	0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+	0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+	0x00000001, 0x20840021, 0x00000068, 0x00000000,
+	0x00000001, 0x20880061, 0x00000000, 0x00000003,
+	0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+	0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+	0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+	0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+	0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+	0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+	0x00000001, 0x26020128, 0x00000024, 0x00000000,
+	0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+	0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+	0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+	0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+	0x00200001, 0x20400121, 0x00450020, 0x00000000,
+	0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+	0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+	0x00800001, 0x20600061, 0x00000000, 0x00000000,
+	0x00800001, 0x20800061, 0x00000000, 0x00000000,
+	0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+	0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+	0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+	0x00800001, 0x21000061, 0x00000000, 0x00000000,
+	0x00800001, 0x21200061, 0x00000000, 0x00000000,
+	0x00800001, 0x21400061, 0x00000000, 0x00000000,
+	0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+	0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+	0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+	0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+	0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+	0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+	0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+	0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 57e8a05..aea992e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
 		return -EINTR;
 
 	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+		if (intel_context_is_banned(ce)) {
+			err = -EIO;
+			goto unlock;
+		}
+
 		err = ce->ops->alloc(ce);
 		if (unlikely(err))
 			goto unlock;
@@ -92,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
 {
 	int err;
 
+	GEM_BUG_ON(intel_context_is_closed(ce));
+
 	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
 		err = intel_context_alloc_state(ce);
 		if (err)
@@ -116,7 +123,8 @@ int __intel_context_do_pin(struct intel_context *ce)
 		if (unlikely(err))
 			goto err_active;
 
-		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+			 i915_ggtt_offset(ce->ring->vma),
 			 ce->ring->head, ce->ring->tail);
 
 		smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -219,7 +227,9 @@ static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 
-	CE_TRACE(ce, "retire\n");
+	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
+		 intel_context_get_total_runtime_ns(ce),
+		 intel_context_get_avg_runtime_ns(ce));
 
 	set_bit(CONTEXT_VALID_BIT, &ce->flags);
 	if (ce->state)
@@ -280,6 +290,8 @@ intel_context_init(struct intel_context *ce,
 	ce->sseu = engine->sseu;
 	ce->ring = __intel_context_ring_size(SZ_4K);
 
+	ewma_runtime_init(&ce->runtime.avg);
+
 	ce->vm = i915_vm_get(engine->gt->vm);
 
 	INIT_LIST_HEAD(&ce->signal_link);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 30bd248..07be021 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 
 #include "i915_active.h"
+#include "i915_drv.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
 #include "intel_ring_types.h"
@@ -35,6 +36,9 @@ int intel_context_alloc_state(struct intel_context *ce);
 
 void intel_context_free(struct intel_context *ce);
 
+int intel_context_reconfigure_sseu(struct intel_context *ce,
+				   const struct intel_sseu sseu);
+
 /**
  * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
  * @ce - the context
@@ -169,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
 	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
 }
 
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
 static inline bool intel_context_use_semaphores(const struct intel_context *ce)
 {
 	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -224,4 +233,20 @@ intel_context_clear_nopreempt(struct intel_context *ce)
 	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
 }
 
+static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
+{
+	const u32 period =
+		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+	return READ_ONCE(ce->runtime.total) * period;
+}
+
+static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+	const u32 period =
+		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
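
The runtime getters above read a hardware tick count plus an EWMA of it, converting to nanoseconds with the CS timestamp period (mul_u32_u32() keeps the 32x32 multiply from overflowing). DECLARE_EWMA(runtime, 3, 8), added in intel_context_types.h below, means 3 fractional bits of fixed-point precision and a 1/8 weight per new sample — roughly avg += (sample - avg) / 8. A standalone approximation; the kernel's exact rounding differs slightly.

    #include <stdint.h>
    #include <stdio.h>

    #define PRECISION  3    /* internal value stores avg << 3 */
    #define WEIGHT_RCP 8    /* each new sample contributes 1/8 */

    static uint64_t ewma_add(uint64_t internal, uint64_t sample)
    {
            if (!internal)
                    return sample << PRECISION;     /* first sample seeds it */

            return internal - internal / WEIGHT_RCP +
                   (sample << PRECISION) / WEIGHT_RCP;
    }

    int main(void)
    {
            const uint64_t runtimes[] = { 100, 100, 200, 200, 200 };
            uint64_t avg = 0;

            for (unsigned int i = 0; i < 5; i++) {
                    avg = ewma_add(avg, runtimes[i]);
                    printf("avg=%llu\n", (unsigned long long)(avg >> PRECISION));
            }
            return 0;
    }
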
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 0000000..65dcd09
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+	int err;
+
+	if (intel_context_lock_pinned(ce))
+		return -EINTR;
+
+	err = i915_active_wait(&ce->active);
+	if (err < 0)
+		goto unlock;
+
+	if (intel_context_is_pinned(ce)) {
+		err = -EBUSY; /* In active use, come back later! */
+		goto unlock;
+	}
+
+	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+		struct intel_ring *ring;
+
+		/* Replace the existing ringbuffer */
+		ring = intel_engine_create_ring(ce->engine, sz);
+		if (IS_ERR(ring)) {
+			err = PTR_ERR(ring);
+			goto unlock;
+		}
+
+		intel_ring_put(ce->ring);
+		ce->ring = ring;
+
+		/* Context image will be updated on next pin */
+	} else {
+		ce->ring = __intel_context_ring_size(sz);
+	}
+
+unlock:
+	intel_context_unlock_pinned(ce);
+	return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+	long sz = (unsigned long)READ_ONCE(ce->ring);
+
+	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+		if (intel_context_lock_pinned(ce))
+			return -EINTR;
+
+		sz = ce->ring->size;
+		intel_context_unlock_pinned(ce);
+	}
+
+	return sz;
+}
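
Note the bare casts in intel_context_get_ring_size(): until CONTEXT_ALLOC_BIT is set there is no ring object yet, and ce->ring appears to carry the requested size disguised as a pointer (via __intel_context_ring_size()). A sketch of that assumed encoding, not the driver's exact helpers:

    struct intel_ring;

    /* stash the size in the pointer until the real ring exists */
    static inline struct intel_ring *ring_size_as_ptr(unsigned long sz)
    {
            return (struct intel_ring *)sz;
    }

    /* inverse of the stash above, used on the read side */
    static inline unsigned long ring_ptr_as_size(struct intel_ring *ring)
    {
            return (unsigned long)ring;
    }
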
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 0000000..f053d86
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
new file mode 100644
index 0000000..57a3095
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "intel_sseu.h"
+
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+				 const struct intel_context *ce,
+				 const struct intel_sseu sseu)
+{
+	u64 offset;
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	offset = i915_ggtt_offset(ce->state) +
+		 LRC_STATE_PN * PAGE_SIZE +
+		 CTX_R_PWR_CLK_STATE * 4;
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int
+gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
+{
+	struct i915_request *rq;
+	int ret;
+
+	lockdep_assert_held(&ce->pin_mutex);
+
+	/*
+	 * If the context is not idle, we have to submit an ordered request to
+	 * modify its context image via the kernel context (writing to our own
+	 * image, or into the registers directly, does not stick). Pristine
+	 * and idle contexts will be configured on pinning.
+	 */
+	if (!intel_context_pin_if_active(ce))
+		return 0;
+
+	rq = intel_engine_create_kernel_request(ce->engine);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto out_unpin;
+	}
+
+	/* Serialise with the remote context */
+	ret = intel_context_prepare_remote_request(ce, rq);
+	if (ret == 0)
+		ret = gen8_emit_rpcs_config(rq, ce, sseu);
+
+	i915_request_add(rq);
+out_unpin:
+	intel_context_unpin(ce);
+	return ret;
+}
+
+int
+intel_context_reconfigure_sseu(struct intel_context *ce,
+			       const struct intel_sseu sseu)
+{
+	int ret;
+
+	GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+
+	ret = intel_context_lock_pinned(ce);
+	if (ret)
+		return ret;
+
+	/* Nothing to do if unmodified. */
+	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+		goto unlock;
+
+	ret = gen8_modify_rpcs(ce, sseu);
+	if (!ret)
+		ce->sseu = sseu;
+
+unlock:
+	intel_context_unlock_pinned(ce);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ca1420f..07cb83a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -7,6 +7,7 @@
 #ifndef __INTEL_CONTEXT_TYPES__
 #define __INTEL_CONTEXT_TYPES__
 
+#include <linux/average.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -19,6 +20,8 @@
 
 #define CONTEXT_REDZONE POISON_INUSE
 
+DECLARE_EWMA(runtime, 3, 8);
+
 struct i915_gem_context;
 struct i915_vma;
 struct intel_context;
@@ -42,8 +45,8 @@ struct intel_context {
 
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
 
 	struct i915_address_space *vm;
 	struct i915_gem_context __rcu *gem_context;
@@ -59,15 +62,25 @@ struct intel_context {
 #define CONTEXT_BARRIER_BIT		0
 #define CONTEXT_ALLOC_BIT		1
 #define CONTEXT_VALID_BIT		2
-#define CONTEXT_USE_SEMAPHORES		3
-#define CONTEXT_BANNED			4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
-#define CONTEXT_NOPREEMPT		6
+#define CONTEXT_CLOSED_BIT		3
+#define CONTEXT_USE_SEMAPHORES		4
+#define CONTEXT_BANNED			5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	6
+#define CONTEXT_NOPREEMPT		7
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
 	u32 tag; /* cookie passed to HW to track this context on submission */
 
+	/* Time on GPU as tracked by the hw. */
+	struct {
+		struct ewma_runtime avg;
+		u64 total;
+		u32 last;
+		I915_SELFTEST_DECLARE(u32 num_underflow);
+		I915_SELFTEST_DECLARE(u32 max_underflow);
+	} runtime;
+
 	unsigned int active_count; /* protected by timeline->mutex */
 
 	atomic_t pin_count;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 5df0030..b469de0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
 static inline struct i915_request *
 execlists_active(const struct intel_engine_execlists *execlists)
 {
-	return *READ_ONCE(execlists->active);
+	struct i915_request * const *cur, * const *old, *active;
+
+	cur = READ_ONCE(execlists->active);
+	smp_rmb(); /* pairs with overwrite protection in process_csb() */
+	do {
+		old = cur;
+
+		active = READ_ONCE(*cur);
+		cur = READ_ONCE(execlists->active);
+
+		smp_rmb(); /* and complete the seqlock retry */
+	} while (unlikely(cur != old));
+
+	return active;
 }
 
 static inline void
@@ -192,6 +205,8 @@ void intel_engines_free(struct intel_gt *gt);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
+int intel_engine_resume(struct intel_engine_cs *engine);
+
 int intel_ring_submission_setup(struct intel_engine_cs *engine);
 
 int intel_engine_stop_cs(struct intel_engine_cs *engine);
@@ -303,26 +318,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine);
 
 u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
 
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
-	if (!execlists->preempt_hang.inject_hang)
-		return false;
-
-	complete(&execlists->preempt_hang.completion);
-	return true;
-}
-
-#else
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
-	return false;
-}
-
-#endif
-
 void intel_engine_init_active(struct intel_engine_cs *engine,
 			      unsigned int subclass);
 #define ENGINE_PHYSICAL	0
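
The new execlists_active() is effectively the read side of a seqlock keyed on the array pointer itself: re-read execlists->active around the element load and retry until it was stable, so a concurrent process_csb() swapping port arrays cannot hand back a torn snapshot. A runnable userspace model of the same pattern with C11 atomics; names and types are stand-ins.

    #include <stdatomic.h>
    #include <stddef.h>

    struct request;

    /* the writer publishes a new port array with release ordering */
    static _Atomic(struct request **) active_ptr;

    static struct request *read_active(void)
    {
            struct request **cur, **old, *rq;

            cur = atomic_load_explicit(&active_ptr, memory_order_acquire);
            do {
                    old = cur;
                    rq = *cur;      /* may race with the writer... */
                    cur = atomic_load_explicit(&active_ptr,
                                               memory_order_acquire);
            } while (cur != old);   /* ...so retry until it was stable */

            return rq;
    }

    int main(void)
    {
            static struct request *ports[2];        /* stand-in port array */

            atomic_store(&active_ptr, ports);
            return read_active() != NULL;   /* nothing queued: returns 0 */
    }
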
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 06ff769..3aa8a65 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -35,6 +35,7 @@
 #include "intel_engine_user.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
+#include "intel_gt_pm.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
@@ -199,10 +200,10 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 			 * out in the wash.
 			 */
 			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
-			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
-					 INTEL_GEN(gt->i915),
-					 cxt_size * 64,
-					 cxt_size - 1);
+			drm_dbg(&gt->i915->drm,
+				"gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+				INTEL_GEN(gt->i915), cxt_size * 64,
+				cxt_size - 1);
 			return round_up(cxt_size * 64, PAGE_SIZE);
 		case 3:
 		case 2:
@@ -274,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 {
 	const struct engine_info *info = &intel_engines[id];
+	struct drm_i915_private *i915 = gt->i915;
 	struct intel_engine_cs *engine;
 
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -300,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->id = id;
 	engine->legacy_idx = INVALID_ENGINE;
 	engine->mask = BIT(id);
-	engine->i915 = gt->i915;
+	engine->i915 = i915;
 	engine->gt = gt;
 	engine->uncore = gt->uncore;
 	engine->hw_id = engine->guc_id = info->hw_id;
-	engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
 
 	engine->class = info->class;
 	engine->instance = info->instance;
@@ -312,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 
 	engine->props.heartbeat_interval_ms =
 		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+	engine->props.max_busywait_duration_ns =
+		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
 	engine->props.preempt_timeout_ms =
 		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
 	engine->props.stop_timeout_ms =
@@ -319,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->props.timeslice_duration_ms =
 		CONFIG_DRM_I915_TIMESLICE_DURATION;
 
+	/* Override to uninterruptible for OpenCL workloads. */
+	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+		engine->props.preempt_timeout_ms = 0;
+
 	engine->context_size = intel_engine_context_size(gt, engine->class);
 	if (WARN_ON(engine->context_size > BIT(20)))
 		engine->context_size = 0;
 	if (engine->context_size)
-		DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+		DRIVER_CAPS(i915)->has_logical_contexts = true;
 
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
@@ -339,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	gt->engine_class[info->class][info->instance] = engine;
 	gt->engine[id] = engine;
 
-	gt->i915->engine[id] = engine;
+	i915->engine[id] = engine;
 
 	return 0;
 }
@@ -392,8 +400,24 @@ void intel_engines_release(struct intel_gt *gt)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	/*
+	 * Before we release the resources held by engine, we must be certain
+	 * that the HW is no longer accessing them -- having the GPU scribble
+	 * to or read from a page being used for something else causes no end
+	 * of fun.
+	 *
+	 * The GPU should be reset by this point, but assume the worst just
+	 * in case we aborted before completely initialising the engines.
+	 */
+	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+		__intel_gt_reset(gt, ALL_ENGINES);
+
 	/* Decouple the backend; but keep the layout for late GPU resets */
 	for_each_engine(engine, gt, id) {
+		intel_wakeref_wait_for_idle(&engine->wakeref);
+		GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
 		if (!engine->release)
 			continue;
 
@@ -432,9 +456,9 @@ int intel_engines_init_mmio(struct intel_gt *gt)
 	unsigned int i;
 	int err;
 
-	WARN_ON(engine_mask == 0);
-	WARN_ON(engine_mask &
-		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+	drm_WARN_ON(&i915->drm, engine_mask == 0);
+	drm_WARN_ON(&i915->drm, engine_mask &
+		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
 
 	if (i915_inject_probe_failure(i915))
 		return -ENODEV;
@@ -455,7 +479,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
 	 * are added to the driver by a warning and disabling the forgotten
 	 * engines.
 	 */
-	if (WARN_ON(mask != engine_mask))
+	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
 		device_info->engine_mask = mask;
 
 	RUNTIME_INFO(i915)->num_engines = hweight32(mask);
@@ -510,7 +534,6 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
 {
 	unsigned int flags;
 
-	flags = PIN_GLOBAL;
 	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
 		/*
 		 * On g33, we cannot place HWS above 256MiB, so
@@ -523,11 +546,11 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
 		 * above the mappable region (even though we never
 		 * actually map it).
 		 */
-		flags |= PIN_MAPPABLE;
+		flags = PIN_MAPPABLE;
 	else
-		flags |= PIN_HIGH;
+		flags = PIN_HIGH;
 
-	return i915_vma_pin(vma, 0, 0, flags);
+	return i915_ggtt_pin(vma, 0, flags);
 }
 
 static int init_status_page(struct intel_engine_cs *engine)
@@ -546,7 +569,8 @@ static int init_status_page(struct intel_engine_cs *engine)
 	 */
 	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
-		DRM_ERROR("Failed to allocate status page\n");
+		drm_err(&engine->i915->drm,
+			"Failed to allocate status page\n");
 		return PTR_ERR(obj);
 	}
 
@@ -614,15 +638,15 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 
 struct measure_breadcrumb {
 	struct i915_request rq;
-	struct intel_timeline timeline;
 	struct intel_ring ring;
 	u32 cs[1024];
 };
 
-static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+static int measure_breadcrumb_dw(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	struct measure_breadcrumb *frame;
-	int dw = -ENOMEM;
+	int dw;
 
 	GEM_BUG_ON(!engine->gt->scratch);
 
@@ -630,39 +654,27 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 	if (!frame)
 		return -ENOMEM;
 
-	if (intel_timeline_init(&frame->timeline,
-				engine->gt,
-				engine->status_page.vma))
-		goto out_frame;
-
-	mutex_lock(&frame->timeline.mutex);
+	frame->rq.i915 = engine->i915;
+	frame->rq.engine = engine;
+	frame->rq.context = ce;
+	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
 
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
 	frame->ring.effective_size = frame->ring.size;
 	intel_ring_update_space(&frame->ring);
-
-	frame->rq.i915 = engine->i915;
-	frame->rq.engine = engine;
 	frame->rq.ring = &frame->ring;
-	rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
 
-	dw = intel_timeline_pin(&frame->timeline);
-	if (dw < 0)
-		goto out_timeline;
-
+	mutex_lock(&ce->timeline->mutex);
 	spin_lock_irq(&engine->active.lock);
+
 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
 	spin_unlock_irq(&engine->active.lock);
+	mutex_unlock(&ce->timeline->mutex);
 
 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
 
-	intel_timeline_unpin(&frame->timeline);
-
-out_timeline:
-	mutex_unlock(&frame->timeline.mutex);
-	intel_timeline_fini(&frame->timeline);
-out_frame:
 	kfree(frame);
 	return dw;
 }
@@ -737,12 +749,6 @@ static int engine_init_common(struct intel_engine_cs *engine)
 
 	engine->set_default_submission(engine);
 
-	ret = measure_breadcrumb_dw(engine);
-	if (ret < 0)
-		return ret;
-
-	engine->emit_fini_breadcrumb_dw = ret;
-
 	/*
 	 * We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
@@ -755,9 +761,18 @@ static int engine_init_common(struct intel_engine_cs *engine)
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	ret = measure_breadcrumb_dw(ce);
+	if (ret < 0)
+		goto err_context;
+
+	engine->emit_fini_breadcrumb_dw = ret;
 	engine->kernel_context = ce;
 
 	return 0;
+
+err_context:
+	intel_context_put(ce);
+	return ret;
 }
 
 int intel_engines_init(struct intel_gt *gt)
@@ -824,6 +839,20 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_wa_list_free(&engine->whitelist);
 }
 
+/**
+ * intel_engine_resume - re-initializes the HW state of the engine
+ * @engine: Engine to resume.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_resume(struct intel_engine_cs *engine)
+{
+	intel_engine_apply_workarounds(engine);
+	intel_engine_apply_whitelist(engine);
+
+	return engine->resume(engine);
+}
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *i915 = engine->i915;
@@ -982,6 +1011,12 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 
 		instdone->slice_common =
 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+		if (INTEL_GEN(i915) >= 12) {
+			instdone->slice_common_extra[0] =
+				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
+			instdone->slice_common_extra[1] =
+				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
+		}
 		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
 			instdone->sampler[slice][subslice] =
 				read_subslice_reg(engine, slice, subslice,
@@ -1276,8 +1311,14 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	}
 
 	if (INTEL_GEN(dev_priv) >= 6) {
-		drm_printf(m, "\tRING_IMR: %08x\n",
+		drm_printf(m, "\tRING_IMR:   0x%08x\n",
 			   ENGINE_READ(engine, RING_IMR));
+		drm_printf(m, "\tRING_ESR:   0x%08x\n",
+			   ENGINE_READ(engine, RING_ESR));
+		drm_printf(m, "\tRING_EMR:   0x%08x\n",
+			   ENGINE_READ(engine, RING_EMR));
+		drm_printf(m, "\tRING_EIR:   0x%08x\n",
+			   ENGINE_READ(engine, RING_EIR));
 	}
 
 	addr = intel_engine_get_active_head(engine);
@@ -1342,25 +1383,27 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		execlists_active_lock_bh(execlists);
 		rcu_read_lock();
 		for (port = execlists->active; (rq = *port); port++) {
-			char hdr[80];
+			char hdr[160];
 			int len;
 
-			len = snprintf(hdr, sizeof(hdr),
-				       "\t\tActive[%d]: ",
-				       (int)(port - execlists->active));
+			len = scnprintf(hdr, sizeof(hdr),
+					"\t\tActive[%d]: ",
+					(int)(port - execlists->active));
 			if (!i915_request_signaled(rq)) {
 				struct intel_timeline *tl = get_timeline(rq);
 
-				len += snprintf(hdr + len, sizeof(hdr) - len,
-						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
-						i915_ggtt_offset(rq->ring->vma),
-						tl ? tl->hwsp_offset : 0,
-						hwsp_seqno(rq));
+				len += scnprintf(hdr + len, sizeof(hdr) - len,
+						 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+						 i915_ggtt_offset(rq->ring->vma),
+						 tl ? tl->hwsp_offset : 0,
+						 hwsp_seqno(rq),
+						 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+								       1000 * 1000));
 
 				if (tl)
 					intel_timeline_put(tl);
 			}
-			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
 			print_request(m, rq, hdr);
 		}
 		for (port = execlists->pending; (rq = *port); port++) {
@@ -1657,6 +1700,23 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * we only care about the snapshot of this moment.
 	 */
 	lockdep_assert_held(&engine->active.lock);
+
+	rcu_read_lock();
+	request = execlists_active(&engine->execlists);
+	if (request) {
+		struct intel_timeline *tl = request->context->timeline;
+
+		list_for_each_entry_from_reverse(request, &tl->requests, link) {
+			if (i915_request_completed(request))
+				break;
+
+			active = request;
+		}
+	}
+	rcu_read_unlock();
+	if (active)
+		return active;
+
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
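
The snprintf() -> scnprintf() swap in the register dump above matters because snprintf() returns the length that *would* have been written, so chaining hdr + len walks past the buffer once a line truncates; scnprintf() returns the bytes actually stored. A userspace stand-in showing the clamping (equivalent in spirit to the kernel's scnprintf()):

    #include <stdarg.h>
    #include <stddef.h>
    #include <stdio.h>

    static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
    {
            va_list ap;
            int n;

            if (!size)
                    return 0;

            va_start(ap, fmt);
            n = vsnprintf(buf, size, fmt, ap);
            va_end(ap);

            if (n < 0)
                    return 0;
            return (size_t)n >= size ? (int)(size - 1) : n;
    }

    int main(void)
    {
            char hdr[16];
            int len = 0;

            len += scnprintf_like(hdr + len, sizeof(hdr) - len,
                                  "Active[0]: ring:{start:%08x}", 0x1234);
            /* len clamps at 15, so this append stays in bounds */
            len += scnprintf_like(hdr + len, sizeof(hdr) - len, "rq: ");
            printf("%d: %s\n", len, hdr);
            return 0;
    }
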
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 6c6fd18..dd82571 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -180,7 +180,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
 	struct intel_context *ce = engine->kernel_context;
 	struct i915_request *rq;
-	int err = 0;
+	int err;
 
 	if (!intel_engine_has_preemption(engine))
 		return -ENODEV;
@@ -188,8 +188,10 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
 
-	if (mutex_lock_interruptible(&ce->timeline->mutex))
+	if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+		err = -EINTR;
 		goto out_rpm;
+	}
 
 	intel_context_enter(ce);
 	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@@ -204,6 +206,8 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 
 	__i915_request_commit(rq);
 	__i915_request_queue(rq, &attr);
+	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+	err = 0;
 
 out_unlock:
 	mutex_unlock(&ce->timeline->mutex);
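
The -EINTR change above fixes a silent-success bug: err used to be pre-initialised to 0, so an interrupted mutex_lock_interruptible() jumped straight to the exit path and reported success without ever queuing the pulse. The shape of the fix, as a sketch with assumed helpers:

    #include <errno.h>

    extern int lock_interruptible(void);    /* assumed: 0, or -EINTR on signal */
    extern void unlock(void);

    int send_pulse(void)
    {
            int err;        /* deliberately has no default value */

            if (lock_interruptible()) {
                    err = -EINTR;   /* propagate the signal, not a success */
                    goto out;
            }

            /* ... create and queue the heartbeat request ... */
            err = 0;
            unlock();
    out:
            return err;
    }
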
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ea90ab3..b6cf284 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -112,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
 {
 	struct intel_gt_timelines *timelines = &engine->gt->timelines;
 
-	ENGINE_TRACE(engine, "\n");
+	ENGINE_TRACE(engine, "parking\n");
 
 	/*
 	 * We have to serialise all potential retirement paths with our
@@ -249,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	if (!switch_to_kernel_context(engine))
 		return -EBUSY;
 
-	ENGINE_TRACE(engine, "\n");
+	ENGINE_TRACE(engine, "parked\n");
 
 	call_idle_barriers(engine); /* cleanup after wedging */
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 92be41a..80cdde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -75,6 +75,7 @@ struct intel_instdone {
 	u32 instdone;
 	/* The following exist only in the RCS engine */
 	u32 slice_common;
+	u32 slice_common_extra[2];
 	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
 	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
 };
@@ -126,7 +127,6 @@ DECLARE_EWMA(_engine_latency, 6, 4)
 struct st_preempt_hang {
 	struct completion completion;
 	unsigned int count;
-	bool inject_hang;
 };
 
 /**
@@ -157,6 +157,16 @@ struct intel_engine_execlists {
 	struct i915_priolist default_priolist;
 
 	/**
+	 * @error_interrupt: CS Master EIR
+	 *
+	 * The CS generates an interrupt when it detects an error. We capture
+	 * the first error interrupt, record the EIR and schedule the tasklet.
+	 * In the tasklet, we process the pending CS events to ensure we have
+	 * the guilty request, and then reset the engine.
+	 */
+	u32 error_interrupt;
+
+	/**
 	 * @no_priolist: priority lists disabled
 	 */
 	bool no_priolist;
@@ -537,6 +547,7 @@ struct intel_engine_cs {
 
 	struct {
 		unsigned long heartbeat_interval_ms;
+		unsigned long max_busywait_duration_ns;
 		unsigned long preempt_timeout_ms;
 		unsigned long stop_timeout_ms;
 		unsigned long timeslice_duration_ms;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 9e7f12b..848dece 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -278,7 +278,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
 			}
 		}
 
-		if (WARN(errors, "Invalid UABI engine mapping found"))
+		if (drm_WARN(&i915->drm, errors,
+			     "Invalid UABI engine mapping found"))
 			i915->uabi_engines = RB_ROOT;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 531d501..aed498a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
 #include <asm/set_memory.h>
 #include <asm/smp.h>
 
+#include <drm/i915_drm.h>
+
 #include "intel_gt.h"
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
@@ -104,27 +106,17 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 	return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
 }
 
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
 {
-	struct drm_i915_private *i915 = ggtt->vm.i915;
+	struct i915_vma *vma;
 
-	/*
-	 * Don't bother messing with faults pre GEN6 as we have little
-	 * documentation supporting that it's a good idea.
-	 */
-	if (INTEL_GEN(i915) < 6)
-		return;
-
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+		i915_vma_wait_for_bind(vma);
 
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
 	ggtt->invalidate(ggtt);
-}
 
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
-	ggtt_suspend_mappings(&i915->ggtt);
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 }
 
 void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -167,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 	intel_gtt_chipset_flush();
 }
 
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+				enum i915_cache_level level,
+				u32 flags)
+{
+	return addr | _PAGE_PRESENT;
+}
+
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 {
 	writeq(pte, addr);
@@ -182,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
 		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 
-	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
 
 	ggtt->invalidate(ggtt);
 }
@@ -195,7 +194,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	struct sgt_iter sgt_iter;
 	gen8_pte_t __iomem *gtt_entries;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
 	dma_addr_t addr;
 
 	/*
@@ -350,31 +349,6 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
 	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 }
 
-struct clear_range {
-	struct i915_address_space *vm;
-	u64 start;
-	u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
-	struct clear_range *arg = _arg;
-
-	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
-	bxt_vtd_ggtt_wa(arg->vm);
-
-	return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
-					  u64 start,
-					  u64 length)
-{
-	struct clear_range arg = { vm, start, length };
-
-	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  u64 start, u64 length)
 {
@@ -462,7 +436,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
 	u64 size;
 	int ret;
 
-	if (!USES_GUC(ggtt->vm.i915))
+	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
 		return 0;
 
 	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
@@ -472,7 +446,8 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
 				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
 				   PIN_NOEVICT);
 	if (ret)
-		DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+		drm_dbg(&ggtt->vm.i915->drm,
+			"Failed to reserve top of GGTT for GuC\n");
 
 	return ret;
 }
@@ -544,8 +519,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
 
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
-		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
-			      hole_start, hole_end);
+		drm_dbg_kms(&ggtt->vm.i915->drm,
+			    "clearing unused GTT space: [%lx, %lx]\n",
+			    hole_start, hole_end);
 		ggtt->vm.clear_range(&ggtt->vm, hole_start,
 				     hole_end - hole_start);
 	}
@@ -879,8 +855,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
 		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
 		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
-		if (ggtt->vm.clear_range != nop_clear_range)
-			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+		ggtt->vm.bind_async_flags =
+			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 	}
 
 	ggtt->invalidate = gen8_ggtt_invalidate;
@@ -890,7 +866,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
-	ggtt->vm.pte_encode = gen8_pte_encode;
+	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
 
 	setup_private_pat(ggtt->vm.gt->uncore);
 
@@ -1180,7 +1156,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 	ggtt->invalidate(ggtt);
 }
 
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
 {
 	struct i915_vma *vma;
 	bool flush = false;
@@ -1188,8 +1164,6 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 
 	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 
-	mutex_lock(&ggtt->vm.mutex);
-
 	/* First fill our portion of the GTT with scratch pages */
 	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
@@ -1216,19 +1190,10 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	atomic_set(&ggtt->vm.open, open);
 	ggtt->invalidate(ggtt);
 
-	mutex_unlock(&ggtt->vm.mutex);
-
 	if (flush)
 		wbinvd_on_all_cpus();
-}
 
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
-	struct i915_ggtt *ggtt = &i915->ggtt;
-
-	ggtt_restore_mappings(ggtt);
-
-	if (INTEL_GEN(i915) >= 8)
+	if (INTEL_GEN(ggtt->vm.i915) >= 8)
 		setup_private_pat(ggtt->vm.gt->uncore);
 }
 
@@ -1267,6 +1232,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 		   struct drm_i915_gem_object *obj)
 {
 	unsigned int size = intel_rotation_info_size(rot_info);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
 	int ret = -ENOMEM;
@@ -1296,8 +1262,9 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 	kfree(st);
 err_st_alloc:
 
-	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
-			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+		obj->base.size, rot_info->plane[0].width,
+		rot_info->plane[0].height, size);
 
 	return ERR_PTR(ret);
 }
@@ -1349,6 +1316,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
 		  struct drm_i915_gem_object *obj)
 {
 	unsigned int size = intel_remapped_info_size(rem_info);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
 	int ret = -ENOMEM;
@@ -1380,8 +1348,9 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
 	kfree(st);
 err_st_alloc:
 
-	DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
-			 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+		obj->base.size, rem_info->plane[0].width,
+		rem_info->plane[0].height, size);
 
 	return ERR_PTR(ret);
 }
@@ -1479,8 +1448,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	if (IS_ERR(vma->pages)) {
 		ret = PTR_ERR(vma->pages);
 		vma->pages = NULL;
-		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
-			  vma->ggtt_view.type, ret);
+		drm_err(&vma->vm->i915->drm,
+			"Failed to get pages for VMA view type %u (%d)!\n",
+			vma->ggtt_view.type, ret);
 	}
 	return ret;
 }
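
Two changes in this file are worth reading together: the GGTT suspend/resume entry points are now exported as i915_ggtt_suspend()/i915_ggtt_resume() taking the ggtt directly, and suspend now waits for asynchronous binds to complete before scrubbing the PTEs. A hedged sketch of how a power-management path pairs the two; the wrapper functions are illustrative call sites, only the i915_ggtt_* calls come from this diff:

/* Sketch: pairing the new GGTT pm entry points. The suspend/resume
 * wrappers are illustrative, not the driver's actual hooks.
 */
static void example_pm_suspend(struct drm_i915_private *i915)
{
	/* waits for async binds, then points every PTE at scratch */
	i915_ggtt_suspend(&i915->ggtt);
}

static void example_pm_resume(struct drm_i915_private *i915)
{
	/* clears faults, rebinds everything, restores the private PAT */
	i915_ggtt_resume(&i915->ggtt);
}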
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718..f04214a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
 #define MI_STORE_URB_MEM        MI_INSTR(0x2D, 0)
 #define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
 
-#define PIPELINE_SELECT                ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS   ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE                ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+	((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY		REG_BIT(0)
+#define PIPELINE_SELECT \
+	((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA	       REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+	((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
 #define  MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+	((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+	((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
 #define GPGPU_OBJECT                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
 #define GPGPU_WALKER                   ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
 #define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
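
All of the command headers in this file share one dword layout: bits 31:29 carry the client (0x3 for the GFX pipe), bits 28:27 the pipeline, bits 26:24 the opcode and bits 23:16 the sub-opcode, leaving the low bits for per-command flags such as BASE_ADDRESS_MODIFY. A hypothetical packing helper that reproduces the macros above; the field positions are read off the macros themselves:

/* Sketch: how the GFX command headers above are packed. The helper
 * itself is hypothetical.
 */
#define GFX_CMD_HDR(pipeline, opcode, subop) \
	((0x3 << 29) | ((pipeline) << 27) | ((opcode) << 24) | ((subop) << 16))

/* e.g. GFX_CMD_HDR(0x2, 0x0, 0x2) equals MEDIA_INTERFACE_DESCRIPTOR_LOAD */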
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index da2b6e2..d09f759 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -198,16 +198,16 @@ static void gen6_check_faults(struct intel_gt *gt)
 	for_each_engine(engine, gt, id) {
 		fault = GEN6_RING_FAULT_REG_READ(engine);
 		if (fault & RING_FAULT_VALID) {
-			DRM_DEBUG_DRIVER("Unexpected fault\n"
-					 "\tAddr: 0x%08lx\n"
-					 "\tAddress space: %s\n"
-					 "\tSource ID: %d\n"
-					 "\tType: %d\n",
-					 fault & PAGE_MASK,
-					 fault & RING_FAULT_GTTSEL_MASK ?
-					 "GGTT" : "PPGTT",
-					 RING_FAULT_SRCID(fault),
-					 RING_FAULT_FAULT_TYPE(fault));
+			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
+				"\tAddr: 0x%08lx\n"
+				"\tAddress space: %s\n"
+				"\tSource ID: %d\n"
+				"\tType: %d\n",
+				fault & PAGE_MASK,
+				fault & RING_FAULT_GTTSEL_MASK ?
+				"GGTT" : "PPGTT",
+				RING_FAULT_SRCID(fault),
+				RING_FAULT_FAULT_TYPE(fault));
 		}
 	}
 }
@@ -239,18 +239,17 @@ static void gen8_check_faults(struct intel_gt *gt)
 		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
 			     ((u64)fault_data0 << 12);
 
-		DRM_DEBUG_DRIVER("Unexpected fault\n"
-				 "\tAddr: 0x%08x_%08x\n"
-				 "\tAddress space: %s\n"
-				 "\tEngine ID: %d\n"
-				 "\tSource ID: %d\n"
-				 "\tType: %d\n",
-				 upper_32_bits(fault_addr),
-				 lower_32_bits(fault_addr),
-				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
-				 GEN8_RING_FAULT_ENGINE_ID(fault),
-				 RING_FAULT_SRCID(fault),
-				 RING_FAULT_FAULT_TYPE(fault));
+		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
+			"\tAddr: 0x%08x_%08x\n"
+			"\tAddress space: %s\n"
+			"\tEngine ID: %d\n"
+			"\tSource ID: %d\n"
+			"\tType: %d\n",
+			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+			GEN8_RING_FAULT_ENGINE_ID(fault),
+			RING_FAULT_SRCID(fault),
+			RING_FAULT_FAULT_TYPE(fault));
 	}
 }
 
@@ -345,7 +344,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
 		goto err_unref;
 	}
 
-	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
 	if (ret)
 		goto err_unref;
 
@@ -455,6 +454,11 @@ static int __engines_record_defaults(struct intel_gt *gt)
 		if (!rq)
 			continue;
 
+		if (rq->fence.error) {
+			err = -EIO;
+			goto out;
+		}
+
 		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
 		state = rq->context->state;
 		if (!state)
@@ -538,6 +542,10 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
 			err = -EIO;
 	}
 
+	/* Flush and restore the kernel context for safety */
+	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
+		err = -EIO;
+
 	return err;
 }
 
@@ -584,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt)
 	if (err)
 		goto err_engines;
 
-	intel_uc_init(&gt->uc);
+	err = intel_uc_init(&gt->uc);
+	if (err)
+		goto err_engines;
 
 	err = intel_gt_resume(gt);
 	if (err)
@@ -634,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt)
 void intel_gt_driver_unregister(struct intel_gt *gt)
 {
 	intel_rps_driver_unregister(&gt->rps);
+
+	/*
+	 * Upon unregistering the device, no new users can appear; cancel
+	 * all in-flight requests so that we can quickly unbind the active
+	 * resources.
+	 */
+	intel_gt_set_wedged(gt);
 }
 
 void intel_gt_driver_release(struct intel_gt *gt)
@@ -650,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
 
 void intel_gt_driver_late_release(struct intel_gt *gt)
 {
+	/* We need to wait for inflight RCU frees to release their grip */
+	rcu_barrier();
+
 	intel_uc_driver_late_release(&gt->uc);
 	intel_gt_fini_requests(gt);
 	intel_gt_fini_reset(gt);
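
The teardown changes here are ordering fixes: unregistering wedges the GT so no in-flight request can outlive the device's visible lifetime, and late release inserts an rcu_barrier() so RCU-deferred frees finish before the backing structures are torn down. A sketch of the implied sequence, assuming an illustrative wrapper around the functions patched above:

/* Sketch: teardown ordering implied by the hunks above. The wrapper is
 * illustrative; the four calls are the functions patched here.
 */
static void example_gt_teardown(struct intel_gt *gt)
{
	intel_gt_driver_unregister(gt);		/* no new users; wedges the GT */
	intel_gt_driver_remove(gt);
	intel_gt_driver_release(gt);
	intel_gt_driver_late_release(gt);	/* rcu_barrier() before final frees */
}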
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 1dac441..4fac043 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -14,7 +14,7 @@ struct drm_i915_private;
 
 #define GT_TRACE(gt, fmt, ...) do {					\
 	const struct intel_gt *gt__ __maybe_unused = (gt);		\
-	GEM_TRACE("%s  " fmt, dev_name(gt__->i915->drm.dev),		\
+	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev),		\
 		  ##__VA_ARGS__);					\
 } while (0)
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f796bdf..f0e7fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,21 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
 	bool tasklet = false;
 
+	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+		u32 eir;
+
+		eir = ENGINE_READ(engine, RING_EIR);
+		ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+		/* Disable the error interrupt until after the reset */
+		if (likely(eir)) {
+			ENGINE_WRITE(engine, RING_EMR, ~0u);
+			ENGINE_WRITE(engine, RING_EIR, eir);
+			WRITE_ONCE(engine->execlists.error_interrupt, eir);
+			tasklet = true;
+		}
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;
 
@@ -210,7 +225,10 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
 
 void gen11_gt_irq_postinstall(struct intel_gt *gt)
 {
-	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+	const u32 irqs =
+		GT_CS_MASTER_ERROR_INTERRUPT |
+		GT_RENDER_USER_INTERRUPT |
+		GT_CONTEXT_SWITCH_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -279,66 +297,56 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
-		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+		      GT_CS_MASTER_ERROR_INTERRUPT))
 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
 	if (gt_iir & GT_PARITY_ERROR(gt->i915))
 		gen7_parity_error_irq_handler(gt, gt_iir);
 }
 
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
 {
 	void __iomem * const regs = gt->uncore->regs;
+	u32 iir;
 
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
-		if (likely(gt_iir[0]))
-			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
+		if (likely(iir)) {
+			cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+				       iir >> GEN8_RCS_IRQ_SHIFT);
+			cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+				       iir >> GEN8_BCS_IRQ_SHIFT);
+			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
+		}
 	}
 
 	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
-		if (likely(gt_iir[1]))
-			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
-	}
-
-	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
-		if (likely(gt_iir[2]))
-			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
+		if (likely(iir)) {
+			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+				       iir >> GEN8_VCS0_IRQ_SHIFT);
+			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+				       iir >> GEN8_VCS1_IRQ_SHIFT);
+			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
+		}
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
-		if (likely(gt_iir[3]))
-			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
-	}
-}
-
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
-{
-	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-		cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
-			       gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
-		cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
-			       gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
-	}
-
-	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
-			       gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
-		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
-			       gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
-	}
-
-	if (master_ctl & GEN8_GT_VECS_IRQ) {
-		cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
-			       gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
+		if (likely(iir)) {
+			cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+				       iir >> GEN8_VECS_IRQ_SHIFT);
+			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
+		}
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
-		guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
+		if (likely(iir)) {
+			gen6_rps_irq_handler(&gt->rps, iir);
+			guc_irq_handler(&gt->uc.guc, iir >> 16);
+			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
+		}
 	}
 }
 
@@ -354,25 +362,18 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
 
 void gen8_gt_irq_postinstall(struct intel_gt *gt)
 {
-	struct intel_uncore *uncore = gt->uncore;
-
 	/* These are interrupts we'll toggle with the ring mask register */
-	u32 gt_interrupts[] = {
-		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
+	const u32 irqs =
+		GT_CS_MASTER_ERROR_INTERRUPT |
+		GT_RENDER_USER_INTERRUPT |
+		GT_CONTEXT_SWITCH_INTERRUPT;
+	const u32 gt_interrupts[] = {
+		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
 		0,
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+		irqs << GEN8_VECS_IRQ_SHIFT,
 	};
+	struct intel_uncore *uncore = gt->uncore;
 
 	gt->pm_ier = 0x0;
 	gt->pm_imr = ~gt->pm_ier;
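
The CS error handling added above is deliberately two-stage: the hard irq reads RING_EIR, masks further error interrupts through RING_EMR, publishes the EIR value via execlists.error_interrupt and schedules the tasklet; the tasklet (see the intel_lrc.c changes later in this series) re-confirms the error against RING_ESR before resetting the engine. A condensed sketch of the top half, using only names that appear in this diff:

/* Sketch: the irq-side half of the CS error handshake, condensed from
 * cs_irq_handler() above.
 */
static bool note_cs_error(struct intel_engine_cs *engine)
{
	u32 eir = ENGINE_READ(engine, RING_EIR);

	if (!eir)
		return false;

	ENGINE_WRITE(engine, RING_EMR, ~0u);	/* mask until after the reset */
	ENGINE_WRITE(engine, RING_EIR, eir);	/* ack the error */
	WRITE_ONCE(engine->execlists.error_interrupt, eir);
	return true;				/* caller kicks the tasklet */
}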
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 8f37593..886c5cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -36,9 +36,8 @@ void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
 
 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
 
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
 void gen8_gt_irq_reset(struct intel_gt *gt);
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
 void gen8_gt_irq_postinstall(struct intel_gt *gt);
 
 #endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d1c2f03..8b653c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -216,7 +216,7 @@ int intel_gt_resume(struct intel_gt *gt)
 		intel_engine_pm_get(engine);
 
 		engine->serial++; /* kernel context lost */
-		err = engine->resume(engine);
+		err = intel_engine_resume(engine);
 
 		intel_engine_pm_put(engine);
 		if (err) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 16acdc5..2a72cce 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
 {
 	struct i915_vma *vma, *vn;
 
-	mutex_lock(&vm->mutex);
+	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+		return;
+
 	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
 		i915_gem_object_put(obj);
 	}
 	GEM_BUG_ON(!list_empty(&vm->bound_list));
+
 	mutex_unlock(&vm->mutex);
 }
 
@@ -299,6 +302,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
 	kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
 }
 
+static void poison_scratch_page(struct page *page, unsigned long size)
+{
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+	do {
+		void *vaddr;
+
+		vaddr = kmap(page);
+		memset(vaddr, POISON_FREE, PAGE_SIZE);
+		kunmap(page);
+
+		page = pfn_to_page(page_to_pfn(page) + 1);
+		size -= PAGE_SIZE;
+	} while (size);
+}
+
 int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	unsigned long size;
@@ -331,6 +353,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			goto skip;
 
+		/*
+		 * Use a non-zero scratch page for debugging.
+		 *
+		 * We want a value that should be reasonably obvious
+		 * to spot in the error state, while also causing a GPU hang
+		 * if executed. We prefer using a clear page in production, so
+		 * should it ever be accidentally used, the effect should be
+		 * fairly benign.
+		 */
+		poison_scratch_page(page, size);
+
 		addr = dma_map_page_attrs(vm->dma,
 					  page, 0, size,
 					  PCI_DMA_BIDIRECTIONAL,
@@ -448,36 +481,12 @@ void gtt_write_workarounds(struct intel_gt *gt)
 		intel_uncore_write(uncore,
 				   HSW_GTT_CACHE_EN,
 				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
-		WARN_ON_ONCE(can_use_gtt_cache &&
-			     intel_uncore_read(uncore,
-					       HSW_GTT_CACHE_EN) == 0);
+		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
+				 intel_uncore_read(uncore,
+						   HSW_GTT_CACHE_EN) == 0);
 	}
 }
 
-u64 gen8_pte_encode(dma_addr_t addr,
-		    enum i915_cache_level level,
-		    u32 flags)
-{
-	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
-	if (unlikely(flags & PTE_READ_ONLY))
-		pte &= ~_PAGE_RW;
-
-	switch (level) {
-	case I915_CACHE_NONE:
-		pte |= PPAT_UNCACHED;
-		break;
-	case I915_CACHE_WT:
-		pte |= PPAT_DISPLAY_ELLC;
-		break;
-	default:
-		pte |= PPAT_CACHED;
-		break;
-	}
-
-	return pte;
-}
-
 static void tgl_setup_private_ppat(struct intel_uncore *uncore)
 {
 	/* TGL doesn't support LLC or AGE settings */
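
poison_scratch_page() above fills the debug-build scratch page with POISON_FREE so that stray reads through scratch PTEs stand out in error captures (and hang the GPU if executed as commands), while production builds keep a zeroed page so accidental use stays benign. A hypothetical debug check built on the same idea, verifying the poison is still intact:

/* Sketch: a hypothetical check that nothing wrote through a scratch
 * mapping; POISON_FREE comes from <linux/poison.h>.
 */
static bool scratch_page_unwritten(struct page *page)
{
	const u8 *vaddr = kmap(page);
	bool intact = true;
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (vaddr[i] != POISON_FREE) {	/* stray write detected */
			intact = false;
			break;
		}
	}

	kunmap(page);
	return intact;
}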
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 7da7681..b3116fe 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
 i915_vm_close(struct i915_address_space *vm)
 {
 	GEM_BUG_ON(!atomic_read(&vm->open));
-	if (atomic_dec_and_test(&vm->open))
-		__i915_vm_close(vm);
+	__i915_vm_close(vm);
 
 	i915_vm_put(vm);
 }
@@ -512,12 +511,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
 
 struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
 
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
-
-u64 gen8_pte_encode(dma_addr_t addr,
-		    enum i915_cache_level level,
-		    u32 flags);
+void i915_ggtt_suspend(struct i915_ggtt *gtt);
+void i915_ggtt_resume(struct i915_ggtt *ggtt);
 
 int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
 void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index ceb785b..e3f637b 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -50,6 +50,9 @@ static bool get_ia_constants(struct intel_llc *llc,
 	struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
 	struct intel_rps *rps = &llc_to_gt(llc)->rps;
 
+	if (!HAS_LLC(i915) || IS_DGFX(i915))
+		return false;
+
 	if (rps->max_freq <= rps->min_freq)
 		return false;
 
@@ -147,8 +150,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
 
 void intel_llc_enable(struct intel_llc *llc)
 {
-	if (HAS_LLC(llc_to_gt(llc)->i915))
-		gen6_update_ring_freq(llc);
+	gen6_update_ring_freq(llc);
 }
 
 void intel_llc_disable(struct intel_llc *llc)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 31455ec..683014e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -176,8 +176,6 @@
 
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-#define WA_TAIL_DWORDS 2
-#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
 struct virtual_engine {
 	struct intel_engine_cs base;
@@ -247,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
 
 	GEM_BUG_ON(i915_request_signaled(rq));
 
-	dma_fence_set_error(&rq->fence, -EIO);
+	i915_request_set_error_once(rq, -EIO);
 	i915_request_mark_complete(rq);
 }
 
@@ -295,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.attr.priority;
+	return READ_ONCE(rq->sched.attr.priority);
 }
 
 static int effective_prio(const struct i915_request *rq)
@@ -1006,7 +1004,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 				i915_request_cancel_breadcrumb(rq);
 				spin_unlock(&rq->lock);
 			}
-			rq->engine = owner;
+			WRITE_ONCE(rq->engine, owner);
 			owner->submit_request(rq);
 			active = NULL;
 		}
@@ -1197,6 +1195,48 @@ static void reset_active(struct i915_request *rq,
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
 
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+	/*
+	 * We can use either ppHWSP[16] which is recorded before the context
+	 * switch (and so excludes the cost of context switches) or use the
+	 * value from the context image itself, which is saved/restored earlier
+	 * and so includes the cost of the save.
+	 */
+	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+	ce->runtime.num_underflow += dt < 0;
+	ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
+}
+
+static void intel_context_update_runtime(struct intel_context *ce)
+{
+	u32 old;
+	s32 dt;
+
+	if (intel_context_is_barrier(ce))
+		return;
+
+	old = ce->runtime.last;
+	ce->runtime.last = intel_context_get_runtime(ce);
+	dt = ce->runtime.last - old;
+
+	if (unlikely(dt <= 0)) {
+		CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+			 old, ce->runtime.last, dt);
+		st_update_runtime_underflow(ce, dt);
+		return;
+	}
+
+	ewma_runtime_add(&ce->runtime.avg, dt);
+	ce->runtime.total += dt;
+}
+
 static inline struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
@@ -1211,12 +1251,12 @@ __execlists_schedule_in(struct i915_request *rq)
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 		execlists_check_context(ce, engine);
 
+	ce->lrc_desc &= ~GENMASK_ULL(47, 37);
 	if (ce->tag) {
 		/* Use a fixed tag for OA and friends */
 		ce->lrc_desc |= (u64)ce->tag << 32;
 	} else {
 		/* We don't need a strict matching tag, just different values */
-		ce->lrc_desc &= ~GENMASK_ULL(47, 37);
 		ce->lrc_desc |=
 			(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
 			GEN11_SW_CTX_ID_SHIFT;
@@ -1276,10 +1316,11 @@ __execlists_schedule_out(struct i915_request *rq,
 	 * If we have just completed this context, the engine may now be
 	 * idle and we want to re-enter powersaving.
 	 */
-	if (list_is_last(&rq->link, &ce->timeline->requests) &&
+	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
 	    i915_request_completed(rq))
 		intel_engine_add_retire(engine, ce->timeline);
 
+	intel_context_update_runtime(ce);
 	intel_engine_context_out(engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put_async(engine->gt);
@@ -1395,15 +1436,26 @@ trace_ports(const struct intel_engine_execlists *execlists,
 		     ports[1] ? ports[1]->fence.seqno : 0);
 }
 
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
 static __maybe_unused bool
 assert_pending_valid(const struct intel_engine_execlists *execlists,
 		     const char *msg)
 {
 	struct i915_request * const *port, *rq;
 	struct intel_context *ce = NULL;
+	bool sentinel = false;
 
 	trace_ports(execlists, msg, execlists->pending);
 
+	/* We may be messing around with the lists during reset, lalala */
+	if (reset_in_progress(execlists))
+		return true;
+
 	if (!execlists->pending[0]) {
 		GEM_TRACE_ERR("Nothing pending for promotion!\n");
 		return false;
@@ -1430,6 +1482,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 		}
 		ce = rq->context;
 
+		/*
+		 * Sentinels are supposed to be lonely so they flush the
+		 * current execution off the HW. Check that they are the
+		 * only request in the pending submission.
+		 */
+		if (sentinel) {
+			GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+
+		sentinel = i915_request_has_sentinel(rq);
+		if (sentinel && port != execlists->pending) {
+			GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+
 		/* Hold tightly onto the lock to prevent concurrent retires! */
 		if (!spin_trylock_irqsave(&rq->lock, flags))
 			continue;
@@ -1525,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
 	return true;
 }
 
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+	return READ_ONCE(rq->fence.flags);
+}
+
 static bool can_merge_rq(const struct i915_request *prev,
 			 const struct i915_request *next)
 {
@@ -1542,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
-	if (unlikely((prev->fence.flags ^ next->fence.flags) &
+	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
 		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
 		      BIT(I915_FENCE_FLAG_SENTINEL))))
 		return false;
@@ -1550,6 +1627,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (!can_merge_ctx(prev->context, next->context))
 		return false;
 
+	GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
 	return true;
 }
 
@@ -1585,7 +1663,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 }
 
 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct intel_engine_cs *engine)
+				     struct i915_request *rq)
 {
 	struct intel_engine_cs *old = ve->siblings[0];
 
@@ -1593,9 +1671,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 
 	spin_lock(&old->breadcrumbs.irq_lock);
 	if (!list_empty(&ve->context.signal_link)) {
-		list_move_tail(&ve->context.signal_link,
-			       &engine->breadcrumbs.signalers);
-		intel_engine_signal_breadcrumbs(engine);
+		list_del_init(&ve->context.signal_link);
+
+		/*
+		 * We cannot acquire the new engine->breadcrumbs.irq_lock
+		 * (as we are holding a breadcrumbs.irq_lock already),
+		 * so attach this request to the signaler on submission.
+		 * The queued irq_work will occur when we finally drop
+		 * the engine->active.lock after dequeue.
+		 */
+		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
+
+		/* Also transfer the pending irq_work for the old breadcrumb. */
+		intel_engine_signal_breadcrumbs(rq->engine);
 	}
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
@@ -1605,6 +1693,11 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 				     &(rq__)->sched.waiters_list, \
 				     wait_link)
 
+#define for_each_signaler(p__, rq__) \
+	list_for_each_entry_rcu(p__, \
+				&(rq__)->sched.signalers_list, \
+				signal_link)
+
 static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
 	LIST_HEAD(list);
@@ -1693,12 +1786,13 @@ timeslice(const struct intel_engine_cs *engine)
 static unsigned long
 active_timeslice(const struct intel_engine_cs *engine)
 {
-	const struct i915_request *rq = *engine->execlists.active;
+	const struct intel_engine_execlists *execlists = &engine->execlists;
+	const struct i915_request *rq = *execlists->active;
 
 	if (!rq || i915_request_completed(rq))
 		return 0;
 
-	if (engine->execlists.switch_priority_hint < effective_prio(rq))
+	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
 		return 0;
 
 	return timeslice(engine);
@@ -1715,8 +1809,11 @@ static void set_timeslice(struct intel_engine_cs *engine)
 static void start_timeslice(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
+	int prio = queue_prio(execlists);
 
-	execlists->switch_priority_hint = execlists->queue_priority_hint;
+	WRITE_ONCE(execlists->switch_priority_hint, prio);
+	if (prio == INT_MIN)
+		return;
 
 	if (timer_pending(&execlists->timer))
 		return;
@@ -1938,13 +2035,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				     "",
 				     yesno(engine != ve->siblings[0]));
 
-			ve->request = NULL;
-			ve->base.execlists.queue_priority_hint = INT_MIN;
+			WRITE_ONCE(ve->request, NULL);
+			WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+				   INT_MIN);
 			rb_erase_cached(rb, &execlists->virtual);
 			RB_CLEAR_NODE(rb);
 
 			GEM_BUG_ON(!(rq->execution_mask & engine->mask));
-			rq->engine = engine;
+			WRITE_ONCE(rq->engine, engine);
 
 			if (engine != ve->siblings[0]) {
 				u32 *regs = ve->context.lrc_reg_state;
@@ -1957,7 +2055,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 									engine);
 
 				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, engine);
+					virtual_xfer_breadcrumbs(ve, rq);
 
 				/*
 				 * Move the bound engine to the top of the list
@@ -2064,6 +2162,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(last &&
 					   !can_merge_ctx(last->context,
 							  rq->context));
+				GEM_BUG_ON(last &&
+					   i915_seqno_passed(last->fence.seqno,
+							     rq->fence.seqno));
 
 				submit = true;
 				last = rq;
@@ -2134,6 +2235,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 		execlists_schedule_out(*port);
 	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
 
+	smp_wmb(); /* complete the seqlock for execlists_active() */
 	WRITE_ONCE(execlists->active, execlists->inflight);
 }
 
@@ -2144,12 +2246,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
 	clflush((void *)last);
 }
 
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
-	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
 /*
  * Starting with Gen12, the status has a new format:
  *
@@ -2240,7 +2336,6 @@ static void process_csb(struct intel_engine_cs *engine)
 	 */
 	head = execlists->csb_head;
 	tail = READ_ONCE(*execlists->csb_write);
-	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
 	if (unlikely(head == tail))
 		return;
 
@@ -2254,6 +2349,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	 */
 	rmb();
 
+	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
 	do {
 		bool promote;
 
@@ -2288,11 +2384,13 @@ static void process_csb(struct intel_engine_cs *engine)
 		if (promote) {
 			struct i915_request * const *old = execlists->active;
 
+			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+
+			ring_set_paused(engine, 0);
+
 			/* Point active to the new ELSP; prevent overwriting */
 			WRITE_ONCE(execlists->active, execlists->pending);
-
-			if (!inject_preempt_hang(execlists))
-				ring_set_paused(engine, 0);
+			smp_wmb(); /* notify execlists_active() */
 
 			/* cancel old inflight, prepare for switch */
 			trace_ports(execlists, "preempted", old);
@@ -2300,12 +2398,12 @@ static void process_csb(struct intel_engine_cs *engine)
 				execlists_schedule_out(*old++);
 
 			/* switch pending to inflight */
-			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-			WRITE_ONCE(execlists->active,
-				   memcpy(execlists->inflight,
-					  execlists->pending,
-					  execlists_num_ports(execlists) *
-					  sizeof(*execlists->pending)));
+			memcpy(execlists->inflight,
+			       execlists->pending,
+			       execlists_num_ports(execlists) *
+			       sizeof(*execlists->pending));
+			smp_wmb(); /* complete the seqlock */
+			WRITE_ONCE(execlists->active, execlists->inflight);
 
 			WRITE_ONCE(execlists->pending[0], NULL);
 		} else {
@@ -2320,8 +2418,37 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * coherent (visible from the CPU) before the
 			 * user interrupt and CSB is processed.
 			 */
-			GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
-				   !reset_in_progress(execlists));
+			if (GEM_SHOW_DEBUG() &&
+			    !i915_request_completed(*execlists->active) &&
+			    !reset_in_progress(execlists)) {
+				struct i915_request *rq __maybe_unused =
+					*execlists->active;
+				const u32 *regs __maybe_unused =
+					rq->context->lrc_reg_state;
+
+				ENGINE_TRACE(engine,
+					     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+					     ENGINE_READ(engine, RING_START),
+					     ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+					     ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+					     ENGINE_READ(engine, RING_CTL),
+					     ENGINE_READ(engine, RING_MI_MODE));
+				ENGINE_TRACE(engine,
+					     "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+					     i915_ggtt_offset(rq->ring->vma),
+					     rq->head, rq->tail,
+					     rq->fence.context,
+					     lower_32_bits(rq->fence.seqno),
+					     hwsp_seqno(rq));
+				ENGINE_TRACE(engine,
+					     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+					     regs[CTX_RING_START],
+					     regs[CTX_RING_HEAD],
+					     regs[CTX_RING_TAIL]);
+
+				GEM_BUG_ON("context completed before request");
+			}
+
 			execlists_schedule_out(*execlists->active++);
 
 			GEM_BUG_ON(execlists->active - execlists->inflight >
@@ -2349,7 +2476,7 @@ static void process_csb(struct intel_engine_cs *engine)
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
 	lockdep_assert_held(&engine->active.lock);
-	if (!engine->execlists.pending[0]) {
+	if (!READ_ONCE(engine->execlists.pending[0])) {
 		rcu_read_lock(); /* protect peeking at execlists->active */
 		execlists_dequeue(engine);
 		rcu_read_unlock();
@@ -2366,12 +2493,12 @@ static void __execlists_hold(struct i915_request *rq)
 		if (i915_request_is_active(rq))
 			__i915_request_unsubmit(rq);
 
-		RQ_TRACE(rq, "on hold\n");
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
 		i915_request_set_hold(rq);
+		RQ_TRACE(rq, "on hold\n");
 
-		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		for_each_waiter(p, rq) {
 			struct i915_request *w =
 				container_of(p->waiter, typeof(*w), sched);
 
@@ -2385,7 +2512,7 @@ static void __execlists_hold(struct i915_request *rq)
 			if (i915_request_completed(w))
 				continue;
 
-			if (i915_request_on_hold(rq))
+			if (i915_request_on_hold(w))
 				continue;
 
 			list_move_tail(&w->sched.link, &list);
@@ -2443,6 +2570,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
 	GEM_BUG_ON(i915_request_on_hold(rq));
 	GEM_BUG_ON(rq->engine != engine);
 	__execlists_hold(rq);
+	GEM_BUG_ON(list_empty(&engine->active.hold));
 
 unlock:
 	spin_unlock_irq(&engine->active.lock);
@@ -2452,23 +2580,27 @@ static bool execlists_hold(struct intel_engine_cs *engine,
 static bool hold_request(const struct i915_request *rq)
 {
 	struct i915_dependency *p;
+	bool result = false;
 
 	/*
 	 * If one of our ancestors is on hold, we must also be on hold,
 	 * otherwise we will bypass it and execute before it.
 	 */
-	list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+	rcu_read_lock();
+	for_each_signaler(p, rq) {
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
 
 		if (s->engine != rq->engine)
 			continue;
 
-		if (i915_request_on_hold(s))
-			return true;
+		result = i915_request_on_hold(s);
+		if (result)
+			break;
 	}
+	rcu_read_unlock();
 
-	return false;
+	return result;
 }
 
 static void __execlists_unhold(struct i915_request *rq)
@@ -2478,6 +2610,8 @@ static void __execlists_unhold(struct i915_request *rq)
 	do {
 		struct i915_dependency *p;
 
+		RQ_TRACE(rq, "hold release\n");
+
 		GEM_BUG_ON(!i915_request_on_hold(rq));
 		GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 
@@ -2486,21 +2620,24 @@ static void __execlists_unhold(struct i915_request *rq)
 			       i915_sched_lookup_priolist(rq->engine,
 							  rq_prio(rq)));
 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-		RQ_TRACE(rq, "hold release\n");
 
 		/* Also release any children on this engine that are ready */
-		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		for_each_waiter(p, rq) {
 			struct i915_request *w =
 				container_of(p->waiter, typeof(*w), sched);
 
+			/* Propagate any change in error status */
+			if (rq->fence.error)
+				i915_request_set_error_once(w, rq->fence.error);
+
 			if (w->engine != rq->engine)
 				continue;
 
-			if (!i915_request_on_hold(rq))
+			if (!i915_request_on_hold(w))
 				continue;
 
 			/* Check that no other parents are also on hold */
-			if (hold_request(rq))
+			if (hold_request(w))
 				continue;
 
 			list_move_tail(&w->sched.link, &list);
@@ -2615,13 +2752,13 @@ static bool execlists_capture(struct intel_engine_cs *engine)
 	if (!cap)
 		return true;
 
+	spin_lock_irq(&engine->active.lock);
 	cap->rq = execlists_active(&engine->execlists);
-	GEM_BUG_ON(!cap->rq);
-
-	rcu_read_lock();
-	cap->rq = active_request(cap->rq->context->timeline, cap->rq);
-	cap->rq = i915_request_get_rcu(cap->rq);
-	rcu_read_unlock();
+	if (cap->rq) {
+		cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+		cap->rq = i915_request_get_rcu(cap->rq);
+	}
+	spin_unlock_irq(&engine->active.lock);
 	if (!cap->rq)
 		goto err_free;
 
@@ -2660,27 +2797,25 @@ static bool execlists_capture(struct intel_engine_cs *engine)
 	return false;
 }
 
-static noinline void preempt_reset(struct intel_engine_cs *engine)
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
 {
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
 	unsigned long *lock = &engine->gt->reset.flags;
 
-	if (i915_modparams.reset < 3)
+	if (!intel_has_reset_engine(engine->gt))
 		return;
 
 	if (test_and_set_bit(bit, lock))
 		return;
 
+	ENGINE_TRACE(engine, "reset for %s\n", msg);
+
 	/* Mark this tasklet as disabled to avoid waiting for it to complete */
 	tasklet_disable_nosync(&engine->execlists.tasklet);
 
-	ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
-		     READ_ONCE(engine->props.preempt_timeout_ms),
-		     jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
-
 	ring_set_paused(engine, 1); /* Freeze the current request in place */
 	if (execlists_capture(engine))
-		intel_engine_reset(engine, "preemption time out");
+		intel_engine_reset(engine, msg);
 	else
 		ring_set_paused(engine, 0);
 
@@ -2711,6 +2846,13 @@ static void execlists_submission_tasklet(unsigned long data)
 	bool timeout = preempt_timeout(engine);
 
 	process_csb(engine);
+
+	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+		engine->execlists.error_interrupt = 0;
+		if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
+			execlists_reset(engine, "CS error");
+	}
+
 	if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
 		unsigned long flags;
 
@@ -2719,8 +2861,8 @@ static void execlists_submission_tasklet(unsigned long data)
 		spin_unlock_irqrestore(&engine->active.lock, flags);
 
 		/* Recheck after serialising with direct-submission */
-		if (timeout && preempt_timeout(engine))
-			preempt_reset(engine);
+		if (unlikely(timeout && preempt_timeout(engine)))
+			execlists_reset(engine, "preemption time out");
 	}
 }
 
@@ -2793,6 +2935,7 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	if (unlikely(ancestor_on_hold(engine, request))) {
+		RQ_TRACE(request, "ancestor on hold\n");
 		list_add_tail(&request->sched.link, &engine->active.hold);
 		i915_request_set_hold(request);
 	} else {
@@ -2874,6 +3017,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
 	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD] = head;
 	regs[CTX_RING_TAIL] = ring->tail;
+	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
 
 	/* RPCS */
 	if (engine->class == RENDER_CLASS) {
@@ -2921,22 +3065,6 @@ static void execlists_context_reset(struct intel_context *ce)
 	CE_TRACE(ce, "reset\n");
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
 
-	/*
-	 * Because we emit WA_TAIL_DWORDS there may be a disparity
-	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
-	 * that stored in context. As we only write new commands from
-	 * ce->ring->tail onwards, everything before that is junk. If the GPU
-	 * starts reading from its RING_HEAD from the context, it may try to
-	 * execute that junk and die.
-	 *
-	 * The contexts that are stilled pinned on resume belong to the
-	 * kernel, and are local to each engine. All other contexts will
-	 * have their head/tail sanitized upon pinning before use, so they
-	 * will never see garbage,
-	 *
-	 * So to avoid that we reset the context images upon resume. For
-	 * simplicity, we just zero everything out.
-	 */
 	intel_ring_reset(ce->ring, ce->ring->emit);
 
 	/* Scrub away the garbage */
@@ -2964,7 +3092,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
 	u32 *cs;
 
-	GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
+	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+		return 0;
 
 	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
@@ -3257,7 +3386,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
 		goto err;
 	}
 
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	err = i915_ggtt_pin(vma, 0, PIN_HIGH);
 	if (err)
 		goto err;
 
@@ -3346,6 +3475,49 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	return ret;
 }
 
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+	u32 status;
+
+	engine->execlists.error_interrupt = 0;
+	ENGINE_WRITE(engine, RING_EMR, ~0u);
+	ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+	status = ENGINE_READ(engine, RING_ESR);
+	if (unlikely(status)) {
+		dev_err(engine->i915->drm.dev,
+			"engine '%s' resumed still in error: %08x\n",
+			engine->name, status);
+		__intel_gt_reset(engine->gt, engine->mask);
+	}
+
+	/*
+	 * On current gen8+, we have 2 signals to play with
+	 *
+	 * - I915_ERROR_INSTRUCTION (bit 0)
+	 *
+	 *    Generate an error if the command parser encounters an invalid
+	 *    instruction
+	 *
+	 *    This is a fatal error.
+	 *
+	 * - CP_PRIV (bit 2)
+	 *
+	 *    Generate an error on privilege violation (where the CP replaces
+	 *    the instruction with a no-op). This also fires for writes into
+	 *    read-only scratch pages.
+	 *
+	 *    This is a non-fatal error, parsing continues.
+	 *
+	 * * there are a few others defined for odd HW that we do not use
+	 *
+	 * Since CP_PRIV fires for cases where we have chosen to ignore the
+	 * error (as the HW is validating and suppressing the mistakes), we
+	 * only unmask the instruction error bit.
+	 */
+	ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
 static void enable_execlists(struct intel_engine_cs *engine)
 {
 	u32 mode;
@@ -3367,6 +3539,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
 			i915_ggtt_offset(engine->status_page.vma));
 	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
 
+	enable_error_interrupt(engine);
+
 	engine->context_tag = 0;
 }
 
@@ -3384,9 +3558,6 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int execlists_resume(struct intel_engine_cs *engine)
 {
-	intel_engine_apply_workarounds(engine);
-	intel_engine_apply_whitelist(engine);
-
 	intel_mocs_init_engine(engine);
 
 	intel_engine_reset_breadcrumbs(engine);
@@ -3517,9 +3688,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	if (!rq)
 		goto unwind;
 
-	/* We still have requests in-flight; the engine should be active */
-	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
 	ce = rq->context;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
@@ -3529,8 +3697,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 		goto out_replay;
 	}
 
+	/* We still have requests in-flight; the engine should be active */
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
 	/* Context has requests still in-flight; it should not be idle! */
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
 	rq = active_request(ce->timeline, rq);
 	head = intel_ring_wrap(ce->ring, rq->head);
 	GEM_BUG_ON(head == ce->ring->tail);
@@ -3604,7 +3776,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 static void nop_submission_tasklet(unsigned long data)
 {
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
 	/* The driver is wedged; don't process any more events. */
+	WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
 }
 
 static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -4273,6 +4448,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4514,8 +4690,13 @@ populate_lr_context(struct intel_context *ce,
 		inhibit = false;
 	}
 
-	/* The second page of the context object contains some fields which must
-	 * be set up prior to the first execution. */
+	/* Clear the ppHWSP (inc. per-context counters) */
+	memset(vaddr, 0, PAGE_SIZE);
+
+	/*
+	 * The second page of the context object contains some registers which
+	 * must be set up prior to the first execution.
+	 */
 	execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
 				 ce, engine, ring, inhibit);
 
@@ -4553,8 +4734,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
 
 	if (!ce->timeline) {
 		struct intel_timeline *tl;
+		struct i915_vma *hwsp;
 
-		tl = intel_timeline_create(engine->gt, NULL);
+		/*
+		 * Use the static global HWSP for the kernel context, and
+		 * a dynamically allocated cacheline for everyone else.
+		 */
+		hwsp = NULL;
+		if (unlikely(intel_context_is_barrier(ce)))
+			hwsp = engine->status_page.vma;
+
+		tl = intel_timeline_create(engine->gt, hwsp);
 		if (IS_ERR(tl)) {
 			ret = PTR_ERR(tl);
 			goto error_deref_obj;
@@ -4723,7 +4913,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 	mask = rq->execution_mask;
 	if (unlikely(!mask)) {
 		/* Invalid selection, submit to a random engine in error */
-		i915_request_skip(rq, -ENODEV);
+		i915_request_set_error_once(rq, -ENODEV);
 		mask = ve->siblings[0]->mask;
 	}
 
@@ -4737,7 +4927,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 static void virtual_submission_tasklet(unsigned long data)
 {
 	struct virtual_engine * const ve = (struct virtual_engine *)data;
-	const int prio = ve->base.execlists.queue_priority_hint;
+	const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
 	intel_engine_mask_t mask;
 	unsigned int n;
 
@@ -5133,11 +5323,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\tE ");
 	}
 
-	last = NULL;
-	count = 0;
+	if (execlists->switch_priority_hint != INT_MIN)
+		drm_printf(m, "\t\tSwitch priority hint: %d\n",
+			   READ_ONCE(execlists->switch_priority_hint));
 	if (execlists->queue_priority_hint != INT_MIN)
 		drm_printf(m, "\t\tQueue priority hint: %d\n",
-			   execlists->queue_priority_hint);
+			   READ_ONCE(execlists->queue_priority_hint));
+
+	last = NULL;
+	count = 0;
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 		int i;
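
Among the many changes in this file, the context runtime tracking is the most self-contained: on each schedule-out the saved CTX_TIMESTAMP is read from the register state and the delta is folded into an EWMA and a running total. The counter is a raw u32, so the delta is formed in s32; a wrap (or an out-of-order save) shows up as a non-positive delta that is traced and skipped rather than accumulated. A stripped-down, standalone sketch of that accumulation; field names mirror the diff:

/* Sketch: wrap-aware runtime accumulation, a standalone rendition of
 * intel_context_update_runtime() above.
 */
struct runtime_acc {
	u32 last;	/* previous CTX_TIMESTAMP sample */
	u64 total;	/* accumulated ticks */
};

static void runtime_acc_update(struct runtime_acc *rt, u32 now)
{
	s32 dt = now - rt->last;	/* s32 keeps a wrapped delta negative */

	rt->last = now;
	if (dt <= 0)			/* underflow: trace and skip */
		return;

	rt->total += dt;
}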
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 08a3be6..d39b725 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -17,6 +17,7 @@
 #define CTX_RING_CTL			(0x0a + 1)
 #define CTX_BB_STATE			(0x10 + 1)
 #define CTX_BB_PER_CTX_PTR		(0x18 + 1)
+#define CTX_TIMESTAMP			(0x22 + 1)
 #define CTX_PDP3_UDW			(0x24 + 1)
 #define CTX_PDP3_LDW			(0x26 + 1)
 #define CTX_PDP2_UDW			(0x28 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index eeef90b..632e08a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -280,9 +280,32 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
 	GEN11_MOCS_ENTRIES
 };
 
-static bool get_mocs_settings(const struct drm_i915_private *i915,
-			      struct drm_i915_mocs_table *table)
+enum {
+	HAS_GLOBAL_MOCS = BIT(0),
+	HAS_ENGINE_MOCS = BIT(1),
+	HAS_RENDER_L3CC = BIT(2),
+};
+
+static bool has_l3cc(const struct drm_i915_private *i915)
 {
+	return true;
+}
+
+static bool has_global_mocs(const struct drm_i915_private *i915)
+{
+	return HAS_GLOBAL_MOCS_REGISTERS(i915);
+}
+
+static bool has_mocs(const struct drm_i915_private *i915)
+{
+	return !IS_DGFX(i915);
+}
+
+static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+				      struct drm_i915_mocs_table *table)
+{
+	unsigned int flags;
+
 	if (INTEL_GEN(i915) >= 12) {
 		table->size  = ARRAY_SIZE(tgl_mocs_table);
 		table->table = tgl_mocs_table;
@@ -300,13 +323,13 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
 		table->n_entries = GEN9_NUM_MOCS_ENTRIES;
 		table->table = broxton_mocs_table;
 	} else {
-		WARN_ONCE(INTEL_GEN(i915) >= 9,
-			  "Platform that should have a MOCS table does not.\n");
-		return false;
+		drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+			      "Platform that should have a MOCS table does not.\n");
+		return 0;
 	}
 
 	if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
-		return false;
+		return 0;
 
 	/* WaDisableSkipCaching:skl,bxt,kbl,glk */
 	if (IS_GEN(i915, 9)) {
@@ -315,10 +338,20 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
 		for (i = 0; i < table->size; i++)
 			if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
 					      (L3_ESC(1) | L3_SCC(0x7))))
-				return false;
+				return 0;
 	}
 
-	return true;
+	flags = 0;
+	if (has_mocs(i915)) {
+		if (has_global_mocs(i915))
+			flags |= HAS_GLOBAL_MOCS;
+		else
+			flags |= HAS_ENGINE_MOCS;
+	}
+	if (has_l3cc(i915))
+		flags |= HAS_RENDER_L3CC;
+
+	return flags;
 }
 
 /*
@@ -411,18 +444,20 @@ static void init_l3cc_table(struct intel_engine_cs *engine,
 void intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_mocs_table table;
+	unsigned int flags;
 
 	/* Called under a blanket forcewake */
 	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
 
-	if (!get_mocs_settings(engine->i915, &table))
+	flags = get_mocs_settings(engine->i915, &table);
+	if (!flags)
 		return;
 
 	/* Platforms with global MOCS do not need per-engine initialization. */
-	if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+	if (flags & HAS_ENGINE_MOCS)
 		init_mocs_table(engine, &table);
 
-	if (engine->class == RENDER_CLASS)
+	if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
 		init_l3cc_table(engine, &table);
 }
 
@@ -431,26 +466,17 @@ static u32 global_mocs_offset(void)
 	return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
 }
 
-static void init_global_mocs(struct intel_gt *gt)
+void intel_mocs_init(struct intel_gt *gt)
 {
 	struct drm_i915_mocs_table table;
+	unsigned int flags;
 
 	/*
 	 * LLC and eDRAM control values are not applicable to dgfx
 	 */
-	if (IS_DGFX(gt->i915))
-		return;
-
-	if (!get_mocs_settings(gt->i915, &table))
-		return;
-
-	__init_mocs_table(gt->uncore, &table, global_mocs_offset());
-}
-
-void intel_mocs_init(struct intel_gt *gt)
-{
-	if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
-		init_global_mocs(gt);
+	flags = get_mocs_settings(gt->i915, &table);
+	if (flags & HAS_GLOBAL_MOCS)
+		__init_mocs_table(gt->uncore, &table, global_mocs_offset());
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9e303c29..3847ee4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
 #include <linux/pm_runtime.h>
 
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_rc6.h"
@@ -226,10 +227,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 
 	set(uncore, GEN6_RC_SLEEP, 0);
 	set(uncore, GEN6_RC1e_THRESHOLD, 1000);
-	if (IS_IVYBRIDGE(i915))
-		set(uncore, GEN6_RC6_THRESHOLD, 125000);
-	else
-		set(uncore, GEN6_RC6_THRESHOLD, 50000);
+	set(uncore, GEN6_RC6_THRESHOLD, 50000);
 	set(uncore, GEN6_RC6p_THRESHOLD, 150000);
 	set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
@@ -299,7 +297,6 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 		pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
 		pctx = i915_gem_object_create_stolen_for_preallocated(i915,
 								      pcbr_offset,
-								      I915_GTT_OFFSET_NONE,
 								      pctx_size);
 		if (IS_ERR(pctx))
 			return PTR_ERR(pctx);
@@ -323,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 		return PTR_ERR(pctx);
 	}
 
-	GEM_BUG_ON(range_overflows_t(u64,
-				     i915->dsm.start,
-				     pctx->stolen->start,
-				     U32_MAX));
+	GEM_BUG_ON(range_overflows_end_t(u64,
+					 i915->dsm.start,
+					 pctx->stolen->start,
+					 U32_MAX));
 	pctx_paddr = i915->dsm.start + pctx->stolen->start;
 	intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
 
@@ -542,6 +539,8 @@ void intel_rc6_init(struct intel_rc6 *rc6)
 
 void intel_rc6_sanitize(struct intel_rc6 *rc6)
 {
+	memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
+
 	if (rc6->enabled) { /* unbalanced suspend/resume */
 		rpm_get(rc6);
 		rc6->enabled = false;
@@ -604,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
 void intel_rc6_park(struct intel_rc6 *rc6)
 {
 	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	unsigned int target;
 
 	if (!rc6->enabled)
 		return;
@@ -618,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
 
 	/* Turn off the HW timers and go directly to rc6 */
 	set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
-	set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+	if (HAS_RC6pp(rc6_to_i915(rc6)))
+		target = 0x6; /* deepest rc6 */
+	else if (HAS_RC6p(rc6_to_i915(rc6)))
+		target = 0x5; /* deep rc6 */
+	else
+		target = 0x4; /* normal rc6 */
+	set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
 }
 
 void intel_rc6_disable(struct intel_rc6 *rc6)
@@ -713,7 +720,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
 	 */
 	i = (i915_mmio_reg_offset(reg) -
 	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
-	if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+	if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
 		return 0;
 
 	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
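One subtlety in the vlv_rc6_init() hunk above: range_overflows_t() is swapped for range_overflows_end_t(). Going by the i915_utils.h helpers of this era (simplified; the real macros also type-check and cast their arguments), the only difference is the comparison on start:

	/* range_overflows():     start >= max || size > max - start */
	/* range_overflows_end(): start >  max || size > max - start */

i.e. the _end variant asserts exactly "start + size <= max", tolerating a range that ends on the boundary, which is the intended check that dsm.start + stolen->start still fits in the 32-bit VLV_PCBR register.
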
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index beee0cf..80db3c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
 
 	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
-		if (rq->context == hung_ctx)
-			i915_request_skip(rq, -EIO);
+		if (rq->context == hung_ctx) {
+			i915_request_set_error_once(rq, -EIO);
+			__i915_request_skip(rq);
+		}
 }
 
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -72,9 +74,10 @@ static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 	if (score) {
 		atomic_add(score, &file_priv->ban_score);
 
-		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
-				 ctx->name, score,
-				 atomic_read(&file_priv->ban_score));
+		drm_dbg(&ctx->i915->drm,
+			"client %s: gained %u ban score, now %u\n",
+			ctx->name, score,
+			atomic_read(&file_priv->ban_score));
 	}
 }
 
@@ -85,19 +88,18 @@ static bool mark_guilty(struct i915_request *rq)
 	bool banned;
 	int i;
 
+	if (intel_context_is_closed(rq->context)) {
+		intel_context_set_banned(rq->context);
+		return true;
+	}
+
 	rcu_read_lock();
 	ctx = rcu_dereference(rq->context->gem_context);
 	if (ctx && !kref_get_unless_zero(&ctx->ref))
 		ctx = NULL;
 	rcu_read_unlock();
 	if (!ctx)
-		return false;
-
-	if (i915_gem_context_is_closed(ctx)) {
-		intel_context_set_banned(rq->context);
-		banned = true;
-		goto out;
-	}
+		return intel_context_is_banned(rq->context);
 
 	atomic_inc(&ctx->guilty_count);
 
@@ -122,8 +124,8 @@ static bool mark_guilty(struct i915_request *rq)
 	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
 		banned = true;
 	if (banned) {
-		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
-				 ctx->name, atomic_read(&ctx->guilty_count));
+		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
+			ctx->name, atomic_read(&ctx->guilty_count));
 		intel_context_set_banned(rq->context);
 	}
 
@@ -153,11 +155,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 
 	rcu_read_lock(); /* protect the GEM context */
 	if (guilty) {
-		i915_request_skip(rq, -EIO);
+		i915_request_set_error_once(rq, -EIO);
+		__i915_request_skip(rq);
 		if (mark_guilty(rq))
 			engine_skip_context(rq);
 	} else {
-		dma_fence_set_error(&rq->fence, -EAGAIN);
+		i915_request_set_error_once(rq, -EAGAIN);
 		mark_innocent(rq);
 	}
 	rcu_read_unlock();
@@ -226,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
 		goto out;
 	}
 
@@ -234,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret =  wait_for_atomic(g4x_reset_complete(pdev), 50);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
 		goto out;
 	}
 
@@ -260,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 					   5000, 0,
 					   NULL);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+		drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
 		goto out;
 	}
 
@@ -271,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 					   5000, 0,
 					   NULL);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+		drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
 		goto out;
 	}
 
@@ -300,8 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 					   500, 0,
 					   NULL);
 	if (err)
-		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
-				 hw_domain_mask);
+		drm_dbg(&gt->i915->drm,
+			"Wait for 0x%08x engines reset failed\n",
+			hw_domain_mask);
 
 	return err;
 }
@@ -401,7 +405,8 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
 		return 0;
 
 	if (ret) {
-		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+		drm_dbg(&engine->i915->drm,
+			"Wait for SFC forced lock ack failed\n");
 		return ret;
 	}
 
@@ -515,9 +520,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
 					   700, 0, NULL);
 	if (ret)
-		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
-			  engine->name, request,
-			  intel_uncore_read_fw(uncore, reg));
+		drm_err(&engine->i915->drm,
+			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+			engine->name, request,
+			intel_uncore_read_fw(uncore, reg));
 
 	return ret;
 }
@@ -781,7 +787,7 @@ static void nop_submit_request(struct i915_request *request)
 	unsigned long flags;
 
 	RQ_TRACE(request, "-EIO\n");
-	dma_fence_set_error(&request->fence, -EIO);
+	i915_request_set_error_once(request, -EIO);
 
 	spin_lock_irqsave(&engine->active.lock, flags);
 	__i915_request_submit(request);
@@ -800,13 +806,6 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 	if (test_bit(I915_WEDGED, &gt->reset.flags))
 		return;
 
-	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
-		struct drm_printer p = drm_debug_printer(__func__);
-
-		for_each_engine(engine, gt, id)
-			intel_engine_dump(engine, &p, "%s\n", engine->name);
-	}
-
 	GT_TRACE(gt, "start\n");
 
 	/*
@@ -845,10 +844,30 @@ void intel_gt_set_wedged(struct intel_gt *gt)
 {
 	intel_wakeref_t wakeref;
 
+	if (test_bit(I915_WEDGED, &gt->reset.flags))
+		return;
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 	mutex_lock(&gt->reset.mutex);
-	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
-		__intel_gt_set_wedged(gt);
+
+	if (GEM_SHOW_DEBUG()) {
+		struct drm_printer p = drm_debug_printer(__func__);
+		struct intel_engine_cs *engine;
+		enum intel_engine_id id;
+
+		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
+		for_each_engine(engine, gt, id) {
+			if (intel_engine_is_idle(engine))
+				continue;
+
+			intel_engine_dump(engine, &p, "%s\n", engine->name);
+		}
+	}
+
+	__intel_gt_set_wedged(gt);
+
 	mutex_unlock(&gt->reset.mutex);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
 
 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
@@ -969,7 +988,7 @@ static int resume(struct intel_gt *gt)
 	int ret;
 
 	for_each_engine(engine, gt, id) {
-		ret = engine->resume(engine);
+		ret = intel_engine_resume(engine);
 		if (ret)
 			return ret;
 	}
@@ -1022,7 +1041,7 @@ void intel_gt_reset(struct intel_gt *gt,
 		if (i915_modparams.reset)
 			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
 		else
-			DRM_DEBUG_DRIVER("GPU reset disabled\n");
+			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
 		goto error;
 	}
 
@@ -1049,8 +1068,9 @@ void intel_gt_reset(struct intel_gt *gt,
 	 */
 	ret = intel_gt_init_hw(gt);
 	if (ret) {
-		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
-			  ret);
+		drm_err(&gt->i915->drm,
+			"Failed to initialise HW following reset (%d)\n",
+			ret);
 		goto taint;
 	}
 
@@ -1126,9 +1146,8 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
 	if (ret) {
 		/* If we fail here, we expect to fallback to a global reset */
-		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
-				 uses_guc ? "GuC " : "",
-				 engine->name, ret);
+		drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
+			uses_guc ? "GuC " : "", engine->name, ret);
 		goto out;
 	}
 
@@ -1144,7 +1163,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 	 * have been reset to their default values. Follow the init_ring
 	 * process to program RING_MODE, HWSP and re-enable submission.
 	 */
-	ret = engine->resume(engine);
+	ret = intel_engine_resume(engine);
 
 out:
 	intel_engine_cancel_stop_cs(engine);
@@ -1165,7 +1184,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
 
 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
-	DRM_DEBUG_DRIVER("resetting chip\n");
+	drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
 	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
 	/* Use a watchdog to ensure that our reset completes */
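Throughout these reset paths (and in mock_engine.c and intel_ring_submission.c later in the diff), open-coded dma_fence_set_error()/i915_request_skip() calls give way to i915_request_set_error_once(). The helper itself is outside this diff; its contract is that only the first error recorded on a request's fence sticks. A plausible sketch of such a helper (an illustration of the once-semantics, not the i915 definition):

	static void request_set_error_once(struct i915_request *rq, int error)
	{
		int old = READ_ONCE(rq->fence.error);

		do {
			if (old)	/* first error wins; do not overwrite */
				return;
		} while (!try_cmpxchg(&rq->fence.error, &old, error));
	}
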
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 6ff803f..8cda1b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -31,17 +31,15 @@ int intel_ring_pin(struct intel_ring *ring)
 	if (atomic_fetch_inc(&ring->pin_count))
 		return 0;
 
-	flags = PIN_GLOBAL;
-
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 
 	if (vma->obj->stolen)
 		flags |= PIN_MAPPABLE;
 	else
 		flags |= PIN_HIGH;
 
-	ret = i915_vma_pin(vma, 0, 0, flags);
+	ret = i915_ggtt_pin(vma, 0, flags);
 	if (unlikely(ret))
 		goto err_unpin;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bc44fe8..fdc3f10 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
 
 #include <linux/log2.h>
 
-#include <drm/i915_drm.h>
-
 #include "gem/i915_gem_context.h"
 
 #include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_context.h"
@@ -568,7 +567,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
 		return;
 
 	/* ring should be idle before issuing a sync flush*/
-	WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+	drm_WARN_ON(&dev_priv->drm,
+		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
 	ENGINE_WRITE(engine, RING_INSTPM,
 		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -626,6 +626,27 @@ static bool stop_ring(struct intel_engine_cs *engine)
 	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
 }
 
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+	if (i915_is_ggtt(vm))
+		vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+	return vm;
+}
+
+static void set_pp_dir(struct intel_engine_cs *engine)
+{
+	struct i915_address_space *vm = vm_alias(engine->gt->vm);
+
+	if (vm) {
+		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+		ENGINE_WRITE(engine, RING_PP_DIR_BASE,
+			     px_base(ppgtt->pd)->ggtt_offset << 10);
+	}
+}
+
 static int xcs_resume(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -684,6 +705,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 	intel_ring_update_space(ring);
 
+	set_pp_dir(engine);
+
 	/* First wake the ring up to an empty/idle ring */
 	ENGINE_WRITE(engine, RING_HEAD, ring->head);
 	ENGINE_WRITE(engine, RING_TAIL, ring->head);
@@ -857,43 +880,6 @@ static int rcs_resume(struct intel_engine_cs *engine)
 		intel_uncore_write(uncore, ECOSKPD,
 			   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
 
-	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (IS_GEN_RANGE(i915, 4, 6))
-		intel_uncore_write(uncore, MI_MODE,
-				   _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-
-	/* We need to disable the AsyncFlip performance optimisations in order
-	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
-	 * programmed to '1' on all products.
-	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
-	 */
-	if (IS_GEN_RANGE(i915, 6, 7))
-		intel_uncore_write(uncore, MI_MODE,
-				   _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
-	/* Required for the hardware to program scanline values for waiting */
-	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (IS_GEN(i915, 6))
-		intel_uncore_write(uncore, GFX_MODE,
-			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
-
-	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN(i915, 7))
-		intel_uncore_write(uncore, GFX_MODE_GEN7,
-			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
-			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-
-	if (IS_GEN(i915, 6)) {
-		/* From the Sandybridge PRM, volume 1 part 3, page 24:
-		 * "If this bit is set, STCunit will have LRA as replacement
-		 *  policy. [...] This bit must be reset.  LRA replacement
-		 *  policy is not supported."
-		 */
-		intel_uncore_write(uncore, CACHE_MODE_0,
-			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-	}
-
 	if (IS_GEN_RANGE(i915, 6, 7))
 		intel_uncore_write(uncore, INSTPM,
 				   _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
@@ -910,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		if (!i915_request_signaled(request))
-			dma_fence_set_error(&request->fence, -EIO);
-
+		i915_request_set_error_once(request, -EIO);
 		i915_request_mark_complete(request);
 	}
 
@@ -1197,23 +1181,12 @@ static void ring_context_destroy(struct kref *ref)
 	intel_context_free(ce);
 }
 
-static struct i915_address_space *vm_alias(struct intel_context *ce)
-{
-	struct i915_address_space *vm;
-
-	vm = ce->vm;
-	if (i915_is_ggtt(vm))
-		vm = &i915_vm_to_ggtt(vm)->alias->vm;
-
-	return vm;
-}
-
 static int __context_pin_ppgtt(struct intel_context *ce)
 {
 	struct i915_address_space *vm;
 	int err = 0;
 
-	vm = vm_alias(ce);
+	vm = vm_alias(ce->vm);
 	if (vm)
 		err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
 
@@ -1224,7 +1197,7 @@ static void __context_unpin_ppgtt(struct intel_context *ce)
 {
 	struct i915_address_space *vm;
 
-	vm = vm_alias(ce);
+	vm = vm_alias(ce->vm);
 	if (vm)
 		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
 }
@@ -1384,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
 	return rq->engine->emit_flush(rq, EMIT_FLUSH);
 }
 
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+				 struct intel_context *ce,
+				 u32 flags)
 {
 	struct drm_i915_private *i915 = rq->i915;
 	struct intel_engine_cs *engine = rq->engine;
@@ -1459,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->context->state) | flags;
+	*cs++ = i915_ggtt_offset(ce->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1574,21 +1549,64 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
 	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
 }
 
+static int clear_residuals(struct i915_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	int ret;
+
+	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+	if (ret)
+		return ret;
+
+	if (engine->kernel_context->state) {
+		ret = mi_set_context(rq,
+				     engine->kernel_context,
+				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+		if (ret)
+			return ret;
+	}
+
+	ret = engine->emit_bb_start(rq,
+				    engine->wa_ctx.vma->node.start, 0,
+				    0);
+	if (ret)
+		return ret;
+
+	ret = engine->emit_flush(rq, EMIT_FLUSH);
+	if (ret)
+		return ret;
+
+	/* Always invalidate before the next switch_mm() */
+	return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
 static int switch_context(struct i915_request *rq)
 {
+	struct intel_engine_cs *engine = rq->engine;
 	struct intel_context *ce = rq->context;
+	void **residuals = NULL;
 	int ret;
 
 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
-	ret = switch_mm(rq, vm_alias(ce));
+	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+		if (engine->wa_ctx.vma->private != ce) {
+			ret = clear_residuals(rq);
+			if (ret)
+				return ret;
+
+			residuals = &engine->wa_ctx.vma->private;
+		}
+	}
+
+	ret = switch_mm(rq, vm_alias(ce->vm));
 	if (ret)
 		return ret;
 
 	if (ce->state) {
 		u32 flags;
 
-		GEM_BUG_ON(rq->engine->id != RCS0);
+		GEM_BUG_ON(engine->id != RCS0);
 
 		/* For resource streamer on HSW+ and power context elsewhere */
 		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1600,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
 		else
 			flags |= MI_RESTORE_INHIBIT;
 
-		ret = mi_set_context(rq, flags);
+		ret = mi_set_context(rq, ce, flags);
 		if (ret)
 			return ret;
 	}
@@ -1609,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
 	if (ret)
 		return ret;
 
+	/*
+	 * Now past the point of no return, this request _will_ be emitted.
+	 *
+	 * Or at least this preamble will be emitted, the request may be
+	 * interrupted prior to submitting the user payload. If so, we
+	 * still submit the "empty" request in order to preserve global
+	 * state tracking such as this, our tracking of the current
+	 * dirty context.
+	 */
+	if (residuals) {
+		intel_context_put(*residuals);
+		*residuals = intel_context_get(ce);
+	}
+
 	return 0;
 }
 
@@ -1662,7 +1694,8 @@ static void gen6_bsd_submit_request(struct i915_request *request)
 					 GEN6_BSD_SLEEP_INDICATOR,
 					 0,
 					 1000, 0, NULL))
-		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+		drm_err(&uncore->i915->drm,
+			"timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
 	i9xx_submit_request(request);
@@ -1787,11 +1820,16 @@ static void ring_release(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
-	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
-		(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
 	intel_engine_cleanup_common(engine);
 
+	if (engine->wa_ctx.vma) {
+		intel_context_put(engine->wa_ctx.vma->private);
+		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+	}
+
 	intel_ring_unpin(engine->legacy.ring);
 	intel_ring_put(engine->legacy.ring);
 
@@ -1939,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
 	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
 }
 
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+				    struct i915_vma * const vma)
+{
+	return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int size;
+	int err;
+
+	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+	if (size <= 0)
+		return size;
+
+	size = ALIGN(size, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(engine->i915, size);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
+
+	vma->private = intel_context_create(engine); /* dummy residuals */
+	if (IS_ERR(vma->private)) {
+		err = PTR_ERR(vma->private);
+		goto err_obj;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+	if (err)
+		goto err_private;
+
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_unpin;
+
+	err = gen7_ctx_switch_bb_setup(engine, vma);
+	if (err)
+		goto err_unpin;
+
+	engine->wa_ctx.vma = vma;
+	return 0;
+
+err_unpin:
+	i915_vma_unpin(vma);
+err_private:
+	intel_context_put(vma->private);
+err_obj:
+	i915_gem_object_put(obj);
+	return err;
+}
+
 int intel_ring_submission_setup(struct intel_engine_cs *engine)
 {
 	struct intel_timeline *timeline;
@@ -1992,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
+	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+		err = gen7_ctx_switch_bb_init(engine);
+		if (err)
+			goto err_ring_unpin;
+	}
+
 	/* Finally, take ownership and responsibility for cleanup! */
 	engine->release = ring_release;
 
 	return 0;
 
+err_ring_unpin:
+	intel_ring_unpin(ring);
 err_ring:
 	intel_ring_put(ring);
 err_timeline_unpin:
@@ -2007,3 +2111,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 	intel_engine_cleanup_common(engine);
 	return err;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
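
Taken together, the intel_ring_submission.c hunks implement the Haswell clear-residuals workaround: gen7_ctx_switch_bb_init() builds a scrub batch (via gen7_setup_clear_gpr_bb() from the new gen7_renderclear.h) into engine->wa_ctx.vma, and that vma's ->private pointer tracks which user context last left its state on the hardware. Paraphrasing the switch_context() flow above (a summary of the hunks, not new code):

	/* Switching to ce on an engine that has a clear-residuals batch: */
	if (engine->wa_ctx.vma && ce != engine->kernel_context &&
	    engine->wa_ctx.vma->private != ce) {
		clear_residuals(rq);	/* kernel mm + kernel ctx + batch + flush */
		/* ... then, once the request is past the point of no return: */
		intel_context_put(engine->wa_ctx.vma->private);
		engine->wa_ctx.vma->private = intel_context_get(ce);
	}

The reference parked in ->private is also why ring_release() gained an intel_context_put() before unpinning the vma.
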
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d2a3d93..cfaf141 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <drm/i915_drm.h>
+
 #include "i915_drv.h"
 #include "intel_gt.h"
 #include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
 	if (val < rps->max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
-	mask &= rps->pm_events;
+	mask &= READ_ONCE(rps->pm_events);
 
 	return rps_pm_sanitize_mask(rps, ~mask);
 }
@@ -68,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
 static void rps_enable_interrupts(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
+	u32 events;
 
 	rps_reset_ei(rps);
 
 	if (IS_VALLEYVIEW(gt->i915))
 		/* WaGsvRC0ResidencyMethod:vlv */
-		rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+		events = GEN6_PM_RP_UP_EI_EXPIRED;
 	else
-		rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
-				  GEN6_PM_RP_DOWN_THRESHOLD |
-				  GEN6_PM_RP_DOWN_TIMEOUT);
+		events = (GEN6_PM_RP_UP_THRESHOLD |
+			  GEN6_PM_RP_DOWN_THRESHOLD |
+			  GEN6_PM_RP_DOWN_TIMEOUT);
 
+	WRITE_ONCE(rps->pm_events, events);
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_enable_irq(gt, rps->pm_events);
 	spin_unlock_irq(&gt->irq_lock);
@@ -115,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
 
-	rps->pm_events = 0;
-
+	WRITE_ONCE(rps->pm_events, 0);
 	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
 	spin_lock_irq(&gt->irq_lock);
@@ -642,7 +645,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
 {
 	mutex_lock(&rps->power.mutex);
 	if (interactive) {
-		if (!rps->power.interactive++ && rps->active)
+		if (!rps->power.interactive++ && READ_ONCE(rps->active))
 			rps_set_power(rps, HIGH_POWER);
 	} else {
 		GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +722,15 @@ void intel_rps_unpark(struct intel_rps *rps)
 	 * performance, jump directly to RPe as our starting frequency.
 	 */
 	mutex_lock(&rps->lock);
-	rps->active = true;
+
+	WRITE_ONCE(rps->active, true);
+
 	freq = max(rps->cur_freq, rps->efficient_freq),
 	freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
 	intel_rps_set(rps, freq);
+
 	rps->last_adj = 0;
+
 	mutex_unlock(&rps->lock);
 
 	if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +750,7 @@ void intel_rps_park(struct intel_rps *rps)
 	if (INTEL_GEN(i915) >= 6)
 		rps_disable_interrupts(rps);
 
-	rps->active = false;
+	WRITE_ONCE(rps->active, false);
 	if (rps->last_freq <= rps->idle_freq)
 		return;
 
@@ -763,14 +770,27 @@ void intel_rps_park(struct intel_rps *rps)
 	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
 	rps_set(rps, rps->idle_freq, false);
 	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+	/*
+	 * Since we will try and restart from the previously requested
+	 * frequency on unparking, treat this idle point as a downclock
+	 * interrupt and reduce the frequency for resume. If we park/unpark
+	 * more frequently than the rps worker can run, we will not respond
+	 * to any EI and never see a change in frequency.
+	 *
+	 * (Note we accommodate Cherryview's limitation of only using an
+	 * even bin by applying it to all.)
+	 */
+	rps->cur_freq =
+		max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
 }
 
 void intel_rps_boost(struct i915_request *rq)
 {
-	struct intel_rps *rps = &rq->engine->gt->rps;
+	struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
 	unsigned long flags;
 
-	if (i915_request_signaled(rq) || !rps->active)
+	if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
 		return;
 
 	/* Serializes with i915_request_retire() */
@@ -1026,7 +1046,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
 	vlv_punit_put(i915);
 
 	/* RPS code assumes GPLL is used */
-	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+		      "GPLL not enabled\n");
 
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1123,7 +1144,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
 	vlv_punit_put(i915);
 
 	/* RPS code assumes GPLL is used */
-	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+		      "GPLL not enabled\n");
 
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1191,11 +1213,11 @@ void intel_rps_enable(struct intel_rps *rps)
 	if (!rps->enabled)
 		return;
 
-	WARN_ON(rps->max_freq < rps->min_freq);
-	WARN_ON(rps->idle_freq > rps->max_freq);
+	drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
+	drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
 
-	WARN_ON(rps->efficient_freq < rps->min_freq);
-	WARN_ON(rps->efficient_freq > rps->max_freq);
+	drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
+	drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
 }
 
 static void gen6_rps_disable(struct intel_rps *rps)
@@ -1390,9 +1412,9 @@ static void chv_rps_init(struct intel_rps *rps)
 			BIT(VLV_IOSF_SB_NC) |
 			BIT(VLV_IOSF_SB_CCK));
 
-	WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
-		   rps->min_freq) & 1,
-		  "Odd GPU freq values\n");
+	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
+				   rps->rp1_freq | rps->min_freq) & 1,
+		      "Odd GPU freq values\n");
 }
 
 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
@@ -1451,12 +1473,12 @@ static void rps_work(struct work_struct *work)
 	u32 pm_iir = 0;
 
 	spin_lock_irq(&gt->irq_lock);
-	pm_iir = fetch_and_zero(&rps->pm_iir);
+	pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
 	client_boost = atomic_read(&rps->num_waiters);
 	spin_unlock_irq(&gt->irq_lock);
 
 	/* Make sure we didn't queue anything we're not going to process. */
-	if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+	if (!pm_iir && !client_boost)
 		goto out;
 
 	mutex_lock(&rps->lock);
@@ -1552,11 +1574,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
+	u32 events;
 
-	if (pm_iir & rps->pm_events) {
+	events = pm_iir & READ_ONCE(rps->pm_events);
+	if (events) {
 		spin_lock(&gt->irq_lock);
-		gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
-		rps->pm_iir |= pm_iir & rps->pm_events;
+
+		gen6_gt_pm_mask_irq(gt, events);
+		rps->pm_iir |= events;
+
 		schedule_work(&rps->work);
 		spin_unlock(&gt->irq_lock);
 	}
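
The pm_events changes in intel_rps.c follow the standard pattern for a field written in process context but sampled locklessly from the interrupt path: stores become WRITE_ONCE() and unlocked loads become READ_ONCE(), so each reader takes a single snapshot instead of letting the compiler re-read the field mid-expression. Condensed from the hunks (the locking notes are an informal reading):

	/* enable/disable paths (process context): */
	WRITE_ONCE(rps->pm_events, events);

	/* irq handler / rps worker: snapshot once, then use the copy */
	events = pm_iir & READ_ONCE(rps->pm_events);
	if (events) {
		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;	/* still under gt->irq_lock */
	}

rps->active gets the same READ_ONCE/WRITE_ONCE treatment for the same reason.
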
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index d8d9f11..91debbc 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -312,7 +312,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
 	if (atomic_add_unless(&tl->pin_count, 1, 0))
 		return 0;
 
-	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
 	if (err)
 		return err;
 
@@ -410,6 +410,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	void *vaddr;
 	int err;
 
+	might_lock(&tl->gt->ggtt->vm.mutex);
+
 	/*
 	 * If there is an outstanding GPU reference to this cacheline,
 	 * such as it being sampled by a HW semaphore on another timeline,
@@ -435,7 +437,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 		goto err_rollback;
 	}
 
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	err = i915_ggtt_pin(vma, 0, PIN_HIGH);
 	if (err) {
 		__idle_hwsp_free(vma->private, cacheline);
 		goto err_rollback;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 6c2f846..5176ad1 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -116,17 +116,17 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 		} else {
 			wa_ = &wal->list[mid];
 
-			if ((wa->mask & ~wa_->mask) == 0) {
-				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
+				DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
 					  i915_mmio_reg_offset(wa_->reg),
-					  wa_->mask, wa_->val);
+					  wa_->clr, wa_->set);
 
-				wa_->val &= ~wa->mask;
+				wa_->set &= ~wa->clr;
 			}
 
 			wal->wa_count++;
-			wa_->val |= wa->val;
-			wa_->mask |= wa->mask;
+			wa_->set |= wa->set;
+			wa_->clr |= wa->clr;
 			wa_->read |= wa->read;
 			return;
 		}
@@ -147,13 +147,13 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 	}
 }
 
-static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
-		   u32 val, u32 read_mask)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
+		   u32 clear, u32 set, u32 read_mask)
 {
 	struct i915_wa wa = {
 		.reg  = reg,
-		.mask = mask,
-		.val  = val,
+		.clr  = clear,
+		.set  = set,
 		.read = read_mask,
 	};
 
@@ -161,38 +161,43 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
 }
 
 static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
-		   u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
 {
-	wa_add(wal, reg, mask, val, mask);
+	wa_add(wal, reg, clear, set, clear);
+}
+
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+{
+	wa_write_masked_or(wal, reg, ~0, set);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+{
+	wa_write_masked_or(wal, reg, set, set);
 }
 
 static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
 }
 
 static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-	wa_write_masked_or(wal, reg, ~0, val);
-}
-
-static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
-{
-	wa_write_masked_or(wal, reg, val, val);
+	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
 }
 
 #define WA_SET_BIT_MASKED(addr, mask) \
-	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
+	wa_masked_en(wal, (addr), (mask))
 
 #define WA_CLR_BIT_MASKED(addr, mask) \
-	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
+	wa_masked_dis(wal, (addr), (mask))
 
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
-	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
+	wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
 
 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
 				      struct i915_wa_list *wal)
@@ -570,12 +575,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	/* allow headerless messages for preemptible GPGPU context */
 	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
 			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+	/* Wa_1604278689:icl,ehl */
+	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+	wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+			   0, /* write-only register; skip validation */
+			   0xFFFFFFFF);
+
+	/* Wa_1406306137:icl,ehl */
+	wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
 }
 
 static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
 				     struct i915_wa_list *wal)
 {
-	/* Wa_1409142259:tgl */
+	/*
+	 * Wa_1409142259:tgl
+	 * Wa_1409347922:tgl
+	 * Wa_1409252684:tgl
+	 * Wa_1409217633:tgl
+	 * Wa_1409207793:tgl
+	 * Wa_1409178076:tgl
+	 * Wa_1408979724:tgl
+	 */
 	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
 			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
 
@@ -588,6 +610,11 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	 */
 	wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
 	       FF_MODE2_TDS_TIMER_128, 0);
+
+	/* WaDisableGPGPUMidThreadPreemption:tgl */
+	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 }
 
 static void
@@ -657,7 +684,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
 		*cs++ = i915_mmio_reg_offset(wa->reg);
-		*cs++ = wa->val;
+		*cs++ = wa->set;
 	}
 	*cs++ = MI_NOOP;
 
@@ -822,7 +849,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 		DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
 			 intel_sseu_get_subslices(sseu, slice), l3_en);
 		subslice = fls(l3_en);
-		WARN_ON(!subslice);
+		drm_WARN_ON(&i915->drm, !subslice);
 	}
 	subslice--;
 
@@ -893,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 			    SLICE_UNIT_LEVEL_CLKGATE,
 			    MSCUNIT_CLKGATE_DIS);
 
-	/* Wa_1406680159:icl */
-	wa_write_or(wal,
-		    SUBSLICE_UNIT_LEVEL_CLKGATE,
-		    GWUNIT_CLKGATE_DIS);
-
 	/* Wa_1406838659:icl (pre-prod) */
 	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
 		wa_write_or(wal,
@@ -926,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 			    SUBSLICE_UNIT_LEVEL_CLKGATE2,
 			    CPSSUNIT_CLKGATE_DIS);
 
-	/* Wa_1409180338:tgl */
+	/* Wa_1607087056:tgl, also known as BUG:1409180338 */
 	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
 		wa_write_or(wal,
 			    SLICE_UNIT_LEVEL_CLKGATE,
@@ -986,11 +1008,10 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
 static bool
 wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
 {
-	if ((cur ^ wa->val) & wa->read) {
-		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+	if ((cur ^ wa->set) & wa->read) {
+		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
 			  name, from, i915_mmio_reg_offset(wa->reg),
-			  cur, cur & wa->read,
-			  wa->val, wa->mask);
+			  cur, cur & wa->read, wa->set);
 
 		return false;
 	}
@@ -1015,7 +1036,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
 	intel_uncore_forcewake_get__locked(uncore, fw);
 
 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
-		intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+		if (wa->clr)
+			intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
+		else
+			intel_uncore_write_fw(uncore, wa->reg, wa->set);
 		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 			wa_verify(wa,
 				  intel_uncore_read_fw(uncore, wa->reg),
@@ -1239,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
 	case RENDER_CLASS:
 		/*
 		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+		 * Wa_1408556865:tgl
 		 *
 		 * This covers 4 registers which are next to one another :
 		 *   - PS_INVOCATION_COUNT
@@ -1249,6 +1274,12 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
 		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
 				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
 				  RING_FORCE_TO_NONPRIV_RANGE_4);
+
+		/* Wa_1808121037:tgl */
+		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+		/* Wa_1806527549:tgl */
+		whitelist_reg(w, HIZ_CHICKEN);
 		break;
 	default:
 		break;
@@ -1315,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 	struct drm_i915_private *i915 = engine->i915;
 
 	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
-		/* Wa_1606700617:tgl */
-		wa_masked_en(wal,
-			     GEN9_CS_DEBUG_MODE1,
-			     FF_DOP_CLOCK_GATE_DISABLE);
-
-		/* Wa_1607138336:tgl */
+		/*
+		 * Wa_1607138336:tgl
+		 * Wa_1607063988:tgl
+		 */
 		wa_write_or(wal,
 			    GEN9_CTX_PREEMPT_REG,
 			    GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
 
-		/* Wa_1607030317:tgl */
-		/* Wa_1607186500:tgl */
-		/* Wa_1607297627:tgl */
+		/*
+		 * Wa_1607030317:tgl
+		 * Wa_1607186500:tgl
+		 * Wa_1607297627:tgl there are 3 entries for this WA in the
+		 * BSpec; 2 of them say it is fixed on B0, the other one says
+		 * it is permanent
+		 */
 		wa_masked_en(wal,
 			     GEN6_RC_SLEEP_PSMI_CONTROL,
 			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1340,6 +1373,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_write_or(wal,
 			    GEN7_SARCHKMD,
 			    GEN7_DISABLE_SAMPLER_PREFETCH);
+
+		/* Wa_1407928979:tgl */
+		wa_write_or(wal,
+			    GEN7_FF_THREAD_MODE,
+			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+		/*
+		 * Wa_1409085225:tgl
+		 * Wa_14010229206:tgl
+		 */
+		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+		/* Wa_1408615072:tgl */
+		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+			    VSUNIT_CLKGATE_DIS_TGL);
+	}
+
+	if (IS_TIGERLAKE(i915)) {
+		/* Wa_1606931601:tgl */
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+		/* Wa_1409804808:tgl */
+		wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+			     GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+		/* Wa_1606700617:tgl */
+		wa_masked_en(wal,
+			     GEN9_CS_DEBUG_MODE1,
+			     FF_DOP_CLOCK_GATE_DISABLE);
 	}
 
 	if (IS_GEN(i915, 11)) {
@@ -1405,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 				   GEN11_SCRATCH2,
 				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
 				   0);
+
+		/* WaEnable32PlaneMode:icl */
+		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+			     GEN11_ENABLE_32_PLANE_MODE);
+
+		/*
+		 * Wa_1408615072:icl,ehl  (vsunit)
+		 * Wa_1407596294:icl,ehl  (hsunit)
+		 */
+		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+		/* Wa_1407352427:icl,ehl */
+		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+			    PSDUNIT_CLKGATE_DIS);
+
+		/* Wa_1406680159:icl,ehl */
+		wa_write_or(wal,
+			    SUBSLICE_UNIT_LEVEL_CLKGATE,
+			    GWUNIT_CLKGATE_DIS);
+
+		/*
+		 * Wa_1408767742:icl[a2..forever],ehl[all]
+		 * Wa_1605460711:icl[a0..c0]
+		 */
+		wa_write_or(wal,
+			    GEN7_FF_THREAD_MODE,
+			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
 	}
 
-	if (IS_GEN_RANGE(i915, 9, 11)) {
-		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+	if (IS_GEN_RANGE(i915, 9, 12)) {
+		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
 		wa_masked_en(wal,
 			     GEN7_FF_SLICE_CS_CHICKEN1,
 			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1452,6 +1542,52 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			    GEN8_L3SQCREG4,
 			    GEN8_LQSC_FLUSH_COHERENT_LINES);
 	}
+
+	if (IS_GEN(i915, 7))
+		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+		wa_masked_en(wal,
+			     GFX_MODE_GEN7,
+			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+
+	if (IS_GEN_RANGE(i915, 6, 7))
+		/*
+		 * We need to disable the AsyncFlip performance optimisations in
+		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
+		 * already be programmed to '1' on all products.
+		 *
+		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+		 */
+		wa_masked_en(wal,
+			     MI_MODE,
+			     ASYNC_FLIP_PERF_DISABLE);
+
+	if (IS_GEN(i915, 6)) {
+		/*
+		 * Required for the hardware to program scanline values for
+		 * waiting
+		 * WaEnableFlushTlbInvalidationMode:snb
+		 */
+		wa_masked_en(wal,
+			     GFX_MODE,
+			     GFX_TLB_INVALIDATE_EXPLICIT);
+
+		/*
+		 * From the Sandybridge PRM, volume 1 part 3, page 24:
+		 * "If this bit is set, STCunit will have LRA as replacement
+		 *  policy. [...] This bit must be reset. LRA replacement
+		 *  policy is not supported."
+		 */
+		wa_masked_dis(wal,
+			      CACHE_MODE_0,
+			      CM0_STC_EVICT_DISABLE_LRA_SNB);
+	}
+
+	if (IS_GEN_RANGE(i915, 4, 6))
+		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+		wa_add(wal, MI_MODE,
+		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+		       /* XXX bit doesn't stick on Broadwater */
+		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
 }
 
 static void
@@ -1470,7 +1606,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 static void
 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
-	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
 		return;
 
 	if (engine->class == RENDER_CLASS)
@@ -1483,7 +1619,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 {
 	struct i915_wa_list *wal = &engine->wa_list;
 
-	if (INTEL_GEN(engine->i915) < 8)
+	if (INTEL_GEN(engine->i915) < 4)
 		return;
 
 	wa_init_start(wal, "engine", engine->name);
@@ -1626,6 +1762,16 @@ static int engine_wa_list_verify(struct intel_context *ce,
 		goto err_vma;
 	}
 
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+	if (err) {
+		i915_request_add(rq);
+		goto err_vma;
+	}
+
 	err = wa_list_srm(rq, wal, vma);
 	if (err)
 		goto err_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index e27ab1b..d166a71 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -13,8 +13,8 @@
 
 struct i915_wa {
 	i915_reg_t	reg;
-	u32		mask;
-	u32		val;
+	u32		clr;
+	u32		set;
 	u32		read;
 };
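
This two-field change is the pivot of the intel_workarounds.c refactor above: a workaround is no longer a (mask, val) pair always applied as a read-modify-write, but an explicit (clr, set) pair where clr == 0 means "plain write" (needed for masked-bit registers, where an rmw of stale hardware contents would be wrong). Summarizing the helper and wa_list_apply() hunks:

	/* Application:
	 *   wa->clr != 0:  reg = (reg & ~wa->clr) | wa->set   (rmw)
	 *   wa->clr == 0:  reg = wa->set                      (direct write)
	 *
	 * Helper mapping:
	 *   wa_write(reg, set)       ->  clr = ~0,  set = set
	 *   wa_write_or(reg, set)    ->  clr = set, set = set
	 *   wa_masked_en(reg, val)   ->  clr = 0,   set = _MASKED_BIT_ENABLE(val)
	 *   wa_masked_dis(reg, val)  ->  clr = 0,   set = _MASKED_BIT_DISABLE(val)
	 */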
 
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 0000000..610ca76
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+	0x00000001, 0x26020128, 0x00000024, 0x00000000,
+	0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+	0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+	0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+	0x00600001, 0x20600061, 0x00000000, 0x00000000,
+	0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+	0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+	0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+	0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+	0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+	0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+	0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+	0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+	0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+	0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+	0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+	0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+	0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+	0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+	0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+	0x00000001, 0x20840021, 0x00000068, 0x00000000,
+	0x00000001, 0x20880061, 0x00000000, 0x00000003,
+	0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+	0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+	0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+	0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+	0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+	0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+	0x00000001, 0x26020128, 0x00000024, 0x00000000,
+	0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+	0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+	0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+	0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+	0x00200001, 0x20400121, 0x00450020, 0x00000000,
+	0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+	0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+	0x00800001, 0x20600061, 0x00000000, 0x00000000,
+	0x00800001, 0x20800061, 0x00000000, 0x00000000,
+	0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+	0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+	0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+	0x00800001, 0x21000061, 0x00000000, 0x00000000,
+	0x00800001, 0x21200061, 0x00000000, 0x00000000,
+	0x00800001, 0x21400061, 0x00000000, 0x00000000,
+	0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+	0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+	0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+	0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+	0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+	0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+	0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+	0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index f2806381..4a53ded 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -65,6 +65,9 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 		return NULL;
 	}
 	i915_active_init(&ring->vma->active, NULL, NULL);
+	__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
+	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
+	ring->vma->node.size = sz;
 
 	intel_ring_update_space(ring);
 
@@ -241,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->active.requests, sched.link) {
-		if (!i915_request_signaled(request))
-			dma_fence_set_error(&request->fence, -EIO);
-
+		i915_request_set_error_once(request, -EIO);
 		i915_request_mark_complete(request);
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d58..697114d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
 	return err;
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+				     unsigned long *saved)
+{
+	*saved = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
+	intel_engine_pm_get(engine);
+	intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+				    unsigned long saved)
+{
+	intel_engine_pm_put(engine);
+
+	engine->props.heartbeat_interval_ms = saved;
+}
+
 static int live_idle_flush(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
 	/* Check that we can flush the idle barriers */
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_pm_get(engine);
+		unsigned long heartbeat;
+
+		engine_heartbeat_disable(engine, &heartbeat);
 		err = __live_idle_pulse(engine, intel_engine_flush_barriers);
-		intel_engine_pm_put(engine);
+		engine_heartbeat_enable(engine, heartbeat);
 		if (err)
 			break;
 	}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
 	/* Check that heartbeat pulses flush the idle barriers */
 
 	for_each_engine(engine, gt, id) {
-		intel_engine_pm_get(engine);
+		unsigned long heartbeat;
+
+		engine_heartbeat_disable(engine, &heartbeat);
 		err = __live_idle_pulse(engine, intel_engine_pulse);
-		intel_engine_pm_put(engine);
+		engine_heartbeat_enable(engine, heartbeat);
 		if (err && err != -ENODEV)
 			break;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3e5e6c8..2b2efff 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
 cancel_rq:
 	if (err) {
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
 		i915_request_add(rq);
 	}
 unpin_hws:
@@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg)
 	if (!intel_has_reset_engine(gt))
 		return 0;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	igt_global_reset_lock(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index fd3770e..a912159 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -18,10 +18,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 
 	wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
 
-	if (!get_ia_constants(llc, &consts)) {
-		err = -ENODEV;
+	if (!get_ia_constants(llc, &consts))
 		goto out_rpm;
-	}
 
 	for (gpu_freq = consts.min_gpu_freq;
 	     gpu_freq <= consts.max_gpu_freq;
@@ -71,10 +69,5 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 
 int st_llc_verify(struct intel_llc *llc)
 {
-	int err = 0;
-
-	if (HAS_LLC(llc_to_gt(llc)->i915))
-		err = gen6_verify_ring_freq(llc);
-
-	return err;
+	return gen6_verify_ring_freq(llc);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index b292f8c..6f06ba7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -68,6 +68,71 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
 	engine->props.heartbeat_interval_ms = saved;
 }
 
+static int wait_for_submit(struct intel_engine_cs *engine,
+			   struct i915_request *rq,
+			   unsigned long timeout)
+{
+	timeout += jiffies;
+	do {
+		cond_resched();
+		intel_engine_flush_submission(engine);
+
+		if (READ_ONCE(engine->execlists.pending[0]))
+			continue;
+
+		if (i915_request_is_active(rq))
+			return 0;
+
+		if (i915_request_started(rq)) /* that was quick! */
+			return 0;
+	} while (time_before(jiffies, timeout));
+
+	return -ETIME;
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+			  struct i915_request *rq,
+			  unsigned long timeout)
+{
+	timeout += jiffies;
+
+	do {
+		cond_resched();
+		intel_engine_flush_submission(engine);
+
+		if (READ_ONCE(engine->execlists.pending[0]))
+			continue;
+
+		if (i915_request_completed(rq))
+			break;
+
+		if (READ_ONCE(rq->fence.error))
+			break;
+	} while (time_before(jiffies, timeout));
+
+	flush_scheduled_work();
+
+	if (rq->fence.error != -EIO) {
+		pr_err("%s: hanging request %llx:%lld not reset\n",
+		       engine->name,
+		       rq->fence.context,
+		       rq->fence.seqno);
+		return -EINVAL;
+	}
+
+	/* Give the request a jiffie to complete after flushing the worker */
+	if (i915_request_wait(rq, 0,
+			      max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+		pr_err("%s: hanging request %llx:%lld did not complete\n",
+		       engine->name,
+		       rq->fence.context,
+		       rq->fence.seqno);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
 static int live_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -285,6 +350,84 @@ static int live_unlite_preempt(void *arg)
 	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
 }
 
+static int live_pin_rewind(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * We have to be careful not to trust intel_ring too much, for example
+	 * ring->head is updated upon retire which is out of sync with pinning
+	 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+	 * or else we risk writing an older, stale value.
+	 *
+	 * To simulate this, let's apply a bit of deliberate sabotague.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+		struct intel_ring *ring;
+		struct igt_live_test t;
+
+		if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+			err = -EIO;
+			break;
+		}
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		err = intel_context_pin(ce);
+		if (err) {
+			intel_context_put(ce);
+			break;
+		}
+
+		/* Keep the context awake while we play games */
+		err = i915_active_acquire(&ce->active);
+		if (err) {
+			intel_context_unpin(ce);
+			intel_context_put(ce);
+			break;
+		}
+		ring = ce->ring;
+
+		/* Poison the ring, and offset the next request from HEAD */
+		memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+		ring->emit = ring->size / 2;
+		ring->tail = ring->emit;
+		GEM_BUG_ON(ring->head);
+
+		intel_context_unpin(ce);
+
+		/* Submit a simple nop request */
+		GEM_BUG_ON(intel_context_is_pinned(ce));
+		rq = intel_context_create_request(ce);
+		i915_active_release(&ce->active); /* e.g. async retire */
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
+		GEM_BUG_ON(!rq->head);
+		i915_request_add(rq);
+
+		/* Expect not to hang! */
+		if (igt_live_test_end(&t)) {
+			err = -EIO;
+			break;
+		}
+	}
+
+	return err;
+}
+
 static int live_hold_reset(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -386,6 +529,152 @@ static int live_hold_reset(void *arg)
 	return err;
 }
 
+static const char *error_repr(int err)
+{
+	return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+	static const struct error_phase {
+		enum { GOOD = 0, BAD = -EIO } error[2];
+	} phases[] = {
+		{ { BAD,  GOOD } },
+		{ { BAD,  BAD  } },
+		{ { BAD,  GOOD } },
+		{ { GOOD, GOOD } }, /* sentinel */
+	};
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+	 * of invalid commands in user batches that will cause a GPU hang.
+	 * This is a faster mechanism than using hangcheck/heartbeats, but
+	 * only detects problems the HW knows about -- it will not warn when
+	 * we kill the HW!
+	 *
+	 * To verify our detection and reset, we throw some invalid commands
+	 * at the HW and wait for the interrupt.
+	 */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		const struct error_phase *p;
+		unsigned long heartbeat;
+		int err = 0;
+
+		engine_heartbeat_disable(engine, &heartbeat);
+
+		for (p = phases; p->error[0] != GOOD; p++) {
+			struct i915_request *client[ARRAY_SIZE(phases->error)];
+			u32 *cs;
+			int i;
+
+			memset(client, 0, sizeof(client)); /* zero every slot */
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
+				struct intel_context *ce;
+				struct i915_request *rq;
+
+				ce = intel_context_create(engine);
+				if (IS_ERR(ce)) {
+					err = PTR_ERR(ce);
+					goto out;
+				}
+
+				rq = intel_context_create_request(ce);
+				intel_context_put(ce);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					goto out;
+				}
+
+				if (rq->engine->emit_init_breadcrumb) {
+					err = rq->engine->emit_init_breadcrumb(rq);
+					if (err) {
+						i915_request_add(rq);
+						goto out;
+					}
+				}
+
+				cs = intel_ring_begin(rq, 2);
+				if (IS_ERR(cs)) {
+					i915_request_add(rq);
+					err = PTR_ERR(cs);
+					goto out;
+				}
+
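+				/* An invalid instruction trips the CS error interrupt */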
+				if (p->error[i]) {
+					*cs++ = 0xdeadbeef;
+					*cs++ = 0xdeadbeef;
+				} else {
+					*cs++ = MI_NOOP;
+					*cs++ = MI_NOOP;
+				}
+
+				client[i] = i915_request_get(rq);
+				i915_request_add(rq);
+			}
+
+			err = wait_for_submit(engine, client[0], HZ / 2);
+			if (err) {
+				pr_err("%s: first request did not start within time!\n",
+				       engine->name);
+				err = -ETIME;
+				goto out;
+			}
+
+			for (i = 0; i < ARRAY_SIZE(client); i++) {
+				if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+					pr_debug("%s: %s request incomplete!\n",
+						 engine->name,
+						 error_repr(p->error[i]));
+
+				if (!i915_request_started(client[i])) {
+					pr_debug("%s: %s request not started!\n",
+						 engine->name,
+						 error_repr(p->error[i]));
+					err = -ETIME;
+					goto out;
+				}
+
+				/* Kick the tasklet to process the error */
+				intel_engine_flush_submission(engine);
+				if (client[i]->fence.error != p->error[i]) {
+					pr_err("%s: %s request completed with wrong error code: %d\n",
+					       engine->name,
+					       error_repr(p->error[i]),
+					       client[i]->fence.error);
+					err = -EINVAL;
+					goto out;
+				}
+			}
+
+out:
+			for (i = 0; i < ARRAY_SIZE(client); i++)
+				if (client[i])
+					i915_request_put(client[i]);
+			if (err) {
+				pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+				       engine->name, p - phases,
+				       p->error[0], p->error[1]);
+				break;
+			}
+		}
+
+		engine_heartbeat_enable(engine, heartbeat);
+		if (err) {
+			intel_gt_set_wedged(gt);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int
 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 {
@@ -580,6 +869,10 @@ static int live_timeslice_preempt(void *arg)
 	if (err)
 		goto err_map;
 
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_pin;
+
 	for_each_prime_number_from(count, 1, 16) {
 		struct intel_engine_cs *engine;
 		enum intel_engine_id id;
@@ -614,6 +907,215 @@ static int live_timeslice_preempt(void *arg)
 	return err;
 }
 
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+		struct i915_request *wait,
+		void *slot, int idx)
+{
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	if (wait) {
+		err = i915_request_await_dma_fence(rq, &wait->fence);
+		if (err)
+			goto err;
+	}
+
+	cs = intel_ring_begin(rq, 10);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err;
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+
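+	/* Wait until the CPU writes a non-zero value into the slot */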
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = offset;
+	*cs++ = 0;
+
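+	/* Then record RING_TIMESTAMP into slot[idx] to log execution order */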
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = offset + idx * sizeof(u32);
+	*cs++ = 0;
+
+	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_MASK;
+	err = 0;
+err:
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err) {
+		i915_request_put(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static int live_timeslice_rewind(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * The usual presumption on timeslice expiration is that we replace
+	 * the active context with another. However, given a chain of
+	 * dependencies we may end up replacing the context with itself,
+	 * having completed only some of its requests, forcing us to rewind
+	 * the RING_TAIL of the original request.
+	 */
+	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		enum { A1, A2, B1 };
+		enum { X = 1, Y, Z };
+		struct i915_request *rq[3] = {};
+		struct intel_context *ce;
+		unsigned long heartbeat;
+		unsigned long timeslice;
+		int i, err = 0;
+		u32 *slot;
+
+		if (!intel_engine_has_timeslices(engine))
+			continue;
+
+		/*
+		 * A:rq1 -- semaphore wait, timestamp X
+		 * A:rq2 -- write timestamp Y
+		 *
+		 * B:rq1 [await A:rq1] -- write timestamp Z
+		 *
+		 * Force timeslice, release semaphore.
+		 *
+		 * Expect execution/evaluation order XZY
+		 */
+
+		engine_heartbeat_disable(engine, &heartbeat);
+		timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+		slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err;
+		}
+
+		rq[0] = create_rewinder(ce, NULL, slot, 1);
+		if (IS_ERR(rq[0])) {
+			err = PTR_ERR(rq[0]);
+			intel_context_put(ce);
+			goto err;
+		}
+
+		rq[1] = create_rewinder(ce, NULL, slot, 2);
+		intel_context_put(ce);
+		if (IS_ERR(rq[1])) {
+			err = PTR_ERR(rq[1]);
+			goto err;
+		}
+
+		err = wait_for_submit(engine, rq[1], HZ / 2);
+		if (err) {
+			pr_err("%s: failed to submit first context\n",
+			       engine->name);
+			goto err;
+		}
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err;
+		}
+
+		rq[2] = create_rewinder(ce, rq[0], slot, 3);
+		intel_context_put(ce);
+		if (IS_ERR(rq[2])) {
+			err = PTR_ERR(rq[2]);
+			goto err;
+		}
+
+		err = wait_for_submit(engine, rq[2], HZ / 2);
+		if (err) {
+			pr_err("%s: failed to submit second context\n",
+			       engine->name);
+			goto err;
+		}
+		GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
+
+		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+		GEM_BUG_ON(!i915_request_is_active(rq[A2]));
+		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+
+		/* Wait for the timeslice to kick in */
+		del_timer(&engine->execlists.timer);
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+		intel_engine_flush_submission(engine);
+
+		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+		GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+		/* Release the hounds! */
+		slot[0] = 1;
+		wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
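+		/* Poll each timestamp slot until the GPU has written it */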
+		for (i = 1; i <= 3; i++) {
+			unsigned long timeout = jiffies + HZ / 2;
+
+			while (!READ_ONCE(slot[i]) &&
+			       time_before(jiffies, timeout))
+				;
+
+			if (!time_before(jiffies, timeout)) {
+				pr_err("%s: rq[%d] timed out\n",
+				       engine->name, i - 1);
+				err = -ETIME;
+				goto err;
+			}
+
+			pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+		}
+
+		/* XZY: XZ < XY */
+		if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+			pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+			       engine->name,
+			       slot[Z] - slot[X],
+			       slot[Y] - slot[X]);
+			err = -EINVAL;
+		}
+
+err:
+		memset32(&slot[0], -1, 4);
+		wmb();
+
+		engine->props.timeslice_duration_ms = timeslice;
+		engine_heartbeat_enable(engine, heartbeat);
+		for (i = 0; i < 3; i++)
+			i915_request_put(rq[i]);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static struct i915_request *nop_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;
@@ -628,21 +1130,6 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
 	return rq;
 }
 
-static int wait_for_submit(struct intel_engine_cs *engine,
-			   struct i915_request *rq,
-			   unsigned long timeout)
-{
-	timeout += jiffies;
-	do {
-		cond_resched();
-		intel_engine_flush_submission(engine);
-		if (i915_request_is_active(rq))
-			return 0;
-	} while (time_before(jiffies, timeout));
-
-	return -ETIME;
-}
-
 static long timeslice_threshold(const struct intel_engine_cs *engine)
 {
 	return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
@@ -688,6 +1175,10 @@ static int live_timeslice_queue(void *arg)
 	if (err)
 		goto err_map;
 
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_pin;
+
 	for_each_engine(engine, gt, id) {
 		struct i915_sched_attr attr = {
 			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
@@ -774,6 +1265,7 @@ static int live_timeslice_queue(void *arg)
 			break;
 	}
 
+err_pin:
 	i915_vma_unpin(vma);
 err_map:
 	i915_gem_object_unpin_map(obj);
@@ -832,6 +1324,10 @@ static int live_busywait_preempt(void *arg)
 	if (err)
 		goto err_map;
 
+	err = i915_vma_sync(vma);
+	if (err)
+		goto err_vma;
+
 	for_each_engine(engine, gt, id) {
 		struct i915_request *lo, *hi;
 		struct igt_live_test t;
@@ -1352,14 +1848,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-		err = -EIO;
-		goto out;
-	}
-
-	if (rq->fence.error != -EIO) {
-		pr_err("Cancelled inflight0 request did not report -EIO\n");
-		err = -EINVAL;
+	err = wait_for_reset(arg->engine, rq, HZ / 2);
+	if (err) {
+		pr_err("Cancelled inflight0 request did not reset\n");
 		goto out;
 	}
 
@@ -1417,10 +1908,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 
 	igt_spinner_end(&arg->a.spin);
-	if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
-		err = -EIO;
+	err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+	if (err)
 		goto out;
-	}
 
 	if (rq[0]->fence.error != 0) {
 		pr_err("Normal inflight0 request did not complete\n");
@@ -1500,10 +1990,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
-		err = -EIO;
+	err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+	if (err)
 		goto out;
-	}
 
 	if (rq[0]->fence.error != -EIO) {
 		pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -1561,14 +2050,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-		err = -EIO;
-		goto out;
-	}
-
-	if (rq->fence.error != -EIO) {
-		pr_err("Cancelled inflight0 request did not report -EIO\n");
-		err = -EINVAL;
+	err = wait_for_reset(arg->engine, rq, HZ / 2);
+	if (err) {
+		pr_err("Cancelled inflight0 request did not reset\n");
 		goto out;
 	}
 
@@ -1656,7 +2140,7 @@ static int live_suppress_self_preempt(void *arg)
 	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
 		return 0;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0; /* presume black blox */
 
 	if (intel_vgpu_active(gt->i915))
@@ -2279,117 +2763,6 @@ static int live_preempt_gang(void *arg)
 	return 0;
 }
 
-static int live_preempt_hang(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct igt_spinner spin_hi, spin_lo;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = -ENOMEM;
-
-	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
-		return 0;
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	if (igt_spinner_init(&spin_hi, gt))
-		return -ENOMEM;
-
-	if (igt_spinner_init(&spin_lo, gt))
-		goto err_spin_hi;
-
-	ctx_hi = kernel_context(gt->i915);
-	if (!ctx_hi)
-		goto err_spin_lo;
-	ctx_hi->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
-	ctx_lo = kernel_context(gt->i915);
-	if (!ctx_lo)
-		goto err_ctx_hi;
-	ctx_lo->sched.priority =
-		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
-	for_each_engine(engine, gt, id) {
-		struct i915_request *rq;
-
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		i915_request_add(rq);
-		if (!igt_wait_for_spinner(&spin_lo, rq)) {
-			GEM_TRACE("lo spinner failed to start\n");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			igt_spinner_end(&spin_lo);
-			err = PTR_ERR(rq);
-			goto err_ctx_lo;
-		}
-
-		init_completion(&engine->execlists.preempt_hang.completion);
-		engine->execlists.preempt_hang.inject_hang = true;
-
-		i915_request_add(rq);
-
-		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
-						 HZ / 10)) {
-			pr_err("Preemption did not occur within timeout!");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-		intel_engine_reset(engine, NULL);
-		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-
-		engine->execlists.preempt_hang.inject_hang = false;
-
-		if (!igt_wait_for_spinner(&spin_hi, rq)) {
-			GEM_TRACE("hi spinner failed to start\n");
-			GEM_TRACE_DUMP();
-			intel_gt_set_wedged(gt);
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-
-		igt_spinner_end(&spin_hi);
-		igt_spinner_end(&spin_lo);
-		if (igt_flush_test(gt->i915)) {
-			err = -EIO;
-			goto err_ctx_lo;
-		}
-	}
-
-	err = 0;
-err_ctx_lo:
-	kernel_context_close(ctx_lo);
-err_ctx_hi:
-	kernel_context_close(ctx_hi);
-err_spin_lo:
-	igt_spinner_fini(&spin_lo);
-err_spin_hi:
-	igt_spinner_fini(&spin_hi);
-	return err;
-}
-
 static int live_preempt_timeout(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -2882,7 +3255,7 @@ static int live_virtual_engine(void *arg)
 	unsigned int class, inst;
 	int err;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	for_each_engine(engine, gt, id) {
@@ -3015,7 +3388,7 @@ static int live_virtual_mask(void *arg)
 	unsigned int class, inst;
 	int err;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3055,6 +3428,10 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	if (IS_ERR(scratch))
 		return PTR_ERR(scratch);
 
+	err = i915_vma_sync(scratch);
+	if (err)
+		goto out_scratch;
+
 	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
@@ -3153,7 +3530,7 @@ static int live_virtual_preserved(void *arg)
 	 * are preserved.
 	 */
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	/* As we use CS_GPR we cannot run before they existed on all engines. */
@@ -3243,15 +3620,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
 	rq[0] = ERR_PTR(-ENOMEM);
 	for_each_engine(master, gt, id) {
 		struct i915_sw_fence fence = {};
+		struct intel_context *ce;
 
 		if (master->class == class)
 			continue;
 
+		ce = intel_context_create(master);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
 		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
 
-		rq[0] = igt_spinner_create_request(&spin,
-						   master->kernel_context,
-						   MI_NOOP);
+		rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		intel_context_put(ce);
 		if (IS_ERR(rq[0])) {
 			err = PTR_ERR(rq[0]);
 			goto out;
@@ -3377,7 +3760,7 @@ static int live_virtual_bond(void *arg)
 	unsigned int class, inst;
 	int err;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3538,7 +3921,7 @@ static int live_virtual_reset(void *arg)
 	 * forgotten.
 	 */
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	if (!intel_has_reset_engine(gt))
@@ -3571,8 +3954,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_sanitycheck),
 		SUBTEST(live_unlite_switch),
 		SUBTEST(live_unlite_preempt),
+		SUBTEST(live_pin_rewind),
 		SUBTEST(live_hold_reset),
+		SUBTEST(live_error_interrupt),
 		SUBTEST(live_timeslice_preempt),
+		SUBTEST(live_timeslice_rewind),
 		SUBTEST(live_timeslice_queue),
 		SUBTEST(live_busywait_preempt),
 		SUBTEST(live_preempt),
@@ -3583,7 +3969,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
 		SUBTEST(live_preempt_gang),
-		SUBTEST(live_preempt_hang),
 		SUBTEST(live_preempt_timeout),
 		SUBTEST(live_preempt_smoke),
 		SUBTEST(live_virtual_engine),
@@ -3631,6 +4016,62 @@ static void hexdump(const void *buf, size_t len)
 	}
 }
 
+static int emit_semaphore_signal(struct intel_context *ce, void *slot)
+{
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return PTR_ERR(cs);
+	}
+
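+	/* Write 1 into the status-page slot, releasing any semaphore waiters */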
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = 1;
+
+	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+	i915_request_add(rq);
+	return 0;
+}
+
+static int context_flush(struct intel_context *ce, long timeout)
+{
+	struct i915_request *rq;
+	struct dma_fence *fence;
+	int err = 0;
+
+	rq = intel_engine_create_kernel_request(ce->engine);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
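+	/*
+	 * Queue a kernel request behind the context's last request; once
+	 * it completes, the target context must have been switched out
+	 * and its image saved back to memory.
+	 */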
+	fence = i915_active_fence_get(&ce->timeline->last_request);
+	if (fence) {
+		i915_request_await_dma_fence(rq, fence);
+		dma_fence_put(fence);
+	}
+
+	rq = i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, timeout) < 0)
+		err = -ETIME;
+	i915_request_put(rq);
+
+	rmb(); /* We know the request is written, make sure all state is too! */
+	return err;
+}
+
 static int live_lrc_layout(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -3797,6 +4238,11 @@ static int live_lrc_fixed(void *arg)
 				CTX_BB_STATE - 1,
 				"BB_STATE"
 			},
+			{
+				i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
+				CTX_TIMESTAMP - 1,
+				"RING_CTX_TIMESTAMP"
+			},
 			{ },
 		}, *t;
 		u32 *hw;
@@ -3880,8 +4326,16 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
 	*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
 	*cs++ = 0;
 
+	i915_vma_lock(scratch);
+	err = i915_request_await_object(rq, scratch->obj, true);
+	if (!err)
+		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(scratch);
+
 	i915_request_get(rq);
 	i915_request_add(rq);
+	if (err)
+		goto err_rq;
 
 	intel_engine_flush_submission(engine);
 	expected[RING_TAIL_IDX] = ce->ring->tail;
@@ -3947,13 +4401,13 @@ static int live_lrc_state(void *arg)
 	return err;
 }
 
-static int gpr_make_dirty(struct intel_engine_cs *engine)
+static int gpr_make_dirty(struct intel_context *ce)
 {
 	struct i915_request *rq;
 	u32 *cs;
 	int n;
 
-	rq = intel_engine_create_kernel_request(engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -3965,20 +4419,79 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 
 	*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
 	for (n = 0; n < NUM_GPR_DW; n++) {
-		*cs++ = CS_GPR(engine, n);
+		*cs++ = CS_GPR(ce->engine, n);
 		*cs++ = STACK_MAGIC;
 	}
 	*cs++ = MI_NOOP;
 
 	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 	i915_request_add(rq);
 
 	return 0;
 }
 
-static int __live_gpr_clear(struct intel_engine_cs *engine,
-			    struct i915_vma *scratch)
+static struct i915_request *
+__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
 {
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+	int n;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
+	if (IS_ERR(cs)) {
+		i915_request_add(rq);
+		return ERR_CAST(cs);
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = offset;
+	*cs++ = 0;
+
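+	/* Once released, copy each CS_GPR register into the scratch buffer */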
+	for (n = 0; n < NUM_GPR_DW; n++) {
+		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+		*cs++ = CS_GPR(ce->engine, n);
+		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+		*cs++ = 0;
+	}
+
+	i915_vma_lock(scratch);
+	err = i915_request_await_object(rq, scratch->obj, true);
+	if (!err)
+		err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(scratch);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err) {
+		i915_request_put(rq);
+		rq = ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+static int __live_lrc_gpr(struct intel_engine_cs *engine,
+			  struct i915_vma *scratch,
+			  bool preempt)
+{
+	u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
 	struct intel_context *ce;
 	struct i915_request *rq;
 	u32 *cs;
@@ -3988,7 +4501,7 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
 	if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
 		return 0; /* GPR only on rcs0 for gen8 */
 
-	err = gpr_make_dirty(engine);
+	err = gpr_make_dirty(engine->kernel_context);
 	if (err)
 		return err;
 
@@ -3996,28 +4509,28 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
-	rq = intel_context_create_request(ce);
+	rq = __gpr_read(ce, scratch, slot);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_put;
 	}
 
-	cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		i915_request_add(rq);
-		goto err_put;
-	}
+	err = wait_for_submit(engine, rq, HZ / 2);
+	if (err)
+		goto err_rq;
 
-	for (n = 0; n < NUM_GPR_DW; n++) {
-		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-		*cs++ = CS_GPR(engine, n);
-		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
-		*cs++ = 0;
-	}
+	if (preempt) {
+		err = gpr_make_dirty(engine->kernel_context);
+		if (err)
+			goto err_rq;
 
-	i915_request_get(rq);
-	i915_request_add(rq);
+		err = emit_semaphore_signal(engine->kernel_context, slot);
+		if (err)
+			goto err_rq;
+	} else {
+		slot[0] = 1;
+		wmb();
+	}
 
 	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 		err = -ETIME;
@@ -4044,13 +4557,15 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
 	i915_gem_object_unpin_map(scratch->obj);
 
 err_rq:
+	memset32(&slot[0], -1, 4);
+	wmb();
 	i915_request_put(rq);
 err_put:
 	intel_context_put(ce);
 	return err;
 }
 
-static int live_gpr_clear(void *arg)
+static int live_lrc_gpr(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
@@ -4068,7 +4583,971 @@ static int live_gpr_clear(void *arg)
 		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_gpr_clear(engine, scratch);
+		unsigned long heartbeat;
+
+		engine_heartbeat_disable(engine, &heartbeat);
+
+		err = __live_lrc_gpr(engine, scratch, false);
+		if (err)
+			goto err;
+
+		err = __live_lrc_gpr(engine, scratch, true);
+		if (err)
+			goto err;
+
+err:
+		engine_heartbeat_enable(engine, heartbeat);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			break;
+	}
+
+	i915_vma_unpin_and_release(&scratch, 0);
+	return err;
+}
+
+static struct i915_request *
+create_timestamp(struct intel_context *ce, void *slot, int idx)
+{
+	const u32 offset =
+		i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(slot);
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return rq;
+
+	cs = intel_ring_begin(rq, 10);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err;
+	}
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_NOOP;
+
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = offset;
+	*cs++ = 0;
+
+	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+	*cs++ = offset + idx * sizeof(u32);
+	*cs++ = 0;
+
+	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_MASK;
+	err = 0;
+err:
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err) {
+		i915_request_put(rq);
+		return ERR_PTR(err);
+	}
+
+	return rq;
+}
+
+struct lrc_timestamp {
+	struct intel_engine_cs *engine;
+	struct intel_context *ce[2];
+	u32 poison;
+};
+
+static bool timestamp_advanced(u32 start, u32 end)
+{
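+	/* The signed delta tolerates the u32 counter wrapping between samples */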
+	return (s32)(end - start) > 0;
+}
+
+static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
+{
+	u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
+	struct i915_request *rq;
+	u32 timestamp;
+	int err = 0;
+
+	arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
+	rq = create_timestamp(arg->ce[0], slot, 1);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	err = wait_for_submit(rq->engine, rq, HZ / 2);
+	if (err)
+		goto err;
+
+	if (preempt) {
+		arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
+		err = emit_semaphore_signal(arg->ce[1], slot);
+		if (err)
+			goto err;
+	} else {
+		slot[0] = 1;
+		wmb();
+	}
+
+	/* And wait for switch to kernel (to save our context to memory) */
+	err = context_flush(arg->ce[0], HZ / 2);
+	if (err)
+		goto err;
+
+	if (!timestamp_advanced(arg->poison, slot[1])) {
+		pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
+		       arg->engine->name, preempt ? "preempt" : "simple",
+		       arg->poison, slot[1]);
+		err = -EINVAL;
+	}
+
+	timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
+	if (!timestamp_advanced(slot[1], timestamp)) {
+		pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
+		       arg->engine->name, preempt ? "preempt" : "simple",
+		       slot[1], timestamp);
+		err = -EINVAL;
+	}
+
+err:
+	memset32(slot, -1, 4);
+	i915_request_put(rq);
+	return err;
+}
+
+static int live_lrc_timestamp(void *arg)
+{
+	struct lrc_timestamp data = {};
+	struct intel_gt *gt = arg;
+	enum intel_engine_id id;
+	const u32 poison[] = {
+		0,
+		S32_MAX,
+		(u32)S32_MAX + 1,
+		U32_MAX,
+	};
+
+	/*
+	 * We want to verify that the timestamp is saved and restored across
+	 * context switches and is monotonic.
+	 *
+	 * So we do this with a little bit of LRC poisoning to check various
+	 * boundary conditions, and see what happens if we preempt the context
+	 * with a second request (carrying more poison into the timestamp).
+	 */
+
+	for_each_engine(data.engine, gt, id) {
+		unsigned long heartbeat;
+		int i, err = 0;
+
+		engine_heartbeat_disable(data.engine, &heartbeat);
+
+		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+			struct intel_context *tmp;
+
+			tmp = intel_context_create(data.engine);
+			if (IS_ERR(tmp)) {
+				err = PTR_ERR(tmp);
+				goto err;
+			}
+
+			err = intel_context_pin(tmp);
+			if (err) {
+				intel_context_put(tmp);
+				goto err;
+			}
+
+			data.ce[i] = tmp;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(poison); i++) {
+			data.poison = poison[i];
+
+			err = __lrc_timestamp(&data, false);
+			if (err)
+				break;
+
+			err = __lrc_timestamp(&data, true);
+			if (err)
+				break;
+		}
+
+err:
+		engine_heartbeat_enable(data.engine, heartbeat);
+		for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+			if (!data.ce[i])
+				break;
+
+			intel_context_unpin(data.ce[i]);
+			intel_context_put(data.ce[i]);
+		}
+
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_internal(vm->i915, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+	struct i915_vma *batch;
+	u32 dw, x, *cs, *hw;
+
+	batch = create_user_vma(ce->vm, SZ_64K);
+	if (IS_ERR(batch))
+		return batch;
+
+	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		i915_vma_put(batch);
+		return ERR_CAST(cs);
+	}
+
+	x = 0;
+	dw = 0;
+	hw = ce->engine->pinned_default_state;
+	hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+	do {
+		u32 len = hw[dw] & 0x7f;
+
+		if (hw[dw] == 0) {
+			dw++;
+			continue;
+		}
+
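+		/* Only MI_LOAD_REGISTER_IMM (0x22) packets enumerate context registers */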
+		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+			dw += len + 2;
+			continue;
+		}
+
+		dw++;
+		len = (len + 1) / 2;
+		while (len--) {
+			*cs++ = MI_STORE_REGISTER_MEM_GEN8;
+			*cs++ = hw[dw];
+			*cs++ = lower_32_bits(scratch->node.start + x);
+			*cs++ = upper_32_bits(scratch->node.start + x);
+
+			dw += 2;
+			x += 4;
+		}
+	} while (dw < PAGE_SIZE / sizeof(u32) &&
+		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+	*cs++ = MI_BATCH_BUFFER_END;
+
+	i915_gem_object_flush_map(batch->obj);
+	i915_gem_object_unpin_map(batch->obj);
+
+	return batch;
+}
+
+static int move_to_active(struct i915_request *rq,
+			  struct i915_vma *vma,
+			  unsigned int flags)
+{
+	int err;
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, flags);
+	if (!err)
+		err = i915_vma_move_to_active(vma, rq, flags);
+	i915_vma_unlock(vma);
+
+	return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+		 struct i915_vma *before,
+		 struct i915_vma *after,
+		 u32 *sema)
+{
+	struct i915_vma *b_before, *b_after;
+	struct i915_request *rq;
+	u32 *cs;
+	int err;
+
+	b_before = store_context(ce, before);
+	if (IS_ERR(b_before))
+		return ERR_CAST(b_before);
+
+	b_after = store_context(ce, after);
+	if (IS_ERR(b_after)) {
+		rq = ERR_CAST(b_after);
+		goto err_before;
+	}
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		goto err_after;
+
+	err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_rq;
+
+	err = move_to_active(rq, b_before, 0);
+	if (err)
+		goto err_rq;
+
+	err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_rq;
+
+	err = move_to_active(rq, b_after, 0);
+	if (err)
+		goto err_rq;
+
+	cs = intel_ring_begin(rq, 14);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_rq;
+	}
+
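+	/*
+	 * Capture the registers into 'before', stall on the semaphore,
+	 * then capture them again into 'after'.
+	 */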
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+	*cs++ = lower_32_bits(b_before->node.start);
+	*cs++ = upper_32_bits(b_before->node.start);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	*cs++ = MI_SEMAPHORE_WAIT |
+		MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_NEQ_SDD;
+	*cs++ = 0;
+	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(sema);
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+	*cs++ = lower_32_bits(b_after->node.start);
+	*cs++ = upper_32_bits(b_after->node.start);
+
+	intel_ring_advance(rq, cs);
+
+	WRITE_ONCE(*sema, 0);
+	i915_request_get(rq);
+	i915_request_add(rq);
+err_after:
+	i915_vma_put(b_after);
+err_before:
+	i915_vma_put(b_before);
+	return rq;
+
+err_rq:
+	i915_request_add(rq);
+	rq = ERR_PTR(err);
+	goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+	struct i915_vma *batch;
+	u32 dw, *cs, *hw;
+
+	batch = create_user_vma(ce->vm, SZ_64K);
+	if (IS_ERR(batch))
+		return batch;
+
+	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		i915_vma_put(batch);
+		return ERR_CAST(cs);
+	}
+
+	dw = 0;
+	hw = ce->engine->pinned_default_state;
+	hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+	do {
+		u32 len = hw[dw] & 0x7f;
+
+		if (hw[dw] == 0) {
+			dw++;
+			continue;
+		}
+
+		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+			dw += len + 2;
+			continue;
+		}
+
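+		/* Replay each LRI from the default image, substituting the poison value */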
+		dw++;
+		len = (len + 1) / 2;
+		*cs++ = MI_LOAD_REGISTER_IMM(len);
+		while (len--) {
+			*cs++ = hw[dw];
+			*cs++ = poison;
+			dw += 2;
+		}
+	} while (dw < PAGE_SIZE / sizeof(u32) &&
+		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+	*cs++ = MI_BATCH_BUFFER_END;
+
+	i915_gem_object_flush_map(batch->obj);
+	i915_gem_object_unpin_map(batch->obj);
+
+	return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+	struct i915_request *rq;
+	struct i915_vma *batch;
+	u32 *cs;
+	int err;
+
+	batch = load_context(ce, poison);
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
+	}
+
+	err = move_to_active(rq, batch, 0);
+	if (err)
+		goto err_rq;
+
+	cs = intel_ring_begin(rq, 8);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_rq;
+	}
+
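+	/* Run the poison batch, then signal the semaphore to release the recorder */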
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+	*cs++ = lower_32_bits(batch->node.start);
+	*cs++ = upper_32_bits(batch->node.start);
+
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+		offset_in_page(sema);
+	*cs++ = 0;
+	*cs++ = 1;
+
+	intel_ring_advance(rq, cs);
+
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+	i915_request_add(rq);
+err_batch:
+	i915_vma_put(batch);
+	return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+	return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+			     struct i915_vma *ref[2],
+			     struct i915_vma *result[2],
+			     struct intel_context *ce,
+			     u32 poison)
+{
+	u32 x, dw, *hw, *lrc;
+	u32 *A[2], *B[2];
+	int err = 0;
+
+	A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+	if (IS_ERR(A[0]))
+		return PTR_ERR(A[0]);
+
+	A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+	if (IS_ERR(A[1])) {
+		err = PTR_ERR(A[1]);
+		goto err_A0;
+	}
+
+	B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+	if (IS_ERR(B[0])) {
+		err = PTR_ERR(B[0]);
+		goto err_A1;
+	}
+
+	B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+	if (IS_ERR(B[1])) {
+		err = PTR_ERR(B[1]);
+		goto err_B0;
+	}
+
+	lrc = i915_gem_object_pin_map(ce->state->obj,
+				      i915_coherent_map_type(engine->i915));
+	if (IS_ERR(lrc)) {
+		err = PTR_ERR(lrc);
+		goto err_B1;
+	}
+	lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+	x = 0;
+	dw = 0;
+	hw = engine->pinned_default_state;
+	hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+	do {
+		u32 len = hw[dw] & 0x7f;
+
+		if (hw[dw] == 0) {
+			dw++;
+			continue;
+		}
+
+		if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+			dw += len + 2;
+			continue;
+		}
+
+		dw++;
+		len = (len + 1) / 2;
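+		/*
+		 * A[] holds two undisturbed reference reads; flag any
+		 * register that was stable in A[] but differs in the
+		 * poisoned reads B[].
+		 */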
+		while (len--) {
+			if (!is_moving(A[0][x], A[1][x]) &&
+			    (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+				switch (hw[dw] & 4095) {
+				case 0x30: /* RING_HEAD */
+				case 0x34: /* RING_TAIL */
+					break;
+
+				default:
+					pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+					       engine->name, dw,
+					       hw[dw], hw[dw + 1],
+					       A[0][x], B[0][x], B[1][x],
+					       poison, lrc[dw + 1]);
+					err = -EINVAL;
+					break;
+				}
+			}
+			dw += 2;
+			x++;
+		}
+	} while (dw < PAGE_SIZE / sizeof(u32) &&
+		 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+	i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+	i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+	i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+	i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+	i915_gem_object_unpin_map(ref[0]->obj);
+	return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+	u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+	struct i915_vma *ref[2], *result[2];
+	struct intel_context *A, *B;
+	struct i915_request *rq;
+	int err;
+
+	A = intel_context_create(engine);
+	if (IS_ERR(A))
+		return PTR_ERR(A);
+
+	B = intel_context_create(engine);
+	if (IS_ERR(B)) {
+		err = PTR_ERR(B);
+		goto err_A;
+	}
+
+	ref[0] = create_user_vma(A->vm, SZ_64K);
+	if (IS_ERR(ref[0])) {
+		err = PTR_ERR(ref[0]);
+		goto err_B;
+	}
+
+	ref[1] = create_user_vma(A->vm, SZ_64K);
+	if (IS_ERR(ref[1])) {
+		err = PTR_ERR(ref[1]);
+		goto err_ref0;
+	}
+
+	rq = record_registers(A, ref[0], ref[1], sema);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_ref1;
+	}
+
+	WRITE_ONCE(*sema, 1);
+	wmb();
+
+	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+		i915_request_put(rq);
+		err = -ETIME;
+		goto err_ref1;
+	}
+	i915_request_put(rq);
+
+	result[0] = create_user_vma(A->vm, SZ_64K);
+	if (IS_ERR(result[0])) {
+		err = PTR_ERR(result[0]);
+		goto err_ref1;
+	}
+
+	result[1] = create_user_vma(A->vm, SZ_64K);
+	if (IS_ERR(result[1])) {
+		err = PTR_ERR(result[1]);
+		goto err_result0;
+	}
+
+	rq = record_registers(A, result[0], result[1], sema);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_result1;
+	}
+
+	err = poison_registers(B, poison, sema);
+	if (err) {
+		WRITE_ONCE(*sema, -1);
+		i915_request_put(rq);
+		goto err_result1;
+	}
+
+	if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+		i915_request_put(rq);
+		err = -ETIME;
+		goto err_result1;
+	}
+	i915_request_put(rq);
+
+	err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+	i915_vma_put(result[1]);
+err_result0:
+	i915_vma_put(result[0]);
+err_ref1:
+	i915_vma_put(ref[1]);
+err_ref0:
+	i915_vma_put(ref[0]);
+err_B:
+	intel_context_put(B);
+err_A:
+	intel_context_put(A);
+	return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+	if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+		return true;
+
+	if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+		return true;
+
+	return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	const u32 poison[] = {
+		STACK_MAGIC,
+		0x3a3a3a3a,
+		0x5c5c5c5c,
+		0xffffffff,
+		0xffff0000,
+	};
+
+	/*
+	 * Our goal is to verify that per-context state cannot be
+	 * tampered with by another non-privileged client.
+	 *
+	 * We take the list of context registers from the LRI in the default
+	 * context image and attempt to modify that list from a remote context.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		int err = 0;
+		int i;
+
+		/* Just don't even ask */
+		if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+		    skip_isolation(engine))
+			continue;
+
+		intel_engine_pm_get(engine);
+		if (engine->pinned_default_state) {
+			for (i = 0; i < ARRAY_SIZE(poison); i++) {
+				err = __lrc_isolation(engine, poison[i]);
+				if (err)
+					break;
+
+				err = __lrc_isolation(engine, ~poison[i]);
+				if (err)
+					break;
+			}
+		}
+		intel_engine_pm_put(engine);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+			  struct i915_request *rq)
+{
+	const unsigned int bit = I915_RESET_ENGINE + engine->id;
+	unsigned long *lock = &engine->gt->reset.flags;
+
+	if (test_and_set_bit(bit, lock))
+		return;
+
+	tasklet_disable(&engine->execlists.tasklet);
+
+	if (!rq->fence.error)
+		intel_engine_reset(engine, NULL);
+
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+				    struct rnd_state *prng)
+{
+	struct i915_request *rq;
+	int err;
+
+	err = intel_context_pin(ce);
+	if (err)
+		return ERR_PTR(err);
+
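+	/* Scribble random garbage over the context's register state */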
+	prandom_bytes_state(prng,
+			    ce->lrc_reg_state,
+			    ce->engine->context_size -
+			    LRC_STATE_PN * PAGE_SIZE);
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	return rq;
+
+err_unpin:
+	intel_context_unpin(ce);
+	return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+	struct intel_context *ce;
+	struct i915_request *hang;
+	int err = 0;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	hang = garbage(ce, prng);
+	if (IS_ERR(hang)) {
+		err = PTR_ERR(hang);
+		goto err_ce;
+	}
+
+	if (wait_for_submit(engine, hang, HZ / 2)) {
+		i915_request_put(hang);
+		err = -ETIME;
+		goto err_ce;
+	}
+
+	intel_context_set_banned(ce);
+	garbage_reset(engine, hang);
+
+	intel_engine_flush_submission(engine);
+	if (!hang->fence.error) {
+		i915_request_put(hang);
+		pr_err("%s: corrupted context was not reset\n",
+		       engine->name);
+		err = -EINVAL;
+		goto err_ce;
+	}
+
+	if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+		pr_err("%s: corrupted context did not recover\n",
+		       engine->name);
+		i915_request_put(hang);
+		err = -EIO;
+		goto err_ce;
+	}
+	i915_request_put(hang);
+
+err_ce:
+	intel_context_put(ce);
+	return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * Verify that we can recover if one context state is completely
+	 * corrupted.
+	 */
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		I915_RND_STATE(prng);
+		int err = 0, i;
+
+		if (!intel_has_reset_engine(engine->gt))
+			continue;
+
+		intel_engine_pm_get(engine);
+		for (i = 0; i < 3; i++) {
+			err = __lrc_garbage(engine, &prng);
+			if (err)
+				break;
+		}
+		intel_engine_pm_put(engine);
+
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	IGT_TIMEOUT(end_time);
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	ce->runtime.num_underflow = 0;
+	ce->runtime.max_underflow = 0;
+
+	do {
+		unsigned int loop = 1024;
+
+		while (loop) {
+			rq = intel_context_create_request(ce);
+			if (IS_ERR(rq)) {
+				err = PTR_ERR(rq);
+				goto err_rq;
+			}
+
+			if (--loop == 0)
+				i915_request_get(rq);
+
+			i915_request_add(rq);
+		}
+
+		if (__igt_timeout(end_time, NULL))
+			break;
+
+		i915_request_put(rq);
+	} while (1);
+
+	err = i915_request_wait(rq, 0, HZ / 5);
+	if (err < 0) {
+		pr_err("%s: request not completed!\n", engine->name);
+		goto err_wait;
+	}
+
+	igt_flush_test(engine->i915);
+
+	pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
+		engine->name,
+		intel_context_get_total_runtime_ns(ce),
+		intel_context_get_avg_runtime_ns(ce));
+
+	err = 0;
+	if (ce->runtime.num_underflow) {
+		pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
+		       engine->name,
+		       ce->runtime.num_underflow,
+		       ce->runtime.max_underflow);
+		GEM_TRACE_DUMP();
+		err = -EOVERFLOW;
+	}
+
+err_wait:
+	i915_request_put(rq);
+err_rq:
+	intel_context_put(ce);
+	return err;
+}
+
+static int live_pphwsp_runtime(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Check that cumulative context runtime as stored in the pphwsp[16]
+	 * is monotonic.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		err = __live_pphwsp_runtime(engine);
 		if (err)
 			break;
 	}
@@ -4076,7 +5555,6 @@ static int live_gpr_clear(void *arg)
 	if (igt_flush_test(gt->i915))
 		err = -EIO;
 
-	i915_vma_unpin_and_release(&scratch, 0);
 	return err;
 }
 
@@ -4086,7 +5564,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_lrc_layout),
 		SUBTEST(live_lrc_fixed),
 		SUBTEST(live_lrc_state),
-		SUBTEST(live_gpr_clear),
+		SUBTEST(live_lrc_gpr),
+		SUBTEST(live_lrc_isolation),
+		SUBTEST(live_lrc_timestamp),
+		SUBTEST(live_lrc_garbage),
+		SUBTEST(live_pphwsp_runtime),
 	};
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index de1f831..8831ffe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -12,7 +12,8 @@
 #include "selftests/igt_spinner.h"
 
 struct live_mocs {
-	struct drm_i915_mocs_table table;
+	struct drm_i915_mocs_table mocs;
+	struct drm_i915_mocs_table l3cc;
 	struct i915_vma *scratch;
 	void *vaddr;
 };
@@ -70,11 +71,22 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
 
 static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
 {
+	struct drm_i915_mocs_table table;
+	unsigned int flags;
 	int err;
 
-	if (!get_mocs_settings(gt->i915, &arg->table))
+	memset(arg, 0, sizeof(*arg));
+
+	flags = get_mocs_settings(gt->i915, &table);
+	if (!flags)
 		return -EINVAL;
 
+	if (flags & HAS_RENDER_L3CC)
+		arg->l3cc = table;
+
+	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
+		arg->mocs = table;
+
 	arg->scratch = create_scratch(gt);
 	if (IS_ERR(arg->scratch))
 		return PTR_ERR(arg->scratch);
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
 	/* Read the mocs tables back using SRM */
 	offset = i915_ggtt_offset(vma);
 	if (!err)
-		err = read_mocs_table(rq, &arg->table, &offset);
+		err = read_mocs_table(rq, &arg->mocs, &offset);
 	if (!err && ce->engine->class == RENDER_CLASS)
-		err = read_l3cc_table(rq, &arg->table, &offset);
+		err = read_l3cc_table(rq, &arg->l3cc, &offset);
 	offset -= i915_ggtt_offset(vma);
 	GEM_BUG_ON(offset > PAGE_SIZE);
 
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
 	/* Compare the results against the expected tables */
 	vaddr = arg->vaddr;
 	if (!err)
-		err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+		err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
 	if (!err && ce->engine->class == RENDER_CLASS)
-		err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 8cc55a0..95b165f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -12,6 +12,21 @@
 
 #include "selftests/i915_random.h"
 
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+	u64 result;
+
+	/* XXX VLV_GT_MEDIA_RC6? */
+
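+	/* Sum all RC6 flavours so time in deeper states still counts */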
+	result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	if (HAS_RC6p(rc6_to_i915(rc6)))
+		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+	if (HAS_RC6pp(rc6_to_i915(rc6)))
+		result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+	return result;
+}
+
 int live_rc6_manual(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -38,9 +53,9 @@ int live_rc6_manual(void *arg)
 	__intel_rc6_disable(rc6);
 	msleep(1); /* wakeup is not immediate, takes about 100us on icl */
 
-	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[0] = rc6_residency(rc6);
 	msleep(250);
-	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[1] = rc6_residency(rc6);
 	if ((res[1] - res[0]) >> 10) {
 		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
 		       (res[1] - res[0]) >> 10);
@@ -51,14 +66,15 @@ int live_rc6_manual(void *arg)
 	/* Manually enter RC6 */
 	intel_rc6_park(rc6);
 
-	res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[0] = rc6_residency(rc6);
 	msleep(100);
-	res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+	res[1] = rc6_residency(rc6);
 
 	if (res[1] == res[0]) {
-		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
 		       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
-		       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+		       intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
+		       res[0]);
 		err = -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 6ad6aca..35406ec 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg)
 	if (!intel_has_reset_engine(gt))
 		return 0;
 
-	if (USES_GUC_SUBMISSION(gt->i915))
+	if (intel_uc_uses_guc_submission(&gt->uc))
 		return 0;
 
 	intel_gt_pm_get(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 0000000..9995faa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *cs;
+	int err;
+
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return vma;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	err = i915_vma_sync(vma);
+	if (err) {
+		i915_gem_object_put(obj);
+		return ERR_PTR(err);
+	}
+
+	cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cs)) {
+		i915_gem_object_put(obj);
+		return ERR_CAST(cs);
+	}
+
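+	/*
+	 * Store STACK_MAGIC at byte 4000 of this buffer; finding it there
+	 * later proves the batch was executed.
+	 */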
+	if (INTEL_GEN(engine->i915) >= 6) {
+		*cs++ = MI_STORE_DWORD_IMM_GEN4;
+		*cs++ = 0;
+	} else if (INTEL_GEN(engine->i915) >= 4) {
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = 0;
+	} else {
+		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+	}
+	*cs++ = vma->node.start + 4000;
+	*cs++ = STACK_MAGIC;
+
+	*cs++ = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	vma->private = intel_context_create(engine); /* dummy residuals */
+	if (IS_ERR(vma->private)) {
+		vma = ERR_CAST(vma->private);
+		i915_gem_object_put(obj);
+	}
+
+	return vma;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+	struct i915_request *rq;
+	int err = 0;
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0)
+		err = -ETIME;
+	i915_request_put(rq);
+
+	return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	err = context_sync(ce);
+	intel_context_put(ce);
+
+	return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+	int pass;
+	int err;
+
+	for (pass = 0; pass < 2; pass++) {
+		WRITE_ONCE(*result, 0);
+		err = context_sync(engine->kernel_context);
+		if (err || READ_ONCE(*result)) {
+			if (!err) {
+				pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+				       pass);
+				err = -EINVAL;
+			}
+			return err;
+		}
+
+		WRITE_ONCE(*result, 0);
+		err = new_context_sync(engine);
+		if (READ_ONCE(*result) != STACK_MAGIC) {
+			if (!err) {
+				pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+				       pass);
+				err = -EINVAL;
+			}
+			return err;
+		}
+
+		WRITE_ONCE(*result, 0);
+		err = new_context_sync(engine);
+		if (READ_ONCE(*result) != STACK_MAGIC) {
+			if (!err) {
+				pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+				       pass);
+				err = -EINVAL;
+			}
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+	struct intel_context *ce;
+	int err, i;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (i = 0; i < 2; i++) {
+		WRITE_ONCE(*result, 0);
+		err = context_sync(ce);
+		if (err)
+			break;
+	}
+	intel_context_put(ce);
+	if (err)
+		return err;
+
+	if (READ_ONCE(*result)) {
+		pr_err("wa_bb emitted between the same user context\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+	struct intel_context *ce;
+	int err, i;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (i = 0; i < 2; i++) {
+		WRITE_ONCE(*result, 0);
+		err = context_sync(ce);
+		if (err)
+			break;
+
+		err = context_sync(engine->kernel_context);
+		if (err)
+			break;
+	}
+	intel_context_put(ce);
+	if (err)
+		return err;
+
+	if (READ_ONCE(*result)) {
+		pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+	struct i915_vma *bb;
+	u32 *result;
+	int err;
+
+	bb = create_wally(engine);
+	if (IS_ERR(bb))
+		return PTR_ERR(bb);
+
+	result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+	if (IS_ERR(result)) {
+		intel_context_put(bb->private);
+		i915_vma_unpin_and_release(&bb, 0);
+		return PTR_ERR(result);
+	}
+	result += 1000;
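+	/* Watch dword 1000 (byte 4000), where create_wally() plants its marker */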
+
+	engine->wa_ctx.vma = bb;
+
+	err = mixed_contexts_sync(engine, result);
+	if (err)
+		goto out;
+
+	err = double_context_sync_00(engine, result);
+	if (err)
+		goto out;
+
+	err = kernel_context_sync_00(engine, result);
+	if (err)
+		goto out;
+
+out:
+	intel_context_put(engine->wa_ctx.vma->private);
+	i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+	return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/*
+	 * Exercise the inter-context wa batch.
+	 *
+	 * Between each user context we run a wa batch, and since it may
+	 * have implications for user visible state, we have to check that
+	 * we do actually execute it.
+	 *
+	 * The trick we use is to replace the normal wa batch with a custom
+	 * one that writes to a marker within it, and we can then look for
+	 * that marker to confirm that the batch was run when we expect it,
+	 * and, equally important, that it wasn't run when we don't!
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_vma *saved_wa;
+		int err;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (IS_GEN_RANGE(gt->i915, 4, 5))
+			continue; /* MI_STORE_DWORD is privileged! */
+
+		saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+		intel_engine_pm_get(engine);
+		err = __live_ctx_switch_wa(engine);
+		intel_engine_pm_put(engine);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+
+		engine->wa_ctx.vma = saved_wa;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_ctx_switch_wa),
+	};
+
+	if (HAS_EXECLISTS(i915))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc..c2578a0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,6 +6,8 @@
 
 #include <linux/prime_numbers.h>
 
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
@@ -604,7 +606,6 @@ static int live_hwsp_alternate(void *arg)
 
 			tl = checked_intel_timeline_create(gt);
 			if (IS_ERR(tl)) {
-				intel_engine_pm_put(engine);
 				err = PTR_ERR(tl);
 				goto out;
 			}
@@ -750,6 +751,189 @@ static int live_hwsp_wrap(void *arg)
 	return err;
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+				     unsigned long *saved)
+{
+	*saved = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
+	intel_engine_pm_get(engine);
+	intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+				    unsigned long saved)
+{
+	intel_engine_pm_put(engine);
+
+	engine->props.heartbeat_interval_ms = saved;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Run the host for long enough, and even the kernel context will
+	 * see a seqno rollover.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce = engine->kernel_context;
+		struct intel_timeline *tl = ce->timeline;
+		struct i915_request *rq[3] = {};
+		unsigned long heartbeat;
+		int i;
+
+		engine_heartbeat_disable(engine, &heartbeat);
+		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+			err = -EIO;
+			goto out;
+		}
+
+		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
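+		/* Rewind the seqno so the requests that follow straddle the wrap */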
+		tl->seqno = 0;
+		timeline_rollback(tl);
+		timeline_rollback(tl);
+		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			struct i915_request *this;
+
+			this = i915_request_create(ce);
+			if (IS_ERR(this)) {
+				err = PTR_ERR(this);
+				goto out;
+			}
+
+			pr_debug("%s: create fence.seqno:%d\n",
+				 engine->name,
+				 lower_32_bits(this->fence.seqno));
+
+			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+			rq[i] = i915_request_get(this);
+			i915_request_add(this);
+		}
+
+		/* We expected a wrap! */
+		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+			pr_err("Wait for timeline wrap timed out!\n");
+			err = -EIO;
+			goto out;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			if (!i915_request_completed(rq[i])) {
+				pr_err("Pre-wrap request not completed!\n");
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+out:
+		for (i = 0; i < ARRAY_SIZE(rq); i++)
+			i915_request_put(rq[i]);
+		engine_heartbeat_enable(engine, heartbeat);
+		if (err)
+			break;
+	}
+
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Simulate a long running user context, and force the seqno wrap
+	 * on the user's timeline.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq[3] = {};
+		struct intel_timeline *tl;
+		struct intel_context *ce;
+		int i;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
+
+		err = intel_context_alloc_state(ce);
+		if (err)
+			goto out;
+
+		tl = ce->timeline;
+		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+			goto out;
+
+		timeline_rollback(tl);
+		timeline_rollback(tl);
+		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			struct i915_request *this;
+
+			this = intel_context_create_request(ce);
+			if (IS_ERR(this)) {
+				err = PTR_ERR(this);
+				goto out;
+			}
+
+			pr_debug("%s: create fence.seqno:%d\n",
+				 engine->name,
+				 lower_32_bits(this->fence.seqno));
+
+			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+			rq[i] = i915_request_get(this);
+			i915_request_add(this);
+		}
+
+		/* We expected a wrap! */
+		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+			pr_err("Wait for timeline wrap timed out!\n");
+			err = -EIO;
+			goto out;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			if (!i915_request_completed(rq[i])) {
+				pr_err("Pre-wrap request not completed!\n");
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+out:
+		for (i = 0; i < ARRAY_SIZE(rq); i++)
+			i915_request_put(rq[i]);
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	return err;
+}
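
Both rollover selftests depend on 32-bit seqnos being ordered with wrap-aware
arithmetic, so a request issued just after the wrap still counts as later than
one issued just before it. A self-contained sketch of that comparison, in the
style of i915_seqno_passed():

	#include <stdbool.h>
	#include <stdint.h>

	/* true if seqno a is at-or-after seqno b, tolerating wraparound */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	/* after the wrap: seqno_passed(0x00000002, 0xfffffffe) is true,
	 * even though 2 < 0xfffffffe as plain unsigned values */

The two timeline_rollback() calls park the timeline just below the wrap point,
which is why the GEM_BUG_ON() above expects rq[2]->fence.seqno to be
numerically smaller than rq[0]->fence.seqno.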
+
 static int live_hwsp_recycle(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -827,6 +1011,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_hwsp_engine),
 		SUBTEST(live_hwsp_alternate),
 		SUBTEST(live_hwsp_wrap),
+		SUBTEST(live_hwsp_rollover_kernel),
+		SUBTEST(live_hwsp_rollover_user),
 	};
 
 	if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ac19218..5ed3232 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -583,6 +583,15 @@ static int check_dirty_whitelist(struct intel_context *ce)
 		if (err)
 			goto err_request;
 
+		i915_vma_lock(scratch);
+		err = i915_request_await_object(rq, scratch->obj, true);
+		if (err == 0)
+			err = i915_vma_move_to_active(scratch, rq,
+						      EXEC_OBJECT_WRITE);
+		i915_vma_unlock(scratch);
+		if (err)
+			goto err_request;
+
 		err = engine->emit_bb_start(rq,
 					    batch->node.start, PAGE_SIZE,
 					    0);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 0000000..8f9b2f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+	struct kobject base;
+	struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+	return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+	/* Trim off the trailing space and replace with a newline */
+	if (len > PAGE_SIZE)
+		len = PAGE_SIZE;
+	if (len > 0)
+		buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+	    u32 caps, char *buf, bool show_unknown)
+{
+	const char * const *repr;
+	int count, n;
+	ssize_t len;
+
+	BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+	switch (engine->class) {
+	case VIDEO_DECODE_CLASS:
+		repr = vcs_caps;
+		count = ARRAY_SIZE(vcs_caps);
+		break;
+
+	case VIDEO_ENHANCEMENT_CLASS:
+		repr = vecs_caps;
+		count = ARRAY_SIZE(vecs_caps);
+		break;
+
+	default:
+		repr = NULL;
+		count = 0;
+		break;
+	}
+	GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
+	len = 0;
+	for_each_set_bit(n,
+			 (unsigned long *)&caps,
+			 show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+		if (n >= count || !repr[n]) {
+			if (GEM_WARN_ON(show_unknown))
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"[%x] ", n);
+		} else {
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%s ", repr[n]);
+		}
+		if (GEM_WARN_ON(len >= PAGE_SIZE))
+			break;
+	}
+	return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * When waiting for a request, if it is currently being executed
+	 * on the GPU, we busywait for a short while before sleeping. The
+	 * premise is that most requests are short, and if it is already
+	 * executing then there is a good chance that it will complete
+	 * before we can set up the interrupt handler and go to sleep.
+	 * We try to offset the cost of going to sleep by first spinning
+	 * on the request -- if it completed in less time than it would take
+	 * to go to sleep, process the interrupt and return to the client,
+	 * then we have saved the client some latency, albeit at the cost
+	 * of spinning on an expensive CPU core.
+	 *
+	 * While we try to avoid waiting at all for a request that is unlikely
+	 * to complete, deciding how long it is worth spinning is an
+	 * arbitrary decision: trading off power vs latency.
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_nsecs(2))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+	return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * Execlists uses a scheduling quantum (a timeslice) to alternate
+	 * execution between ready-to-run contexts of equal priority. This
+	 * ensures that all users (though only if they are of equal importance)
+	 * have the opportunity to run and prevents livelocks where contexts
+	 * may have implicit ordering due to userspace semaphores.
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+	if (execlists_active(&engine->execlists))
+		set_timer_ms(&engine->execlists.timer, duration);
+
+	return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * Allowing ourselves to sleep before a GPU reset after disabling
+	 * submission, even for a few milliseconds, gives an innocent context
+	 * the opportunity to clear the GPU before the reset occurs. However,
+	 * how long to sleep depends on the typical non-preemptible duration
+	 * (a similar problem to determining the ideal preempt-reset timeout
+	 * or even the heartbeat interval).
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+	return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long timeout;
+	int err;
+
+	/*
+	 * After initialising a preemption request, we give the current
+	 * resident a small amount of time to vacate the GPU. The preemption
+	 * request is for a higher priority context and should be immediate to
+	 * maintain high quality of service (and avoid priority inversion).
+	 * However, the preemption granularity of the GPU can be quite coarse
+	 * and so we need a compromise.
+	 */
+
+	err = kstrtoull(buf, 0, &timeout);
+	if (err)
+		return err;
+
+	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+	if (READ_ONCE(engine->execlists.pending[0]))
+		set_timer_ms(&engine->execlists.preempt, timeout);
+
+	return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+		     char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long delay;
+	int err;
+
+	/*
+	 * We monitor the health of the system via periodic heartbeat pulses.
+	 * The pulses also provide the opportunity to perform garbage
+	 * collection.  However, we interpret an incomplete pulse (a missed
+	 * heartbeat) as an indication that the system is no longer responsive,
+	 * i.e. hung, and perform an engine or full GPU reset. Given that the
+	 * preemption granularity can be very coarse on a system, the optimal
+	 * value for any workload is unknowable!
+	 */
+
+	err = kstrtoull(buf, 0, &delay);
+	if (err)
+		return err;
+
+	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	err = intel_engine_set_heartbeat(engine, delay);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+	.release = kobj_engine_release,
+	.sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+	struct kobj_engine *ke;
+
+	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+	if (!ke)
+		return NULL;
+
+	kobject_init(&ke->base, &kobj_engine_type);
+	ke->engine = engine;
+
+	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+		kobject_put(&ke->base);
+		return NULL;
+	}
+
+	/* xfer ownership to sysfs tree */
+	return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+	static const struct attribute *files[] = {
+		&name_attr.attr,
+		&class_attr.attr,
+		&inst_attr.attr,
+		&mmio_attr.attr,
+		&caps_attr.attr,
+		&all_caps_attr.attr,
+		&max_spin_attr.attr,
+		&stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+		&heartbeat_interval_attr.attr,
+#endif
+		NULL
+	};
+
+	struct device *kdev = i915->drm.primary->kdev;
+	struct intel_engine_cs *engine;
+	struct kobject *dir;
+
+	dir = kobject_create_and_add("engine", &kdev->kobj);
+	if (!dir)
+		return;
+
+	for_each_uabi_engine(engine, i915) {
+		struct kobject *kobj;
+
+		kobj = kobj_engine(dir, engine);
+		if (!kobj)
+			goto err_engine;
+
+		if (sysfs_create_files(kobj, files))
+			goto err_object;
+
+		if (intel_engine_has_timeslices(engine) &&
+		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+			goto err_engine;
+
+		if (intel_engine_has_preempt_reset(engine) &&
+		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+			goto err_engine;
+
+		if (0) {
+err_object:
+			kobject_put(kobj);
+err_engine:
+			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+				engine->name);
+			break;
+		}
+	}
+}
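
Taken together, the attributes above yield one directory per uabi engine under
the card device. Assuming the usual /sys/class/drm/card0 node and an rcs0
engine (names vary by platform), the resulting layout is:

	/sys/class/drm/card0/engine/rcs0/
		name  class  instance  mmio_base
		capabilities  known_capabilities
		max_busywait_duration_ns
		stop_timeout_ms
		timeslice_duration_ms    (only on engines with timeslicing)
		preempt_timeout_ms       (only on engines with preempt-reset)
		heartbeat_interval_ms    (only with CONFIG_DRM_I915_HEARTBEAT_INTERVAL)

Reading capabilities on a video engine would print, e.g., "hevc sfc", and
writing 0 to max_busywait_duration_ns disables the pre-sleep busywait
described above.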
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 0000000..9546fff
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5d00a3b..819f09e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
 {
 	u32 flags = 0;
 
-	if (!intel_guc_is_submission_supported(guc))
+	if (!intel_guc_submission_is_used(guc))
 		flags |= GUC_CTL_DISABLE_SCHEDULER;
 
 	return flags;
@@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
 {
 	u32 flags = 0;
 
-	if (intel_guc_is_submission_supported(guc)) {
+	if (intel_guc_submission_is_used(guc)) {
 		u32 ctxnum, base;
 
 		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc)
 
 	ret = intel_uc_fw_init(&guc->fw);
 	if (ret)
-		goto err_fetch;
+		goto out;
 
 	ret = intel_guc_log_create(&guc->log);
 	if (ret)
@@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc)
 	if (ret)
 		goto err_ads;
 
-	if (intel_guc_is_submission_supported(guc)) {
+	if (intel_guc_submission_is_used(guc)) {
 		/*
 		 * This is stuff we need to have available at fw load time
 		 * if we are planning to enable submission later
@@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc)
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(gt->ggtt);
 
+	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
 	return 0;
 
 err_ct:
@@ -374,9 +376,8 @@ int intel_guc_init(struct intel_guc *guc)
 	intel_guc_log_destroy(&guc->log);
 err_fw:
 	intel_uc_fw_fini(&guc->fw);
-err_fetch:
-	intel_uc_fw_cleanup_fetch(&guc->fw);
-	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+out:
+	i915_probe_error(gt->i915, "failed with %d\n", ret);
 	return ret;
 }
 
@@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 
-	if (!intel_uc_fw_is_available(&guc->fw))
+	if (!intel_uc_fw_is_loadable(&guc->fw))
 		return;
 
 	i915_ggtt_disable_guc(gt->ggtt);
 
-	if (intel_guc_is_submission_supported(guc))
+	if (intel_guc_submission_is_used(guc))
 		intel_guc_submission_fini(guc);
 
 	intel_guc_ct_fini(&guc->ct);
@@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc)
 	intel_guc_ads_destroy(guc);
 	intel_guc_log_destroy(&guc->log);
 	intel_uc_fw_fini(&guc->fw);
-	intel_uc_fw_cleanup_fetch(&guc->fw);
-
-	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
 }
 
 /*
@@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc)
 	 * If GuC communication is enabled but submission is not supported,
 	 * we do not need to suspend the GuC.
 	 */
-	if (!intel_guc_submission_is_enabled(guc))
+	if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
 		return 0;
 
 	/*
@@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc)
 	 * we do not need to resume the GuC but we do need to enable the
 	 * GuC communication on resume (above).
 	 */
-	if (!intel_guc_submission_is_enabled(guc))
+	if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
 		return 0;
 
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -678,8 +676,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	if (IS_ERR(vma))
 		goto err;
 
-	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-	ret = i915_vma_pin(vma, 0, 0, flags);
+	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+	ret = i915_ggtt_pin(vma, 0, flags);
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 910d495..4594ccb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -39,7 +39,7 @@ struct intel_guc {
 		void (*disable)(struct intel_guc *guc);
 	} interrupts;
 
-	bool submission_supported;
+	bool submission_selected;
 
 	struct i915_vma *ads_vma;
 	struct __guc_ads_blob *ads_blob;
@@ -143,29 +143,36 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
 	return intel_uc_fw_is_supported(&guc->fw);
 }
 
-static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
 {
 	return intel_uc_fw_is_enabled(&guc->fw);
 }
 
-static inline bool intel_guc_is_running(struct intel_guc *guc)
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+	return intel_uc_fw_is_available(&guc->fw);
+}
+
+static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
 {
 	return intel_uc_fw_is_running(&guc->fw);
 }
 
+static inline bool intel_guc_is_ready(struct intel_guc *guc)
+{
+	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
+}
+
 static inline int intel_guc_sanitize(struct intel_guc *guc)
 {
 	intel_uc_fw_sanitize(&guc->fw);
+	intel_guc_ct_sanitize(&guc->ct);
 	guc->mmio_msg = 0;
 
 	return 0;
 }
 
-static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
-{
-	return guc->submission_supported;
-}
-
 static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
 {
 	spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index c6f971a..11742fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,11 +5,15 @@
 
 #include "i915_drv.h"
 #include "intel_guc_ct.h"
+#include "gt/intel_gt.h"
 
+#define CT_ERROR(_ct, _fmt, ...) \
+	DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
 #ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
+#define CT_DEBUG(_ct, _fmt, ...) \
+	DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
 #else
-#define CT_DEBUG_DRIVER(...)	do { } while (0)
+#define CT_DEBUG(...)	do { } while (0)
 #endif
 
 struct ct_request {
@@ -48,6 +52,21 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
 	return container_of(ct, struct intel_guc, ct);
 }
 
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+	return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+	return ct_to_gt(ct)->i915;
+}
+
+static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
+{
+	return ct_to_i915(ct)->drm.dev;
+}
+
 static inline const char *guc_ct_buffer_type_to_str(u32 type)
 {
 	switch (type) {
@@ -63,7 +82,6 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
 				    u32 cmds_addr, u32 size)
 {
-	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
 	memset(desc, 0, sizeof(*desc));
 	desc->addr = cmds_addr;
 	desc->size = size;
@@ -72,8 +90,6 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
 
 static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
 {
-	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
-			desc, desc->head, desc->tail);
 	desc->head = 0;
 	desc->tail = 0;
 	desc->is_in_error = 0;
@@ -89,31 +105,40 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
 		sizeof(struct guc_ct_buffer_desc),
 		type
 	};
-	int err;
 
 	/* Can't use generic send(), CT registration must go over MMIO */
-	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
-	if (err)
-		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
-			  guc_ct_buffer_type_to_str(type), err);
+	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
+{
+	int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
+
+	if (unlikely(err))
+		CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
+			 guc_ct_buffer_type_to_str(type), err);
 	return err;
 }
 
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
-					   u32 type)
+static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
 		CTB_OWNER_HOST,
 		type
 	};
-	int err;
 
 	/* Can't use generic send(), CT deregistration must go over MMIO */
-	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
-	if (err)
-		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
-			  guc_ct_buffer_type_to_str(type), err);
+	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
+{
+	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+
+	if (unlikely(err))
+		CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
+			 guc_ct_buffer_type_to_str(type), err);
 	return err;
 }
 
@@ -157,13 +182,12 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
 	 */
 
 	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
-	if (err) {
-		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+	if (unlikely(err)) {
+		CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
 		return err;
 	}
 
-	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
-			intel_guc_ggtt_offset(guc, ct->vma));
+	CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
 
 	/* store pointers to desc and cmds */
 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
@@ -197,7 +221,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
 int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	u32 base;
+	u32 base, cmds, size;
 	int err;
 	int i;
 
@@ -212,23 +236,23 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	 */
 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
-					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
-					PAGE_SIZE/4);
+		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+		size = PAGE_SIZE / 4;
+		CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
+		guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
 	}
 
-	/* register buffers, starting wirh RECV buffer
-	 * descriptors are in first half of the blob
+	/*
+	 * Register both CT buffers starting with RECV buffer.
+	 * Descriptors are in first half of the blob.
 	 */
-	err = guc_action_register_ct_buffer(guc,
-					    base + PAGE_SIZE/4 * CTB_RECV,
-					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	if (unlikely(err))
 		goto err_out;
 
-	err = guc_action_register_ct_buffer(guc,
-					    base + PAGE_SIZE/4 * CTB_SEND,
-					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
+	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
 	if (unlikely(err))
 		goto err_deregister;
 
@@ -237,10 +261,9 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	return 0;
 
 err_deregister:
-	guc_action_deregister_ct_buffer(guc,
-					INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
 err_out:
-	DRM_ERROR("CT: can't open channel; err=%d\n", err);
+	CT_ERROR(ct, "Failed to open CT channel (err=%d)\n", err);
 	return err;
 }
 
@@ -256,18 +279,16 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 
 	ct->enabled = false;
 
-	if (intel_guc_is_running(guc)) {
-		guc_action_deregister_ct_buffer(guc,
-						INTEL_GUC_CT_BUFFER_TYPE_SEND);
-		guc_action_deregister_ct_buffer(guc,
-						INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	if (intel_guc_is_fw_running(guc)) {
+		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
+		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	}
 }
 
 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
 {
 	/* For now it's trivial */
-	return ++ct->requests.next_fence;
+	return ++ct->requests.last_fence;
 }
 
 /**
@@ -288,25 +309,33 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
  *                   ^-----------------len-------------------^
  */
 
-static int ctb_write(struct intel_guc_ct_buffer *ctb,
-		     const u32 *action,
-		     u32 len /* in dwords */,
-		     u32 fence,
-		     bool want_response)
+static int ct_write(struct intel_guc_ct *ct,
+		    const u32 *action,
+		    u32 len /* in dwords */,
+		    u32 fence,
+		    bool want_response)
 {
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
 	struct guc_ct_buffer_desc *desc = ctb->desc;
-	u32 head = desc->head / 4;	/* in dwords */
-	u32 tail = desc->tail / 4;	/* in dwords */
-	u32 size = desc->size / 4;	/* in dwords */
-	u32 used;			/* in dwords */
+	u32 head = desc->head;
+	u32 tail = desc->tail;
+	u32 size = desc->size;
+	u32 used;
 	u32 header;
 	u32 *cmds = ctb->cmds;
 	unsigned int i;
 
-	GEM_BUG_ON(desc->size % 4);
-	GEM_BUG_ON(desc->head % 4);
-	GEM_BUG_ON(desc->tail % 4);
-	GEM_BUG_ON(tail >= size);
+	if (unlikely(desc->is_in_error))
+		return -EPIPE;
+
+	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+		     (tail | head) >= size))
+		goto corrupted;
+
+	/* later calculations will be done in dwords */
+	head /= 4;
+	tail /= 4;
+	size /= 4;
 
 	/*
 	 * tail == head condition indicates empty. GuC FW does not support
@@ -332,9 +361,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
 		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);
 
-	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
-			4, &header, 4, &fence,
-			4 * (len - 1), &action[1]);
+	CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
+		 4, &header, 4, &fence, 4 * (len - 1), &action[1]);
 
 	cmds[tail] = header;
 	tail = (tail + 1) % size;
@@ -346,12 +374,17 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		cmds[tail] = action[i];
 		tail = (tail + 1) % size;
 	}
+	GEM_BUG_ON(tail > size);
 
 	/* now update desc tail (back in bytes) */
 	desc->tail = tail * 4;
-	GEM_BUG_ON(desc->tail > desc->size);
-
 	return 0;
+
+corrupted:
+	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+		 desc->addr, desc->head, desc->tail, desc->size);
+	desc->is_in_error = 1;
+	return -EPIPE;
 }
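
For reference, after ct_write() converts head/tail/size to dwords its space
accounting follows the standard one-slot-empty circular buffer convention
(tail == head means empty, per the comment above). A standalone sketch with
illustrative names:

	/* dwords occupied between reader (head) and writer (tail) */
	static uint32_t ctb_used(uint32_t head, uint32_t tail, uint32_t size)
	{
		return tail >= head ? tail - head : tail + size - head;
	}

	/* one slot is always kept free so that a full buffer remains
	 * distinguishable from an empty one */
	static bool ctb_has_room(uint32_t head, uint32_t tail,
				 uint32_t size, uint32_t len)
	{
		return ctb_used(head, tail, size) + len + 1 <= size;
	}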
 
 /**
@@ -469,7 +502,7 @@ static int ct_send(struct intel_guc_ct *ct,
 	list_add_tail(&request.link, &ct->requests.pending);
 	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
-	err = ctb_write(ctb, action, len, fence, !!response_buf);
+	err = ct_write(ct, action, len, fence, !!response_buf);
 	if (unlikely(err))
 		goto unlink;
 
@@ -526,11 +559,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
 
 	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
 	if (unlikely(ret < 0)) {
-		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
-			  action[0], ret, status);
+		CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
+			 action[0], ret, status);
 	} else if (unlikely(ret)) {
-		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
-				action[0], ret, ret);
+		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
+			 action[0], ret, ret);
 	}
 
 	mutex_unlock(&guc->send_mutex);
@@ -552,22 +585,29 @@ static inline bool ct_header_is_response(u32 header)
 	return !!(header & GUC_CT_MSG_IS_RESPONSE);
 }
 
-static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+static int ct_read(struct intel_guc_ct *ct, u32 *data)
 {
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
 	struct guc_ct_buffer_desc *desc = ctb->desc;
-	u32 head = desc->head / 4;	/* in dwords */
-	u32 tail = desc->tail / 4;	/* in dwords */
-	u32 size = desc->size / 4;	/* in dwords */
+	u32 head = desc->head;
+	u32 tail = desc->tail;
+	u32 size = desc->size;
 	u32 *cmds = ctb->cmds;
-	s32 available;			/* in dwords */
+	s32 available;
 	unsigned int len;
 	unsigned int i;
 
-	GEM_BUG_ON(desc->size % 4);
-	GEM_BUG_ON(desc->head % 4);
-	GEM_BUG_ON(desc->tail % 4);
-	GEM_BUG_ON(tail >= size);
-	GEM_BUG_ON(head >= size);
+	if (unlikely(desc->is_in_error))
+		return -EPIPE;
+
+	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+		     (tail | head) >= size))
+		goto corrupted;
+
+	/* later calculations will be done in dwords */
+	head /= 4;
+	tail /= 4;
+	size /= 4;
 
 	/* tail == head condition indicates empty */
 	available = tail - head;
@@ -577,7 +617,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 	/* beware of buffer wrap case */
 	if (unlikely(available < 0))
 		available += size;
-	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
+	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
 	GEM_BUG_ON(available < 0);
 
 	data[0] = cmds[head];
@@ -586,23 +626,29 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
 	/* message len with header */
 	len = ct_header_get_len(data[0]) + 1;
 	if (unlikely(len > (u32)available)) {
-		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
-			  4, data,
-			  4 * (head + available - 1 > size ?
-			       size - head : available - 1), &cmds[head],
-			  4 * (head + available - 1 > size ?
-			       available - 1 - size + head : 0), &cmds[0]);
-		return -EPROTO;
+		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
+			 4, data,
+			 4 * (head + available - 1 > size ?
+			      size - head : available - 1), &cmds[head],
+			 4 * (head + available - 1 > size ?
+			      available - 1 - size + head : 0), &cmds[0]);
+		goto corrupted;
 	}
 
 	for (i = 1; i < len; i++) {
 		data[i] = cmds[head];
 		head = (head + 1) % size;
 	}
-	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
+	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
 
 	desc->head = head * 4;
 	return 0;
+
+corrupted:
+	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+		 desc->addr, desc->head, desc->tail, desc->size);
+	desc->is_in_error = 1;
+	return -EPIPE;
 }
 
 /**
@@ -627,7 +673,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 {
 	u32 header = msg[0];
 	u32 len = ct_header_get_len(header);
-	u32 msglen = len + 1; /* total message length including header */
+	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
 	u32 fence;
 	u32 status;
 	u32 datalen;
@@ -639,7 +685,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
-		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
 		return -EPROTO;
 	}
 
@@ -649,22 +695,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	/* Format of the status follows RESPONSE message */
 	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
-		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
 		return -EPROTO;
 	}
 
-	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+	CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
 
 	spin_lock(&ct->requests.lock);
 	list_for_each_entry(req, &ct->requests.pending, link) {
 		if (unlikely(fence != req->fence)) {
-			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
-					req->fence);
+			CT_DEBUG(ct, "request %u awaits response\n",
+				 req->fence);
 			continue;
 		}
 		if (unlikely(datalen > req->response_len)) {
-			DRM_ERROR("CT: response %u too long %*ph\n",
-				  req->fence, 4 * msglen, msg);
+			CT_ERROR(ct, "Response for %u is too long %*ph\n",
+				 req->fence, msgsize, msg);
 			datalen = 0;
 		}
 		if (datalen)
@@ -677,7 +723,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 	spin_unlock(&ct->requests.lock);
 
 	if (!found)
-		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
+		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
 	return 0;
 }
 
@@ -687,7 +733,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
 	struct intel_guc *guc = ct_to_guc(ct);
 	int ret;
 
-	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
+	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
 
 	switch (action) {
 	case INTEL_GUC_ACTION_DEFAULT:
@@ -698,8 +744,8 @@ static void ct_process_request(struct intel_guc_ct *ct,
 
 	default:
 fail_unexpected:
-		DRM_ERROR("CT: unexpected request %x %*ph\n",
-			  action, 4 * len, payload);
+		CT_ERROR(ct, "Unexpected request %x %*ph\n",
+			 action, 4 * len, payload);
 		break;
 	}
 }
@@ -767,18 +813,18 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 {
 	u32 header = msg[0];
 	u32 len = ct_header_get_len(header);
-	u32 msglen = len + 1; /* total message length including header */
+	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
 	struct ct_incoming_request *request;
 	unsigned long flags;
 
 	GEM_BUG_ON(ct_header_is_response(header));
 
-	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+	request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
 	if (unlikely(!request)) {
-		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
+		CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
 		return 0; /* XXX: -ENOMEM ? */
 	}
-	memcpy(request->msg, msg, 4 * msglen);
+	memcpy(request->msg, msg, msgsize);
 
 	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_add_tail(&request->link, &ct->requests.incoming);
@@ -794,7 +840,6 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
  */
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 {
-	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
 	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
 	int err = 0;
 
@@ -804,7 +849,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 	}
 
 	do {
-		err = ctb_read(ctb, msg);
+		err = ct_read(ct, msg);
 		if (err)
 			break;
 
@@ -813,10 +858,4 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 		else
 			err = ct_handle_request(ct, msg);
 	} while (!err);
-
-	if (GEM_WARN_ON(err == -EPROTO)) {
-		DRM_ERROR("CT: corrupted message detected!\n");
-		ctb->desc->is_in_error = 1;
-	}
 }
-
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 3e7fe23..494a51a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -49,7 +49,7 @@ struct intel_guc_ct {
 	struct intel_guc_ct_buffer ctbs[2];
 
 	struct {
-		u32 next_fence; /* fence to be used with next request to send */
+		u32 last_fence; /* last fence used to send request */
 
 		spinlock_t lock; /* protects pending requests list */
 		struct list_head pending; /* requests waiting for response */
@@ -65,6 +65,11 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
 int intel_guc_ct_enable(struct intel_guc_ct *ct);
 void intel_guc_ct_disable(struct intel_guc_ct *ct);
 
+static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
+{
+	ct->enabled = false;
+}
+
 static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
 {
 	return ct->enabled;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9e42324..fe7778c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
-		if (!i915_request_signaled(rq))
-			dma_fence_set_error(&rq->fence, -EIO);
-
+		i915_request_set_error_once(rq, -EIO);
 		i915_request_mark_complete(rq);
 	}
 
@@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 	guc_proc_desc_fini(guc);
 }
 
-static bool __guc_submission_support(struct intel_guc *guc)
+static bool __guc_submission_selected(struct intel_guc *guc)
 {
-	/* XXX: GuC submission is unavailable for now */
-	return false;
-
-	if (!intel_guc_is_supported(guc))
+	if (!intel_guc_submission_is_supported(guc))
 		return false;
 
 	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
@@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc)
 
 void intel_guc_submission_init_early(struct intel_guc *guc)
 {
-	guc->submission_supported = __guc_submission_support(guc);
+	guc->submission_selected = __guc_submission_selected(guc);
 }
 
 bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index e402a29..4cf9d3e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -8,7 +8,8 @@
 
 #include <linux/types.h>
 
-struct intel_guc;
+#include "intel_guc.h"
+
 struct intel_engine_cs;
 
 void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc);
 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
 bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
 
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+	/* XXX: GuC submission is unavailable for now */
+	return false;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+	return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+	return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 32a0698..a74b656 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc)
 	if (err)
 		goto out_fini;
 
+	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
 	return 0;
 
 out_fini:
 	intel_uc_fw_fini(&huc->fw);
 out:
-	intel_uc_fw_cleanup_fetch(&huc->fw);
-	DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+	i915_probe_error(i915, "failed with %d\n", err);
 	return err;
 }
 
 void intel_huc_fini(struct intel_huc *huc)
 {
-	if (!intel_uc_fw_is_available(&huc->fw))
+	if (!intel_uc_fw_is_loadable(&huc->fw))
 		return;
 
 	intel_huc_rsa_data_destroy(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 644c059..a40b9cf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc)
 	return intel_uc_fw_is_supported(&huc->fw);
 }
 
-static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
 {
 	return intel_uc_fw_is_enabled(&huc->fw);
 }
 
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+	GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+	return intel_uc_fw_is_available(&huc->fw);
+}
+
 static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
 {
 	return intel_uc_fw_is_running(&huc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index eee193b..9cdf4cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
 	struct drm_i915_private *i915 = gt->i915;
 
 	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
-			       intel_uc_uses_guc(uc),
+			       intel_uc_wants_guc(uc),
 			       INTEL_INFO(i915)->platform, INTEL_REVID(i915));
 }
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 64934a8..a4cbe06 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc)
 	DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
 			     "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
 			     i915_modparams.enable_guc,
-			     yesno(intel_uc_uses_guc(uc)),
-			     yesno(intel_uc_uses_guc_submission(uc)),
-			     yesno(intel_uc_uses_huc(uc)));
+			     yesno(intel_uc_wants_guc(uc)),
+			     yesno(intel_uc_wants_guc_submission(uc)),
+			     yesno(intel_uc_wants_huc(uc)));
 
 	if (i915_modparams.enable_guc == -1)
 		return;
 
 	if (i915_modparams.enable_guc == 0) {
-		GEM_BUG_ON(intel_uc_uses_guc(uc));
-		GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
-		GEM_BUG_ON(intel_uc_uses_huc(uc));
+		GEM_BUG_ON(intel_uc_wants_guc(uc));
+		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+		GEM_BUG_ON(intel_uc_wants_huc(uc));
 		return;
 	}
 
@@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc)
 
 	__confirm_options(uc);
 
-	if (intel_uc_uses_guc(uc))
+	if (intel_uc_wants_guc(uc))
 		uc->ops = &uc_ops_on;
 	else
 		uc->ops = &uc_ops_off;
@@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
 {
 	int err;
 
-	GEM_BUG_ON(!intel_uc_uses_guc(uc));
+	GEM_BUG_ON(!intel_uc_wants_guc(uc));
 
 	err = intel_uc_fw_fetch(&uc->guc.fw);
 	if (err)
 		return;
 
-	if (intel_uc_uses_huc(uc))
+	if (intel_uc_wants_huc(uc))
 		intel_uc_fw_fetch(&uc->huc.fw);
 }
 
@@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc)
 	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
 }
 
-static void __uc_init(struct intel_uc *uc)
+static int __uc_init(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 	struct intel_huc *huc = &uc->huc;
 	int ret;
 
-	GEM_BUG_ON(!intel_uc_uses_guc(uc));
+	GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+	if (!intel_uc_uses_guc(uc))
+		return 0;
+
+	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+		return -ENOMEM;
 
 	/* XXX: GuC submission is unavailable for now */
-	GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+	GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
 
 	ret = intel_guc_init(guc);
-	if (ret) {
-		intel_uc_fw_cleanup_fetch(&huc->fw);
-		return;
+	if (ret)
+		return ret;
+
+	if (intel_uc_uses_huc(uc)) {
+		ret = intel_huc_init(huc);
+		if (ret)
+			goto out_guc;
 	}
 
-	if (intel_uc_uses_huc(uc))
-		intel_huc_init(huc);
+	return 0;
+
+out_guc:
+	intel_guc_fini(guc);
+	return ret;
 }
 
 static void __uc_fini(struct intel_uc *uc)
@@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc)
 	int ret, attempts;
 
 	GEM_BUG_ON(!intel_uc_supports_guc(uc));
-	GEM_BUG_ON(!intel_uc_uses_guc(uc));
+	GEM_BUG_ON(!intel_uc_wants_guc(uc));
 
-	if (!intel_uc_fw_is_available(&guc->fw)) {
+	if (!intel_uc_fw_is_loadable(&guc->fw)) {
 		ret = __uc_check_hw(uc) ||
 		      intel_uc_fw_is_overridden(&guc->fw) ||
-		      intel_uc_supports_guc_submission(uc) ?
+		      intel_uc_wants_guc_submission(uc) ?
 		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
 		goto err_out;
 	}
@@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc)
 	if (ret)
 		goto err_communication;
 
-	if (intel_uc_supports_guc_submission(uc))
+	if (intel_uc_uses_guc_submission(uc))
 		intel_guc_submission_enable(guc);
 
 	dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
 		 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
 		 guc->fw.major_ver_found, guc->fw.minor_ver_found,
 		 "submission",
-		 enableddisabled(intel_uc_supports_guc_submission(uc)));
+		 enableddisabled(intel_uc_uses_guc_submission(uc)));
 
 	if (intel_uc_uses_huc(uc)) {
 		dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
@@ -505,10 +518,10 @@ static void __uc_fini_hw(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_fw_running(guc))
 		return;
 
-	if (intel_uc_supports_guc_submission(uc))
+	if (intel_uc_uses_guc_submission(uc))
 		intel_guc_submission_disable(guc);
 
 	if (guc_communication_enabled(guc))
@@ -527,7 +540,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
 {
 	struct intel_guc *guc = &uc->guc;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_ready(guc))
 		return;
 
 	guc_disable_communication(guc);
@@ -539,7 +552,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc)
 	struct intel_guc *guc = &uc->guc;
 	int err;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_ready(guc))
 		return;
 
 	err = intel_guc_suspend(guc);
@@ -554,7 +567,7 @@ void intel_uc_suspend(struct intel_uc *uc)
 	struct intel_guc *guc = &uc->guc;
 	intel_wakeref_t wakeref;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_ready(guc))
 		return;
 
 	with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
@@ -566,7 +579,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
 	struct intel_guc *guc = &uc->guc;
 	int err;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_fw_running(guc))
 		return 0;
 
 	/* Make sure we enable communication if and only if it's disabled */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 49c9135..5ae7b50b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -7,6 +7,7 @@
 #define _INTEL_UC_H_
 
 #include "intel_guc.h"
+#include "intel_guc_submission.h"
 #include "intel_huc.h"
 #include "i915_params.h"
 
@@ -16,7 +17,7 @@ struct intel_uc_ops {
 	int (*sanitize)(struct intel_uc *uc);
 	void (*init_fw)(struct intel_uc *uc);
 	void (*fini_fw)(struct intel_uc *uc);
-	void (*init)(struct intel_uc *uc);
+	int (*init)(struct intel_uc *uc);
 	void (*fini)(struct intel_uc *uc);
 	int (*init_hw)(struct intel_uc *uc);
 	void (*fini_hw)(struct intel_uc *uc);
@@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc);
 int intel_uc_resume(struct intel_uc *uc);
 int intel_uc_runtime_resume(struct intel_uc *uc);
 
-static inline bool intel_uc_supports_guc(struct intel_uc *uc)
-{
-	return intel_guc_is_supported(&uc->guc);
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
+
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+	return intel_##func##_is_##required(&uc->x); \
 }
 
-static inline bool intel_uc_uses_guc(struct intel_uc *uc)
-{
-	return intel_guc_is_enabled(&uc->guc);
-}
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
 
-static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
-{
-	return intel_guc_is_submission_supported(&uc->guc);
-}
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
 
-static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
-{
-	return intel_guc_is_submission_supported(&uc->guc);
-}
-
-static inline bool intel_uc_supports_huc(struct intel_uc *uc)
-{
-	return intel_uc_supports_guc(uc);
-}
-
-static inline bool intel_uc_uses_huc(struct intel_uc *uc)
-{
-	return intel_huc_is_enabled(&uc->huc);
-}
+#undef uc_state_checkers
+#undef __uc_state_checker
 
 #define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
 static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
@@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
 intel_uc_ops_function(sanitize, sanitize, int, 0);
 intel_uc_ops_function(fetch_firmwares, init_fw, void, );
 intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
-intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(init, init, int, 0);
 intel_uc_ops_function(fini, fini, void, );
 intel_uc_ops_function(init_hw, init_hw, int, 0);
 intel_uc_ops_function(fini_hw, fini_hw, void, );
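
For readability, this is what one instantiation of the checker macros above
expands to; the huc and guc_submission variants expand the same way with
their own names:

	static inline bool intel_uc_supports_guc(struct intel_uc *uc)
	{
		return intel_guc_is_supported(&uc->guc);
	}

	static inline bool intel_uc_wants_guc(struct intel_uc *uc)
	{
		return intel_guc_is_wanted(&uc->guc);
	}

	static inline bool intel_uc_uses_guc(struct intel_uc *uc)
	{
		return intel_guc_is_used(&uc->guc);
	}

As the comment above spells out, each state implies the previous one: a
microcontroller is only used if it was wanted, and only wanted if supported.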
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8ee0a0c..18c7552 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  * features.
  */
 #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
-	fw_def(TIGERLAKE,   0, guc_def(tgl, 35, 2, 0), huc_def(tgl,  7, 0, 3)) \
+	fw_def(TIGERLAKE,   0, guc_def(tgl, 35, 2, 0), huc_def(tgl,  7, 0, 12)) \
 	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl,  9, 0, 0)) \
 	fw_def(ICELAKE,     0, guc_def(icl, 33, 0, 0), huc_def(icl,  9, 0, 0)) \
 	fw_def(COFFEELAKE,  5, guc_def(cml, 33, 0, 0), huc_def(cml,  4, 0, 0)) \
@@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 
 	err = i915_inject_probe_error(i915, -ENXIO);
 	if (err)
-		return err;
+		goto fail;
 
 	__force_fw_fetch_failures(uc_fw, -EINVAL);
 	__force_fw_fetch_failures(uc_fw, -ESTALE);
@@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
 	if (err)
 		return err;
 
-	if (!intel_uc_fw_is_available(uc_fw))
+	if (!intel_uc_fw_is_loadable(uc_fw))
 		return -ENOEXEC;
 
 	/* Call custom loader */
@@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
 
 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
 {
-	intel_uc_fw_cleanup_fetch(uc_fw);
+	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+		i915_gem_object_unpin_pages(uc_fw->obj);
+
+	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1f30543d..888ff0d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -29,8 +29,11 @@ struct intel_gt;
  * |            |                 SELECTED                          |
  * +------------+-               /   |   \                         -+
  * |            |    MISSING <--/    |    \--> ERROR                |
- * |   fetch    |                    |                              |
- * |            |        /------> AVAILABLE <---<-----------\       |
+ * |   fetch    |                    V                              |
+ * |            |                 AVAILABLE                         |
+ * +------------+-                   |                             -+
+ * |   init     |                    V                              |
+ * |            |        /------> LOADABLE <----<-----------\       |
  * +------------+-       \         /    \        \           \     -+
  * |            |         FAIL <--<      \--> TRANSFERRED     \     |
  * |   upload   |                  \           /   \          /     |
@@ -46,6 +49,7 @@ enum intel_uc_fw_status {
 	INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
 	INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
 	INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+	INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
 	INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
 	INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
 	INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
@@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
 		return "ERROR";
 	case INTEL_UC_FIRMWARE_AVAILABLE:
 		return "AVAILABLE";
+	case INTEL_UC_FIRMWARE_LOADABLE:
+		return "LOADABLE";
 	case INTEL_UC_FIRMWARE_FAIL:
 		return "FAIL";
 	case INTEL_UC_FIRMWARE_TRANSFERRED:
@@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
 	case INTEL_UC_FIRMWARE_SELECTED:
 		return -ESTALE;
 	case INTEL_UC_FIRMWARE_AVAILABLE:
+	case INTEL_UC_FIRMWARE_LOADABLE:
 	case INTEL_UC_FIRMWARE_TRANSFERRED:
 	case INTEL_UC_FIRMWARE_RUNNING:
 		return 0;
@@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
 	return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
 }
 
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+	return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
 static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
 {
 	return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
@@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
 static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
 {
 	if (intel_uc_fw_is_loaded(uc_fw))
-		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+		intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
 }
 
 static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
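Taken together, the new LOADABLE stage splits "blob fetched" from "objects
pinned and ready for DMA". A minimal sketch of the resulting lifecycle, per
the state diagram above and using only names visible in this diff (error
handling elided; uc_fw is an already-selected struct intel_uc_fw):

	int err;

	err = intel_uc_fw_fetch(uc_fw);		/* SELECTED -> AVAILABLE   */
	if (!err)
		err = intel_uc_fw_init(uc_fw);	/* AVAILABLE -> LOADABLE   */
	if (!err)
		err = intel_uc_fw_upload(uc_fw, dst_offset, dma_flags);
						/* LOADABLE -> TRANSFERRED */

	/* teardown unpins the pages and drops back to AVAILABLE, so a
	 * later init can make the blob LOADABLE again without refetching */
	intel_uc_fw_fini(uc_fw);
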
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7714204..8b13f09 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_gt *gt = gvt->gt;
 	unsigned int flags;
 	u64 start, end, size;
 	struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 		flags = PIN_MAPPABLE;
 	}
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
-	mmio_hw_access_pre(dev_priv);
-	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+	mutex_lock(&gt->ggtt->vm.mutex);
+	mmio_hw_access_pre(gt);
+	ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
 				  size, I915_GTT_PAGE_SIZE,
 				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
-	mmio_hw_access_post(dev_priv);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mmio_hw_access_post(gt);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 	if (ret)
 		gvt_err("fail to alloc %s gm space from host\n",
 			high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_gt *gt = gvt->gt;
 	int ret;
 
 	ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 
 	return 0;
 out_free_aperture:
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gt->ggtt->vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 	return ret;
 }
 
 static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gt *gt = gvt->gt;
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gt->ggtt->vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
 	drm_mm_remove_node(&vgpu->gm.high_gm_node);
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gt->ggtt->vm.mutex);
 }
 
 /**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 		u32 fence, u64 value)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->gt->i915;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 
-	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+	assert_rpm_wakelock_held(uncore->rpm);
 
-	if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+	if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
 		return;
 
 	reg = vgpu->fence.regs[fence];
-	if (WARN_ON(!reg))
+	if (drm_WARN_ON(&i915->drm, !reg))
 		return;
 
 	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
 	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
 
-	I915_WRITE(fence_reg_lo, 0);
-	POSTING_READ(fence_reg_lo);
+	intel_uncore_write(uncore, fence_reg_lo, 0);
+	intel_uncore_posting_read(uncore, fence_reg_lo);
 
-	I915_WRITE(fence_reg_hi, upper_32_bits(value));
-	I915_WRITE(fence_reg_lo, lower_32_bits(value));
-	POSTING_READ(fence_reg_lo);
+	intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+	intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+	intel_uncore_posting_read(uncore, fence_reg_lo);
 }
 
 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
+	intel_wakeref_t wakeref;
 	u32 i;
 
-	if (WARN_ON(!vgpu_fence_sz(vgpu)))
+	if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
 		return;
 
-	intel_runtime_pm_get(&dev_priv->runtime_pm);
+	wakeref = intel_runtime_pm_get(uncore->rpm);
 
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gvt->gt->ggtt->vm.mutex);
 	_clear_vgpu_fence(vgpu);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
 
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	intel_runtime_pm_put(uncore->rpm, wakeref);
 }
 
 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+	struct intel_uncore *uncore = gvt->gt->uncore;
 	struct i915_fence_reg *reg;
+	intel_wakeref_t wakeref;
 	int i;
 
-	intel_runtime_pm_get(rpm);
+	wakeref = intel_runtime_pm_get(uncore->rpm);
 
 	/* Request fences from host */
-	mutex_lock(&dev_priv->ggtt.vm.mutex);
+	mutex_lock(&gvt->gt->ggtt->vm.mutex);
 
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-		reg = i915_reserve_fence(&dev_priv->ggtt);
+		reg = i915_reserve_fence(gvt->gt->ggtt);
 		if (IS_ERR(reg))
 			goto out_free_fence;
 
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 
 	_clear_vgpu_fence(vgpu);
 
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
-	intel_runtime_pm_put_unchecked(rpm);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+	intel_runtime_pm_put(uncore->rpm, wakeref);
 	return 0;
+
 out_free_fence:
 	gvt_vgpu_err("Failed to alloc fences\n");
 	/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->ggtt.vm.mutex);
-	intel_runtime_pm_put_unchecked(rpm);
+	mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+	intel_runtime_pm_put_unchecked(uncore->rpm);
 	return -ENOSPC;
 }
 
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	intel_wakeref_t wakeref;
 
-	intel_runtime_pm_get(&dev_priv->runtime_pm);
-	_clear_vgpu_fence(vgpu);
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+		_clear_vgpu_fence(vgpu);
 }
 
 /**
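The runtime-pm changes in this file all follow one pattern: an anonymous
intel_runtime_pm_get()/..._put_unchecked() pair becomes a tracked wakeref.
A sketch of the equivalence, assuming the standard i915 helpers:

	intel_wakeref_t wakeref;

	/* scoped form, as used in intel_vgpu_reset_resource() above */
	with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
		_clear_vgpu_fence(vgpu);

	/* roughly equivalent open-coded form; the cookie returned by get
	 * is handed back to put, which lets CONFIG_DRM_I915_DEBUG_RUNTIME_PM
	 * pair acquisitions with releases and flag leaks */
	wakeref = intel_runtime_pm_get(gvt->gt->uncore->rpm);
	_clear_vgpu_fence(vgpu);
	intel_runtime_pm_put(gvt->gt->uncore->rpm, wakeref);
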
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bb..072725a 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
-	if (WARN_ON(bytes > 4))
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	if (drm_WARN_ON(&i915->drm, bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+	if (drm_WARN_ON(&i915->drm,
+			offset + bytes > vgpu->gvt->device_info.cfg_space_size))
 		return -EINVAL;
 
 	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (drm_WARN_ON(&i915->drm, bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+	if (drm_WARN_ON(&i915->drm,
+			offset + bytes > vgpu->gvt->device_info.cfg_space_size))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
 	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
-		if (WARN_ON(bytes > 2))
+		if (drm_WARN_ON(&i915->drm, bytes > 2))
 			return -EINVAL;
 		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
 	}
 
 	switch (rounddown(offset, 4)) {
 	case PCI_ROM_ADDRESS:
-		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
 			return -EINVAL;
 		return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
 
 	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
-		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
 			return -EINVAL;
 		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
 
 	case INTEL_GVT_PCI_SWSCI:
-		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
 			return -EINVAL;
 		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
 		if (ret)
@@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		break;
 
 	case INTEL_GVT_PCI_OPREGION:
-		if (WARN_ON(!IS_ALIGNED(offset, 4)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
 			return -EINVAL;
 		ret = intel_vgpu_opregion_base_write_handler(vgpu,
 						   *(u32 *)p_data);
@@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
 	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
 
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
-				pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+		pci_resource_len(gvt->gt->i915->drm.pdev, 0);
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
-				pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+		pci_resource_len(gvt->gt->i915->drm.pdev, 2);
 
 	memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
 }
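As elsewhere in the series, WARN_ON() becomes drm_WARN_ON() so the splat is
attributed to a specific drm_device rather than being anonymous, which is
what you want on multi-GPU systems. A minimal sketch of the difference,
assuming the drm_print.h helpers:

	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (WARN_ON(bytes > 4))			/* backtrace, no device */
		return -EINVAL;

	if (drm_WARN_ON(&i915->drm, bytes > 4))	/* backtrace prefixed
						 * with the device name */
		return -EINVAL;
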
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21a176c..9e065ad 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -462,7 +462,7 @@ enum {
 
 struct parser_exec_state {
 	struct intel_vgpu *vgpu;
-	int ring_id;
+	const struct intel_engine_cs *engine;
 
 	int buf_type;
 
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 	},
 };
 
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
 {
 	const struct decode_info *d_info;
 
-	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
 	if (d_info == NULL)
 		return INVALID_OP;
 
 	return cmd >> (32 - d_info->op_len);
 }
 
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
-		unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+	       const struct intel_engine_cs *engine)
 {
 	struct cmd_entry *e;
 
 	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
-		if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+		if (opcode == e->info->opcode &&
+		    e->info->rings & engine->mask)
 			return e->info;
 	}
 	return NULL;
 }
 
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
-		u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+	     const struct intel_engine_cs *engine)
 {
 	u32 opcode;
 
-	opcode = get_opcode(cmd, ring_id);
+	opcode = get_opcode(cmd, engine);
 	if (opcode == INVALID_OP)
 		return NULL;
 
-	return find_cmd_entry(gvt, opcode, ring_id);
+	return find_cmd_entry(gvt, opcode, engine);
 }
 
 static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
 	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
 }
 
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
 {
 	const struct decode_info *d_info;
 	int i;
 
-	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
 	if (d_info == NULL)
 		return;
 
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 	int cnt = 0;
 	int i;
 
-	gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
-			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
-			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
-			s->ring_head, s->ring_tail);
+	gvt_dbg_cmd("  vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+		    " ring_head(%08lx) ring_tail(%08lx)\n",
+		    s->vgpu->id, s->engine->name,
+		    s->ring_start, s->ring_start + s->ring_size,
+		    s->ring_head, s->ring_tail);
 
 	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
 			s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
 			cmd_val(s, 2), cmd_val(s, 3));
 
-	print_opcode(cmd_val(s, 0), s->ring_id);
+	print_opcode(cmd_val(s, 0), s->engine);
 
 	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	unsigned int data;
 	u32 ring_base;
 	u32 nopid;
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 
 	if (!strcmp(cmd, "lri"))
 		data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 		return -EINVAL;
 	}
 
-	ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+	ring_base = s->engine->mmio_base;
 	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
 
 	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	 * update reg values in it into vregs, so LRIs in workload with
 	 * inhibit context will restore with correct values
 	 */
-	if (IS_GEN(gvt->dev_priv, 9) &&
-			intel_gvt_mmio_is_in_ctx(gvt, offset) &&
-			!strncmp(cmd, "lri", 3)) {
+	if (IS_GEN(s->engine->i915, 9) &&
+	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+	    !strncmp(cmd, "lri", 3)) {
 		intel_gvt_hypervisor_read_gpa(s->vgpu,
 			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
 		/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 {
 	int i, ret = 0;
 	int cmd_len = cmd_length(s);
-	struct intel_gvt *gvt = s->vgpu->gvt;
 	u32 valid_len = CMD_LEN(1);
 
 	/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 	}
 
 	for (i = 1; i < cmd_len; i += 2) {
-		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
-			if (s->ring_id == BCS0 &&
+		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+			if (s->engine->id == BCS0 &&
 			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
 				ret |= 0;
 			else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
 	int cmd_len = cmd_length(s);
 
 	for (i = 1; i < cmd_len; i += 2) {
-		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+		if (IS_BROADWELL(s->engine->i915))
 			ret |= ((cmd_reg_inhibit(s, i) ||
-					(cmd_reg_inhibit(s, i + 1)))) ?
+				 (cmd_reg_inhibit(s, i + 1)))) ?
 				-EBADRQC : 0;
 		if (ret)
 			break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
 	int cmd_len = cmd_length(s);
 
 	for (i = 1; i < cmd_len;) {
-		if (IS_BROADWELL(gvt->dev_priv))
+		if (IS_BROADWELL(s->engine->i915))
 			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
 		if (ret)
 			break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 				if (ret)
 					return ret;
 				if (index_mode) {
-					hws_pga = s->vgpu->hws_pga[s->ring_id];
+					hws_pga = s->vgpu->hws_pga[s->engine->id];
 					gma = hws_pga + gma;
 					patch_value(s, cmd_ptr(s, 2), gma);
 					val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 		return ret;
 
 	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
-		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
-				s->workload->pending_events);
+		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+			s->workload->pending_events);
 	return 0;
 }
 
 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
 {
-	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
-			s->workload->pending_events);
+	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+		s->workload->pending_events);
 	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
 	return 0;
 }
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
 static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = s->engine->i915;
 	struct plane_code_mapping gen8_plane_code[] = {
 		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
 		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1230,7 +1232,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 	dword2 = cmd_val(s, 2);
 
 	v = (dword0 & GENMASK(21, 19)) >> 19;
-	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
 		return -EBADRQC;
 
 	info->pipe = gen8_plane_code[v].pipe;
@@ -1250,7 +1252,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 		info->stride_reg = SPRSTRIDE(info->pipe);
 		info->surf_reg = SPRSURF(info->pipe);
 	} else {
-		WARN_ON(1);
+		drm_WARN_ON(&dev_priv->drm, 1);
 		return -EBADRQC;
 	}
 	return 0;
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
 static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = s->engine->i915;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 dword0 = cmd_val(s, 0);
 	u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	u32 stride, tile;
 
 	if (!info->async_flip)
 		return 0;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(s->engine->i915) >= 9) {
 		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 		struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = s->engine->i915;
 	struct intel_vgpu *vgpu = s->vgpu;
 
 	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 static int decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
-	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(s->engine->i915))
 		return gen8_decode_mi_display_flip(s, info);
-	if (INTEL_GEN(dev_priv) >= 9)
+	if (INTEL_GEN(s->engine->i915) >= 9)
 		return skl_decode_mi_display_flip(s, info);
 
 	return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 		if (ret)
 			return ret;
 		if (index_mode) {
-			hws_pga = s->vgpu->hws_pga[s->ring_id];
+			hws_pga = s->vgpu->hws_pga[s->engine->id];
 			gma = hws_pga + gma;
 			patch_value(s, cmd_ptr(s, 1), gma);
 			val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 	}
 	/* Check notify bit */
 	if ((cmd_val(s, 0) & (1 << 8)))
-		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
-				s->workload->pending_events);
+		set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+			s->workload->pending_events);
 	return ret;
 }
 
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 	/* Decide privilege based on address space */
-	if (cmd_val(s, 0) & (1 << 8) &&
-			!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+	if (cmd_val(s, 0) & BIT(8) &&
+	    !(s->vgpu->scan_nonprivbb & s->engine->mask))
 		return 0;
+
 	return 1;
 }
 
+static const char *repr_addr_type(unsigned int type)
+{
+	return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
 static int find_bb_size(struct parser_exec_state *s,
 			unsigned long *bb_size,
 			unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
 		return -EFAULT;
 
 	cmd = cmd_val(s, 0);
-	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
 	if (info == NULL) {
-		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
-				cmd, get_opcode(cmd, s->ring_id),
-				(s->buf_addr_type == PPGTT_BUFFER) ?
-				"ppgtt" : "ggtt", s->ring_id, s->workload);
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+			     cmd, get_opcode(cmd, s->engine),
+			     repr_addr_type(s->buf_addr_type),
+			     s->engine->name, s->workload);
 		return -EBADRQC;
 	}
 	do {
 		if (copy_gma_to_hva(s->vgpu, mm,
-				gma, gma + 4, &cmd) < 0)
+				    gma, gma + 4, &cmd) < 0)
 			return -EFAULT;
-		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
 		if (info == NULL) {
-			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
-				cmd, get_opcode(cmd, s->ring_id),
-				(s->buf_addr_type == PPGTT_BUFFER) ?
-				"ppgtt" : "ggtt", s->ring_id, s->workload);
+			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+				     cmd, get_opcode(cmd, s->engine),
+				     repr_addr_type(s->buf_addr_type),
+				     s->engine->name, s->workload);
 			return -EBADRQC;
 		}
 
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
 	u32 cmd = *(u32 *)va;
 	const struct cmd_info *info;
 
-	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
 	if (info == NULL) {
-		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
-			cmd, get_opcode(cmd, s->ring_id),
-			(s->buf_addr_type == PPGTT_BUFFER) ?
-			"ppgtt" : "ggtt", s->ring_id, s->workload);
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+			     cmd, get_opcode(cmd, s->engine),
+			     repr_addr_type(s->buf_addr_type),
+			     s->engine->name, s->workload);
 		return -EBADRQC;
 	}
 
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (bb->ppgtt)
 		start_offset = gma & ~I915_GTT_PAGE_MASK;
 
-	bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+	bb->obj = i915_gem_object_create_shmem(s->engine->i915,
 					       round_up(bb_size + start_offset,
 							PAGE_SIZE));
 	if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (cmd == MI_NOOP)
 		info = &cmd_info[mi_noop_index];
 	else
-		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
 
 	if (info == NULL) {
-		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
-				cmd, get_opcode(cmd, s->ring_id),
-				(s->buf_addr_type == PPGTT_BUFFER) ?
-				"ppgtt" : "ggtt", s->ring_id, s->workload);
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+			     cmd, get_opcode(cmd, s->engine),
+			     repr_addr_type(s->buf_addr_type),
+			     s->engine->name, s->workload);
 		return -EBADRQC;
 	}
 
 	s->info = info;
 
-	trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+	trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
 			  cmd_length(s), s->buf_type, s->buf_addr_type,
 			  s->workload, info->name);
 
 	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
 		ret = gvt_check_valid_cmd_length(cmd_length(s),
-			info->valid_len);
+						 info->valid_len);
 		if (ret)
 			return ret;
 	}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.buf_type = RING_BUFFER_INSTRUCTION;
 	s.buf_addr_type = GTT_BUFFER;
 	s.vgpu = workload->vgpu;
-	s.ring_id = workload->ring_id;
+	s.engine = workload->engine;
 	s.ring_start = workload->rb_start;
 	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
 	s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.workload = workload;
 	s.is_ctx_wa = false;
 
-	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
-		gma_head == gma_tail)
+	if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
 		return 0;
 
 	ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.buf_type = RING_BUFFER_INSTRUCTION;
 	s.buf_addr_type = GTT_BUFFER;
 	s.vgpu = workload->vgpu;
-	s.ring_id = workload->ring_id;
+	s.engine = workload->engine;
 	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
 	s.ring_size = ring_size;
 	s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
 	void *shadow_ring_buffer_va;
-	int ring_id = workload->ring_id;
 	int ret;
 
 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_tail = workload->rb_start + workload->rb_tail;
 	gma_top = workload->rb_start + guest_rb_size;
 
-	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+	if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
 		void *p;
 
 		/* realloc the new ring buffer if needed */
-		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
-				GFP_KERNEL);
+		p = krealloc(s->ring_scan_buffer[workload->engine->id],
+			     workload->rb_len, GFP_KERNEL);
 		if (!p) {
 			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
 			return -ENOMEM;
 		}
-		s->ring_scan_buffer[ring_id] = p;
-		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+		s->ring_scan_buffer[workload->engine->id] = p;
+		s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
 	}
 
-	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+	shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
 
 	/* get shadow ring buffer va */
 	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+	obj = i915_gem_object_create_shmem(workload->engine->i915,
 					   roundup(ctx_size + CACHELINE_BYTES,
 						   PAGE_SIZE));
 	if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
-		unsigned int opcode, unsigned long rings)
-{
-	const struct cmd_info *info = NULL;
-	unsigned int ring;
-
-	for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
-		info = find_cmd_entry(gvt, opcode, ring);
-		if (info)
-			break;
-	}
-	return info;
-}
-
 static int init_cmd_table(struct intel_gvt *gvt)
 {
+	unsigned int gen_type = intel_gvt_get_device_type(gvt);
 	int i;
-	struct cmd_entry *e;
-	const struct cmd_info *info;
-	unsigned int gen_type;
-
-	gen_type = intel_gvt_get_device_type(gvt);
 
 	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+		struct cmd_entry *e;
+
 		if (!(cmd_info[i].devices & gen_type))
 			continue;
 
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
 			return -ENOMEM;
 
 		e->info = &cmd_info[i];
-		info = find_cmd_entry_any_ring(gvt,
-				e->info->opcode, e->info->rings);
-		if (info) {
-			gvt_err("%s %s duplicated\n", e->info->name,
-					info->name);
-			kfree(e);
-			return -EEXIST;
-		}
 		if (cmd_info[i].opcode == OP_MI_NOOP)
 			mi_noop_index = i;
 
 		INIT_HLIST_NODE(&e->hlist);
 		add_cmd_entry(gvt, e);
 		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
-				e->info->name, e->info->opcode, e->info->flag,
-				e->info->devices, e->info->rings);
+			    e->info->name, e->info->opcode, e->info->flag,
+			    e->info->devices, e->info->rings);
 	}
+
 	return 0;
 }
 
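The cmd_parser conversion replaces the bare ring_id with the engine itself,
so per-engine data no longer needs an indirect dev_priv->engine[ring_id]
lookup. A compact before/after sketch of the pattern, using fields shown in
the hunks above:

	/* before: keyed by a bare integer ring id */
	u32 base = dev_priv->engine[s->ring_id]->mmio_base;
	bool hit = e->info->rings & BIT(s->ring_id);

	/* after: the engine carries its own mmio base, mask and name */
	u32 base = s->engine->mmio_base;
	bool hit = e->info->rings & s->engine->mask;
	gvt_dbg_cmd("ring %s\n", s->engine->name);
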
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 285f601..ec47d41 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
 static inline int mmio_diff_handler(struct intel_gvt *gvt,
 				    u32 offset, void *data)
 {
-	struct drm_i915_private *i915 = gvt->dev_priv;
 	struct mmio_diff_param *param = data;
 	struct diff_mmio *node;
 	u32 preg, vreg;
 
-	preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+	preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
 	vreg = vgpu_vreg(param->vgpu, offset);
 
 	if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
 	mutex_lock(&gvt->lock);
 	spin_lock_bh(&gvt->scheduler.mmio_context_lock);
 
-	mmio_hw_access_pre(gvt->dev_priv);
+	mmio_hw_access_pre(gvt->gt);
 	/* Recognize all the diff mmios to list. */
 	intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
-	mmio_hw_access_post(gvt->dev_priv);
+	mmio_hw_access_post(gvt->gt);
 
 	spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
 	mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
 vgpu_scan_nonprivbb_get(void *data, u64 *val)
 {
 	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
 	*val = vgpu->scan_nonprivbb;
 	return 0;
 }
@@ -142,42 +142,7 @@ static int
 vgpu_scan_nonprivbb_set(void *data, u64 val)
 {
 	struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	enum intel_engine_id id;
-	char buf[128], *s;
-	int len;
 
-	val &= (1 << I915_NUM_ENGINES) - 1;
-
-	if (vgpu->scan_nonprivbb == val)
-		return 0;
-
-	if (!val)
-		goto done;
-
-	len = sprintf(buf,
-		"gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
-		vgpu->id);
-
-	s = buf + len;
-
-	for (id = 0; id < I915_NUM_ENGINES; id++) {
-		struct intel_engine_cs *engine;
-
-		engine = dev_priv->engine[id];
-		if (engine && (val & (1 << id))) {
-			len = snprintf(s, 4, "%d, ", engine->id);
-			s += len;
-		} else
-			val &=  ~(1 << id);
-	}
-
-	if (val)
-		sprintf(s, "low performance expected.");
-
-	pr_warn("%s\n", buf);
-
-done:
 	vgpu->scan_nonprivbb = val;
 	return 0;
 }
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_debugfs_init(struct intel_gvt *gvt)
 {
-	struct drm_minor *minor = gvt->dev_priv->drm.primary;
+	struct drm_minor *minor = gvt->gt->i915->drm.primary;
 
 	gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
 
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index a62bdf9..6e5c988 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
 
 static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
 	if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
 		return 0;
@@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 
 int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
-	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+	if (drm_WARN_ON(&dev_priv->drm,
+			pipe < PIPE_A || pipe >= I915_MAX_PIPES))
 		return -EINVAL;
 
 	if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
@@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	int pipe;
 
 	if (IS_BROXTON(dev_priv)) {
@@ -319,9 +320,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 				    int type, unsigned int resolution)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
-	if (WARN_ON(resolution >= GVT_EDID_NUM))
+	if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
 		return -EINVAL;
 
 	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
@@ -389,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_vgpu_irq *irq = &vgpu->irq;
 	int vblank_event[] = {
 		[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
 	int pipe;
 
 	mutex_lock(&vgpu->vgpu_lock);
-	for_each_pipe(vgpu->gvt->dev_priv, pipe)
+	for_each_pipe(vgpu->gvt->gt->i915, pipe)
 		emulate_vblank_on_pipe(vgpu, pipe);
 	mutex_unlock(&vgpu->vgpu_lock);
 }
@@ -454,11 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
  */
 void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 
 	/* TODO: add more platforms support */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
-		IS_COFFEELAKE(dev_priv)) {
+	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+	    IS_COFFEELAKE(i915)) {
 		if (connected) {
 			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
 				SFUSE_STRAP_DDID_DETECTED;
@@ -484,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
  */
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
 	    IS_COFFEELAKE(dev_priv))
@@ -506,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
 	intel_vgpu_init_i2c_edid(vgpu);
 
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index ae139f0..37fc460 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -67,11 +67,11 @@ static int vgpu_gem_get_pages(
 	u32 page_num;
 
 	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
-	if (WARN_ON(!fb_info))
+	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
 		return -ENODEV;
 
 	vgpu = fb_info->obj->vgpu;
-	if (WARN_ON(!vgpu))
+	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
 		return -ENODEV;
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
 
 int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
 	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 	struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
 /* To associate an exposed dmabuf with the dmabuf_obj */
 int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 {
-	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
 	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124..190651d 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 			unsigned int offset, void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	int port, pin_select;
 
 	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	if (pin_select == 0)
 		return 0;
 
-	if (IS_BROXTON(dev_priv))
+	if (IS_BROXTON(i915))
 		port = bxt_get_port_from_gmbus0(pin_select);
-	else if (IS_COFFEELAKE(dev_priv))
+	else if (IS_COFFEELAKE(i915))
 		port = cnp_get_port_from_gmbus0(pin_select);
 	else
 		port = get_port_from_gmbus0(pin_select);
-	if (WARN_ON(port < 0))
+	if (drm_WARN_ON(&i915->drm, port < 0))
 		return 0;
 
 	vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	void *p_data, unsigned int bytes)
 {
-	WARN_ON(1);
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	drm_WARN_ON(&i915->drm, 1);
 	return 0;
 }
 
@@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
-	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
 		return -EINVAL;
 
 	if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
@@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
 int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
 		return -EINVAL;
 
 	if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
@@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 				unsigned int offset,
 				void *p_data)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
 	int msg_length, ret_msg_size;
 	int msg, addr, ctrl, op;
@@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 		 * support the gfx driver to do EDID access.
 		 */
 	} else {
-		if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+		if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
 			return;
-		if (WARN_ON(msg_length != 4))
+		if (drm_WARN_ON(&i915->drm, msg_length != 4))
 			return;
 		if (i2c_edid->edid_available && i2c_edid->slave_selected) {
 			unsigned char val = edid_get_byte(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a11..dd25c30 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
 #define _EL_OFFSET_STATUS_BUF   0x370
 #define _EL_OFFSET_STATUS_PTR   0x3A0
 
-#define execlist_ring_mmio(gvt, ring_id, offset) \
-	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
 
 #define valid_context(ctx) ((ctx)->valid)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
 	[VECS0] = VECS_AS_CONTEXT_SWITCH,
 };
 
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
 {
-	if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;
 
-	return context_switch_events[ring_id];
+	return context_switch_events[engine->id];
 }
 
 static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
 	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
 	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct execlist_status_format status;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
-			ring_id, _EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
 }
 
 static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
-		struct execlist_context_status_format *status,
-		bool trigger_interrupt_later)
+			       struct execlist_context_status_format *status,
+			       bool trigger_interrupt_later)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
 	unsigned long hwsp_gpa;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
-	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_BUF);
+	ctx_status_ptr_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+	ctx_status_buf_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 
 	/* Update the CSB and CSB write pointer in HWSP */
 	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-					 vgpu->hws_pga[ring_id]);
+					 vgpu->hws_pga[execlist->engine->id]);
 	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
-			write_pointer * 8,
-			status, 8);
+					       hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+					       status, 8);
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa +
-			intel_hws_csb_write_index(dev_priv) * 4,
-			&write_pointer, 4);
+					       hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+					       &write_pointer, 4);
 	}
 
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
-		vgpu->id, write_pointer, offset, status->ldw, status->udw);
+		   vgpu->id, write_pointer, offset, status->ldw, status->udw);
 
 	if (trigger_interrupt_later)
 		return;
 
 	intel_vgpu_trigger_virtual_event(vgpu,
-			ring_id_to_context_switch_event(execlist->ring_id));
+					 to_context_switch_event(execlist->engine));
 }
 
 static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
 		struct intel_vgpu_execlist *execlist)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 	struct execlist_status_format status;
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
 	int ret;
 
 	if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+					   ctx);
 	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
 		return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist =
+		&s->execlist[workload->engine->id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
 	bool lite_restore = false;
 	int ret = 0;
 
-	gvt_dbg_el("complete workload %p status %d\n", workload,
-			workload->status);
+	gvt_dbg_el("complete workload %p status %d\n",
+		   workload, workload->status);
 
-	if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+	if (workload->status || vgpu->resetting_eng & workload->engine->mask)
 		goto out;
 
-	if (!list_empty(workload_q_head(vgpu, ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, workload->engine))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -436,14 +429,15 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	return ret;
 }
 
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
-		struct execlist_ctx_descriptor_format *desc,
-		bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+			  const struct intel_engine_cs *engine,
+			  struct execlist_ctx_descriptor_format *desc,
+			  bool emulate_schedule_in)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;
 
-	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 	if (IS_ERR(workload))
 		return PTR_ERR(workload);
 
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	workload->emulate_schedule_in = emulate_schedule_in;
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+		workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
 
 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
-			emulate_schedule_in);
+		   emulate_schedule_in);
 
 	intel_vgpu_queue_workload(workload);
 	return 0;
 }
 
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
 
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
 		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+		ret = submit_context(vgpu, engine, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
@@ -504,22 +499,22 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 	return -EINVAL;
 }
 
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
 
 	memset(execlist, 0, sizeof(*execlist));
 
 	execlist->vgpu = vgpu;
-	execlist->ring_id = ring_id;
+	execlist->engine = engine;
 	execlist->slot[0].index = 0;
 	execlist->slot[1].index = 1;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
+	ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 static void clean_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	intel_engine_mask_t tmp;
@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 static void reset_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
-		init_vgpu_execlist(vgpu, engine->id);
+		init_vgpu_execlist(vgpu, engine);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd..d62cd14 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
 	struct intel_vgpu_execlist_slot *running_slot;
 	struct intel_vgpu_execlist_slot *pending_slot;
 	struct execlist_ctx_descriptor_format *running_context;
-	int ring_id;
 	struct intel_vgpu *vgpu;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
+	const struct intel_engine_cs *engine;
 };
 
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
 
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
 
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine);
 
 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 			       intel_engine_mask_t engine_mask);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 8bb292b..0889ad8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
 static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 	u32 tiled, int stride_mask, int bpp)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
 	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
 int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	struct intel_vgpu_primary_plane_format *plane)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	u32 val, fmt;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int pipe;
 
 	pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
 int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	struct intel_vgpu_cursor_plane_format *plane)
 {
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	u32 val, mode, index;
 	u32 alpha_plane, alpha_force;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int pipe;
 
 	pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index b0c1fda..990a181 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
 
 static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 {
-	struct drm_i915_private *i915 = gvt->dev_priv;
-
-	*(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+	*(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
 							    _MMIO(offset));
 	return 0;
 }
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
 static int expose_firmware_sysfs(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
 static void clean_firmware_sysfs(struct intel_gvt *gvt)
 {
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
 
 	device_remove_bin_file(&pdev->dev, &firmware_attr);
 	vfree(firmware_attr.private);
@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 			   const struct firmware *fw)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
 	struct gvt_firmware_header *h;
 	unsigned long id, crc32_start;
 	const void *mem;
@@ -208,8 +205,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 int intel_gvt_load_firmware(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	struct pci_dev *pdev = dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
 	struct intel_gvt_firmware *firmware = &gvt->firmware;
 	struct gvt_firmware_header *h;
 	const struct firmware *fw;
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	gvt_dbg_core("request hw state firmware %s...\n", path);
 
-	ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+	ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
 	kfree(path);
 
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4a48280..2a4b23f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 /* translate a guest gmadr to host gmadr */
 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
 {
-	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
-		 "invalid guest gmadr %llx\n", g_addr))
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+		     "invalid guest gmadr %llx\n", g_addr))
 		return -EACCES;
 
 	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
@@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
 /* translate a host gmadr to guest gmadr */
 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
 {
-	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
-		 "invalid host gmadr %llx\n", h_addr))
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+		     "invalid host gmadr %llx\n", h_addr))
 		return -EACCES;
 
 	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
@@ -275,24 +279,23 @@ static inline int get_pse_type(int type)
 	return gtt_type_table[type].pse_entry_type;
 }
 
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 
 	return readq(addr);
 }
 
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
 {
-	mmio_hw_access_pre(dev_priv);
-	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-	mmio_hw_access_post(dev_priv);
+	mmio_hw_access_pre(gt);
+	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	mmio_hw_access_post(gt);
 }
 
-static void write_pte64(struct drm_i915_private *dev_priv,
-		unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
 {
-	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 
 	writeq(pte, addr);
 }
@@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
 		if (WARN_ON(ret))
 			return ret;
 	} else if (!pt) {
-		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
 	} else {
 		e->val64 = *((u64 *)pt + index);
 	}
@@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
 		if (WARN_ON(ret))
 			return ret;
 	} else if (!pt) {
-		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
 	} else {
 		*((u64 *)pt + index) = e->val64;
 	}
@@ -734,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
 
 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
-	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
 
 	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
 
@@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 {
-	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 	struct intel_vgpu_ppgtt_spt *spt = NULL;
 	dma_addr_t daddr;
 	int ret;
@@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 		struct intel_gvt_gtt_entry *e)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	enum intel_gvt_gtt_type cur_pt_type;
@@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
 
 		if (!gtt_type_is_pt(cur_pt_type) ||
 				!gtt_type_is_pt(cur_pt_type + 1)) {
-			WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+			drm_WARN(&i915->drm, 1,
+				 "Invalid page table type, cur_pt_type is: %d\n",
+				 cur_pt_type);
 			return -EINVAL;
 		}
 
@@ -1044,7 +1050,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
 
 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
 	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
 		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	unsigned long pfn;
 
-	if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+	if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
 		return 0;
 
 	pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2314,7 +2320,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	ggtt_invalidate_pte(vgpu, &e);
 
 	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
-	ggtt_invalidate(gvt->dev_priv);
+	ggtt_invalidate(gvt->gt);
 	return 0;
 }
 
@@ -2347,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		enum intel_gvt_gtt_type type)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int page_entry_num = I915_GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
 	int i;
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 	dma_addr_t daddr;
 
-	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+	if (drm_WARN_ON(&i915->drm,
+			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
 
 	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
@@ -2410,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
 	int i;
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 	dma_addr_t daddr;
 
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2682,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
-	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
 	dma_addr_t daddr;
 
 	gvt_dbg_core("init gtt\n");
@@ -2731,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
-	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &gvt->gt->i915->drm.pdev->dev;
 	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
 					I915_GTT_PAGE_SHIFT);
 
@@ -2779,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
 	struct intel_gvt_gtt_entry old_entry;
@@ -2809,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
 	}
 
-	ggtt_invalidate(dev_priv);
+	ggtt_invalidate(gvt->gt);
 }
 
 /**
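
read_pte64()/write_pte64() above now take the i915_ggtt directly: ggtt->gsm
is the iomapped stolen-memory window holding the global GTT's PTEs, so PTE n
is simply the nth 64-bit word behind it. A short illustrative use, assuming
a valid ggtt pointer and a hypothetical guest graphics memory address gma
(4 KiB pages, so one PTE per page):

	u64 gma = 0x10000;				/* hypothetical GGTT offset */
	u64 pte = read_pte64(ggtt, gma >> PAGE_SHIFT);	/* PTE index 16 */
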
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eef..9e17878 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
 #include <linux/kthread.h>
 
 #include "i915_drv.h"
+#include "intel_gvt.h"
 #include "gvt.h"
 #include <linux/vfio.h>
 #include <linux/mdev.h>
@@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = {
 static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
 		const char *name)
 {
+	const char *driver_name =
+		dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
 	int i;
-	struct intel_vgpu_type *t;
-	const char *driver_name = dev_driver_string(
-			&gvt->dev_priv->drm.pdev->dev);
 
+	name += strlen(driver_name) + 1;
 	for (i = 0; i < gvt->num_types; i++) {
-		t = &gvt->types[i];
-		if (!strncmp(t->name, name + strlen(driver_name) + 1,
-			sizeof(t->name)))
+		struct intel_vgpu_type *t = &gvt->types[i];
+
+		if (!strncmp(t->name, name, sizeof(t->name)))
 			return t;
 	}
 
@@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = {
 	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
 };
 
-static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
-		struct attribute_group ***intel_vgpu_type_groups)
+static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
 {
-	*type_attrs = gvt_type_attrs;
 	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
 	return true;
 }
@@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 static void init_device_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
 
 	info->max_support_vgpus = 8;
 	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
 
 /**
  * intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
  *
  * This function is called at the driver unloading stage, to free the
  * resources owned by a GVT device.
  *
  */
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
 {
-	struct intel_gvt *gvt = to_gvt(dev_priv);
+	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
 
-	if (WARN_ON(!gvt))
+	if (drm_WARN_ON(&i915->drm, !gvt))
 		return;
 
 	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_clean_mmio_info(gvt);
 	idr_destroy(&gvt->vgpu_idr);
 
-	kfree(dev_priv->gvt);
-	dev_priv->gvt = NULL;
+	kfree(i915->gvt);
 }
 
 /**
  * intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
  *
  * This function is called at the initialization stage, to initialize
  * necessary GVT components.
@@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
  * Zero on success, negative error code if failed.
  *
  */
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
 {
 	struct intel_gvt *gvt;
 	struct intel_vgpu *vgpu;
 	int ret;
 
-	if (WARN_ON(dev_priv->gvt))
+	if (drm_WARN_ON(&i915->drm, i915->gvt))
 		return -EEXIST;
 
 	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
 	mutex_init(&gvt->sched_lock);
-	gvt->dev_priv = dev_priv;
+	gvt->gt = &i915->gt;
+	i915->gvt = gvt;
 
 	init_device_info(gvt);
 
@@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	intel_gvt_debugfs_init(gvt);
 
 	gvt_dbg_core("gvt device initialization is done\n");
-	dev_priv->gvt = gvt;
-	intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+	intel_gvt_host.dev = &i915->drm.pdev->dev;
 	intel_gvt_host.initialized = true;
 	return 0;
 
@@ -404,6 +402,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 out_clean_idr:
 	idr_destroy(&gvt->vgpu_idr);
 	kfree(gvt);
+	i915->gvt = NULL;
 	return ret;
 }
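
The intel_gvt_find_vgpu_type() rewrite above hoists the driver-name prefix
skip out of the comparison loop. A worked example with hypothetical values
(the real strings come from dev_driver_string() and the mdev type kobject):

	const char *driver_name = "i915";	/* dev_driver_string() result */
	const char *name = "i915-GVTg_V5_8";	/* hypothetical kobject name */

	name += strlen(driver_name) + 1;	/* skip the "i915-" prefix */
	/* name now points at "GVTg_V5_8", comparable to gvt->types[i].name */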
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b47c6ac..58c2c79 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,41 +196,21 @@ struct intel_vgpu {
 
 	struct dentry *debugfs;
 
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-	struct {
-		struct mdev_device *mdev;
-		struct vfio_region *region;
-		int num_regions;
-		struct eventfd_ctx *intx_trigger;
-		struct eventfd_ctx *msi_trigger;
-
-		/*
-		 * Two caches are used to avoid mapping duplicated pages (eg.
-		 * scratch pages). This help to reduce dma setup overhead.
-		 */
-		struct rb_root gfn_cache;
-		struct rb_root dma_addr_cache;
-		unsigned long nr_cache_entries;
-		struct mutex cache_lock;
-
-		struct notifier_block iommu_notifier;
-		struct notifier_block group_notifier;
-		struct kvm *kvm;
-		struct work_struct release_work;
-		atomic_t released;
-		struct vfio_device *vfio_device;
-	} vdev;
-#endif
+	/* Hypervisor-specific device state. */
+	void *vdev;
 
 	struct list_head dmabuf_obj_list_head;
 	struct mutex dmabuf_lock;
 	struct idr object_idr;
 
-	struct completion vblank_done;
-
 	u32 scan_nonprivbb;
 };
 
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+	return vgpu->vdev;
+}
+
 /* validating GM healthy status*/
 #define vgpu_is_vm_unhealthy(ret_val) \
 	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
@@ -306,7 +286,7 @@ struct intel_gvt {
 	/* scheduler scope lock, protect gvt and vgpu schedule related data */
 	struct mutex sched_lock;
 
-	struct drm_i915_private *dev_priv;
+	struct intel_gt *gt;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
 	struct intel_gvt_device_info device_info;
@@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
 #define HOST_FENCE 4
 
-/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)
 
-#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
-	((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+/* Aperture/GM space definitions for GVT device */
+#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
+
+#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
 #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
 				   + gvt_hidden_sz(gvt) - 1)
 
-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
 
 /* Aperture/GM space definitions for vGPU */
 #define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
@@ -570,8 +551,7 @@ struct intel_gvt_ops {
 	void (*vgpu_deactivate)(struct intel_vgpu *);
 	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
 			const char *name);
-	bool (*get_gvt_attrs)(struct attribute ***type_attrs,
-			struct attribute_group ***intel_vgpu_type_groups);
+	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
 	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -586,14 +566,14 @@ enum {
 	GVT_FAILSAFE_GUEST_ERR,
 };
 
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
 {
-	intel_runtime_pm_get(&dev_priv->runtime_pm);
+	intel_runtime_pm_get(gt->uncore->rpm);
 }
 
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
 {
-	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
 }
 
 /**
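
mmio_hw_access_pre()/mmio_hw_access_post() are now keyed on the intel_gt
rather than the whole device, taking the runtime-pm reference via
gt->uncore->rpm. Every raw MMIO poke is bracketed by the pair, as the new
ggtt_invalidate() in gtt.c above shows; the general pattern (a sketch,
assuming a valid gvt pointer) is:

	mmio_hw_access_pre(gvt->gt);	/* hold an rpm wakeref for this GT */
	intel_uncore_write(gvt->gt->uncore, GFX_FLSH_CNTL_GEN6,
			   GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gvt->gt);	/* drop the wakeref */
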
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6d28d72..0182e2a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,17 @@
 
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 {
-	if (IS_BROADWELL(gvt->dev_priv))
+	struct drm_i915_private *i915 = gvt->gt->i915;
+
+	if (IS_BROADWELL(i915))
 		return D_BDW;
-	else if (IS_SKYLAKE(gvt->dev_priv))
+	else if (IS_SKYLAKE(i915))
 		return D_SKL;
-	else if (IS_KABYLAKE(gvt->dev_priv))
+	else if (IS_KABYLAKE(i915))
 		return D_KBL;
-	else if (IS_BROXTON(gvt->dev_priv))
+	else if (IS_BROXTON(i915))
 		return D_BXT;
-	else if (IS_COFFEELAKE(gvt->dev_priv))
+	else if (IS_COFFEELAKE(i915))
 		return D_CFL;
 
 	return 0;
@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
 }
 
 /**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
  * @gvt: a GVT device
  * @offset: register offset
  *
  * Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
  */
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
-		unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
 {
-	enum intel_engine_id id;
 	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	offset &= ~GENMASK(11, 0);
-	for_each_engine(engine, gvt->dev_priv, id) {
+	for_each_engine(engine, gvt->gt, id)
 		if (engine->mmio_base == offset)
-			return id;
-	}
-	return -ENODEV;
+			return engine;
+
+	return NULL;
 }
 
 #define offset_to_fence_num(offset) \
@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
 {
 	u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
 
-	if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+	if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
 		if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
 			gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
 		else if (!ips)
@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
 	unsigned int fence_num = offset_to_fence_num(off);
 	int ret;
 
@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		return ret;
 	write_vreg(vgpu, off, p_data, bytes);
 
-	mmio_hw_access_pre(dev_priv);
+	mmio_hw_access_pre(gvt->gt);
 	intel_vgpu_write_fence(vgpu, fence_num,
 			vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
-	mmio_hw_access_post(dev_priv);
+	mmio_hw_access_post(gvt->gt);
 	return 0;
 }
 
@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 	old = vgpu_vreg(vgpu, offset);
 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
-	if (INTEL_GEN(vgpu->gvt->dev_priv)  >=  9) {
+	if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
 		switch (offset) {
 		case FORCEWAKE_RENDER_GEN9_REG:
 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
 			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
 		}
-		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+		engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
 	}
 
 	/* vgpu_lock already hold by emulate mmio r/w */
@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
 };
 
 /* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
 {
 	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
 	i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
 	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
-	u32 ring_base;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	int ret = -EINVAL;
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 
-	if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
-		gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
-			vgpu->id, ring_id, offset, bytes);
-		return ret;
+	if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+			vgpu->id, offset, bytes);
+		return -EINVAL;
 	}
 
-	ring_base = dev_priv->engine[ring_id]->mmio_base;
-
-	if (in_whitelist(reg_nonpriv) ||
-		reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
-		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
-			bytes);
-	} else
+	if (!in_whitelist(reg_nonpriv) &&
+	    reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
-			vgpu->id, *(u32 *)p_data, offset);
+			vgpu->id, reg_nonpriv, offset);
+	} else
+		intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
 
 	return 0;
 }
@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
 static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	u32 pipe = DSPSURF_TO_PIPE(offset);
 	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
 
@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
 			       unsigned int offset, void *p_data,
 			       unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	enum pipe pipe = REG_50080_TO_PIPE(offset);
 	enum plane_id plane = REG_50080_TO_PLANE(offset);
 	int event = SKL_FLIP_EVENT(pipe, plane);
@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
 static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
 		unsigned int reg)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	enum intel_gvt_event_type event;
 
 	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
@@ -836,7 +833,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
 		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
 		event = AUX_CHANNEL_D;
 	else {
-		WARN_ON(true);
+		drm_WARN_ON(&dev_priv->drm, true);
 		return -EINVAL;
 	}
 
@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
 
-	if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+	if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 		/* SKL DPB/C/D aux ctl register changed */
 		return 0;
-	} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+	} else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
 		   offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
 		/* write to the data registers */
 		return 0;
@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 
 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+	struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
 	char *env[3] = {NULL, NULL, NULL};
 	char vmid_str[20];
 	char display_ready_str[20];
@@ -1306,13 +1302,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int pf_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 val = *(u32 *)p_data;
 
 	if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
 	   offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
 	   offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
-		WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
-			  vgpu->id);
+		drm_WARN_ONCE(&i915->drm, true,
+			      "VM(%d): guest is trying to scale a plane\n",
+			      vgpu->id);
 		return 0;
 	}
 
@@ -1360,13 +1358,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 mode;
 
 	write_vreg(vgpu, offset, p_data, bytes);
 	mode = vgpu_vreg(vgpu, offset);
 
 	if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
-		WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+		drm_WARN_ONCE(&i915->drm, 1,
+				"VM(%d): iGVT-g doesn't support GuC\n",
 				vgpu->id);
 		return 0;
 	}
@@ -1377,10 +1377,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	u32 trtte = *(u32 *)p_data;
 
 	if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
-		WARN(1, "VM(%d): Use physical address for TRTT!\n",
+		drm_WARN(&i915->drm, 1,
+				"VM(%d): Use physical address for TRTT!\n",
 				vgpu->id);
 		return -EINVAL;
 	}
@@ -1427,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	switch (cmd) {
 	case GEN9_PCODE_READ_MEM_LATENCY:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
-			 || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1439,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 				*data0 = 0x1e1a1100;
 			else
 				*data0 = 0x61514b3d;
-		} else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		} else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1452,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 		}
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
-		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
-			 || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+		if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+		    IS_COFFEELAKE(vgpu->gvt->gt->i915))
 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
 	case GEN6_PCODE_READ_RC6VIDS:
@@ -1478,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	u32 value = *(u32 *)p_data;
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 
 	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
 			      offset, value);
 		return -EINVAL;
 	}
+
 	/*
 	 * Need to emulate all the HWSP register write to ensure host can
 	 * update the VM CSB status correctly. Here listed registers can
 	 * support BDW, SKL or other platforms with same HWSP registers.
 	 */
-	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+	if (unlikely(!engine)) {
 		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
 			     offset);
 		return -EINVAL;
 	}
-	vgpu->hws_pga[ring_id] = value;
+	vgpu->hws_pga[engine->id] = value;
 	gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
 		     vgpu->id, value, offset);
 
@@ -1507,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 {
 	u32 v = *(u32 *)p_data;
 
-	if (IS_BROXTON(vgpu->gvt->dev_priv))
+	if (IS_BROXTON(vgpu->gvt->gt->i915))
 		v &= (1 << 31) | (1 << 29);
 	else
 		v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1654,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
-	int ring_id;
-	u32 ring_base;
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(gvt, offset);
 
-	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
 	/**
 	 * Read HW reg in following case
 	 * a. the offset isn't a ring mmio
 	 * b. the offset's ring is running on hw.
 	 * c. the offset is ring time stamp mmio
 	 */
-	if (ring_id >= 0)
-		ring_base = dev_priv->engine[ring_id]->mmio_base;
 
-	if (ring_id < 0 || vgpu  == gvt->scheduler.engine_owner[ring_id] ||
-	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
-	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
-		mmio_hw_access_pre(dev_priv);
-		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-		mmio_hw_access_post(dev_priv);
+	if (!engine ||
+	    vgpu == gvt->scheduler.engine_owner[engine->id] ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+		mmio_hw_access_pre(gvt->gt);
+		vgpu_vreg(vgpu, offset) =
+			intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+		mmio_hw_access_post(gvt->gt);
 	}
 
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1682,22 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+	const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 	struct intel_vgpu_execlist *execlist;
 	u32 data = *(u32 *)p_data;
 	int ret = 0;
 
-	if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+	if (drm_WARN_ON(&i915->drm, !engine))
 		return -EINVAL;
 
-	execlist = &vgpu->submission.execlist[ring_id];
+	execlist = &vgpu->submission.execlist[engine->id];
 
 	execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
-		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+		ret = intel_vgpu_submit_execlist(vgpu, engine);
 		if(ret)
-			gvt_vgpu_err("fail submit workload on ring %d\n",
-				ring_id);
+			gvt_vgpu_err("failed to submit workload on ring %s\n",
+				     engine->name);
 	}
 
 	++execlist->elsp_dwords.index;
@@ -1709,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	u32 data = *(u32 *)p_data;
-	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 	bool enable_execlist;
 	int ret;
 
 	(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
-	if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+	if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
 		(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
 	write_vreg(vgpu, offset, p_data, bytes);
 
@@ -1723,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		return 0;
 	}
 
-	if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+	if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
 	    data & _MASKED_BIT_ENABLE(2)) {
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 		return 0;
@@ -1743,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
-		gvt_dbg_core("EXECLIST %s on ring %d\n",
-				(enable_execlist ? "enabling" : "disabling"),
-				ring_id);
+		gvt_dbg_core("EXECLIST %s on ring %s\n",
+			     (enable_execlist ? "enabling" : "disabling"),
+			     engine->name);
 
 		if (!enable_execlist)
 			return 0;
 
 		ret = intel_vgpu_select_submission_ops(vgpu,
-			       BIT(ring_id),
-			       INTEL_VGPU_EXECLIST_SUBMISSION);
+						       engine->mask,
+						       INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
 
@@ -1876,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
 
 static int init_generic_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2415,9 +2419,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
 	MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
 
-	MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
-	MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
-	MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
+	MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
 	MMIO_D(SPLL_CTL, D_ALL);
 	MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
 	MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
@@ -2693,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 static int init_bdw_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2882,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 
 static int init_skl_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -2902,7 +2906,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
 	MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
 
-	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+	MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
 
 	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3131,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 
 static int init_bxt_mmio_info(struct intel_gvt *gvt)
 {
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	int ret;
 
 	MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3367,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
 int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 	int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
 	int ret;
 
@@ -3379,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 	if (ret)
 		goto err;
 
-	if (IS_BROADWELL(dev_priv)) {
+	if (IS_BROADWELL(i915)) {
 		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
-	} else if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_COFFEELAKE(dev_priv)) {
+	} else if (IS_SKYLAKE(i915) ||
+		   IS_KABYLAKE(i915) ||
+		   IS_COFFEELAKE(i915)) {
 		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_BROXTON(i915)) {
 		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
@@ -3541,13 +3545,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 			   void *pdata, unsigned int bytes, bool is_read)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_mmio_info *mmio_info;
 	struct gvt_mmio_block *mmio_block;
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 8))
+	if (drm_WARN_ON(&i915->drm, bytes > 8))
 		return -EINVAL;
 
 	/*
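
intel_gvt_render_mmio_to_engine() replaces the old ring-id lookup with a
direct engine pointer, relying on each engine's registers living in their
own 4 KiB page at engine->mmio_base. Masking off the low 12 bits of any
register offset therefore recovers the owning page; a worked example for
the render ring (base 0x2000 on most platforms):

	u32 offset = 0x2030;		/* RING_TAIL of the render ring */

	offset &= ~GENMASK(11, 0);	/* 0x2030 & ~0xfff == 0x2000 */
	/* offset now equals the render engine's mmio_base */
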
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 11accd3..540017f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	unsigned int reg, void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	struct intel_gvt_irq_info *info;
 	u32 ier = *(u32 *)p_data;
@@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	vgpu_vreg(vgpu, reg) = ier;
 
 	info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
-	if (WARN_ON(!info))
+	if (drm_WARN_ON(&i915->drm, !info))
 		return -EINVAL;
 
 	if (info->has_upstream_irq)
@@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 	void *p_data, unsigned int bytes)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
 		iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;
@@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 	trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
 		       (vgpu_vreg(vgpu, reg) ^ iir));
 
-	if (WARN_ON(!info))
+	if (drm_WARN_ON(&i915->drm, !info))
 		return -EINVAL;
 
 	vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
 static void update_upstream_irq(struct intel_vgpu *vgpu,
 		struct intel_gvt_irq_info *info)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt_irq *irq = &vgpu->gvt->irq;
 	struct intel_gvt_irq_map *map = irq->irq_map;
 	struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
 		if (!up_irq_info)
 			up_irq_info = irq->info[map->up_irq_group];
 		else
-			WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+			drm_WARN_ON(&i915->drm, up_irq_info !=
+				    irq->info[map->up_irq_group]);
 
 		bit = map->up_irq_bit;
 
@@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
 			clear_bits |= (1 << bit);
 	}
 
-	if (WARN_ON(!up_irq_info))
+	if (drm_WARN_ON(&i915->drm, !up_irq_info))
 		return;
 
 	if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
@@ -536,7 +540,7 @@ static void gen8_init_irq(
 	SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
 	SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
 
-	if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+	if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
 		SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
 			INTEL_GVT_IRQ_INFO_GT1);
 		SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +572,7 @@ static void gen8_init_irq(
 	SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
 	SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
 
-	if (IS_BROADWELL(gvt->dev_priv)) {
+	if (IS_BROADWELL(gvt->gt->i915)) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +585,7 @@ static void gen8_init_irq(
 
 		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+	} else if (INTEL_GEN(gvt->gt->i915) >= 9) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
 void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
 	enum intel_gvt_event_type event)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq *irq = &gvt->irq;
 	gvt_event_virt_handler_t handler;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 
 	handler = get_event_virt_handler(irq, event);
-	WARN_ON(!handler);
+	drm_WARN_ON(&i915->drm, !handler);
 
 	handler(irq, event, vgpu);
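
The kvmgt.c changes below complete the vdev split begun in gvt.h: struct
intel_vgpu now carries only an opaque void *vdev, and the KVMGT backend owns
the concrete struct kvmgt_vdev behind it. The resulting access pattern (a
sketch mirroring gvt_cache_destroy() below) is:

	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);	/* cast the blob back */

	mutex_lock(&vdev->cache_lock);
	/* ... walk vdev->gfn_cache / vdev->dma_addr_cache ... */
	mutex_unlock(&vdev->cache_lock);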
 
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3259a1f..074c4ef 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
 	struct kref ref;
 };
 
+struct kvmgt_vdev {
+	struct intel_vgpu *vgpu;
+	struct mdev_device *mdev;
+	struct vfio_region *region;
+	int num_regions;
+	struct eventfd_ctx *intx_trigger;
+	struct eventfd_ctx *msi_trigger;
+
+	/*
+	 * Two caches are used to avoid mapping duplicated pages (e.g.
+	 * scratch pages). This helps to reduce dma setup overhead.
+	 */
+	struct rb_root gfn_cache;
+	struct rb_root dma_addr_cache;
+	unsigned long nr_cache_entries;
+	struct mutex cache_lock;
+
+	struct notifier_block iommu_notifier;
+	struct notifier_block group_notifier;
+	struct kvm *kvm;
+	struct work_struct release_work;
+	atomic_t released;
+	struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+	return intel_vgpu_vdev(vgpu);
+}
+
 static inline bool handle_valid(unsigned long handle)
 {
 	return !!(handle & ~0xff);
@@ -120,6 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long size)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	int total_pages;
 	int npage;
 	int ret;
@@ -129,8 +160,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 	for (npage = 0; npage < total_pages; npage++) {
 		unsigned long cur_gfn = gfn + npage;
 
-		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
-		WARN_ON(ret != 1);
+		ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+		drm_WARN_ON(&i915->drm, ret != 1);
 	}
 }
 
@@ -152,7 +183,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		unsigned long cur_gfn = gfn + npage;
 		unsigned long pfn;
 
-		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+		ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
 				     IOMMU_READ | IOMMU_WRITE, &pfn);
 		if (ret != 1) {
 			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -187,7 +218,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		dma_addr_t *dma_addr, unsigned long size)
 {
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 	struct page *page = NULL;
 	int ret;
 
@@ -210,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 		dma_addr_t dma_addr, unsigned long size)
 {
-	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
 
 	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
 	gvt_unpin_guest_page(vgpu, gfn, size);
@@ -219,7 +250,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
 		dma_addr_t dma_addr)
 {
-	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+	struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
 	struct gvt_dma *itr;
 
 	while (node) {
@@ -237,7 +268,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
 
 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+	struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
 	struct gvt_dma *itr;
 
 	while (node) {
@@ -258,6 +289,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link, *parent = NULL;
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
 	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
 	if (!new)
@@ -270,7 +302,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	kref_init(&new->ref);
 
 	/* gfn_cache maps gfn to struct gvt_dma. */
-	link = &vgpu->vdev.gfn_cache.rb_node;
+	link = &vdev->gfn_cache.rb_node;
 	while (*link) {
 		parent = *link;
 		itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -281,11 +313,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 			link = &parent->rb_right;
 	}
 	rb_link_node(&new->gfn_node, parent, link);
-	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+	rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
 
 	/* dma_addr_cache maps dma addr to struct gvt_dma. */
 	parent = NULL;
-	link = &vgpu->vdev.dma_addr_cache.rb_node;
+	link = &vdev->dma_addr_cache.rb_node;
 	while (*link) {
 		parent = *link;
 		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -296,46 +328,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 			link = &parent->rb_right;
 	}
 	rb_link_node(&new->dma_addr_node, parent, link);
-	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+	rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
 
-	vgpu->vdev.nr_cache_entries++;
+	vdev->nr_cache_entries++;
 	return 0;
 }
 
 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 				struct gvt_dma *entry)
 {
-	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
-	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+	rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+	rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
 	kfree(entry);
-	vgpu->vdev.nr_cache_entries--;
+	vdev->nr_cache_entries--;
 }
 
 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
 	struct gvt_dma *dma;
 	struct rb_node *node = NULL;
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
 	for (;;) {
-		mutex_lock(&vgpu->vdev.cache_lock);
-		node = rb_first(&vgpu->vdev.gfn_cache);
+		mutex_lock(&vdev->cache_lock);
+		node = rb_first(&vdev->gfn_cache);
 		if (!node) {
-			mutex_unlock(&vgpu->vdev.cache_lock);
+			mutex_unlock(&vdev->cache_lock);
 			break;
 		}
 		dma = rb_entry(node, struct gvt_dma, gfn_node);
 		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
 		__gvt_cache_remove_entry(vgpu, dma);
-		mutex_unlock(&vgpu->vdev.cache_lock);
+		mutex_unlock(&vdev->cache_lock);
 	}
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
 {
-	vgpu->vdev.gfn_cache = RB_ROOT;
-	vgpu->vdev.dma_addr_cache = RB_ROOT;
-	vgpu->vdev.nr_cache_entries = 0;
-	mutex_init(&vgpu->vdev.cache_lock);
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+	vdev->gfn_cache = RB_ROOT;
+	vdev->dma_addr_cache = RB_ROOT;
+	vdev->nr_cache_entries = 0;
+	mutex_init(&vdev->cache_lock);
 }
 
 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -409,16 +446,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
 		size_t count, loff_t *ppos, bool iswrite)
 {
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
 			VFIO_PCI_NUM_REGIONS;
-	void *base = vgpu->vdev.region[i].data;
+	void *base = vdev->region[i].data;
 	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 
-	if (pos >= vgpu->vdev.region[i].size || iswrite) {
+	if (pos >= vdev->region[i].size || iswrite) {
 		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
 		return -EINVAL;
 	}
-	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+	count = min(count, (size_t)(vdev->region[i].size - pos));
 	memcpy(buf, base + pos, count);
 
 	return count;
@@ -512,7 +551,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
 	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
 			VFIO_PCI_NUM_REGIONS;
 	struct vfio_edid_region *region =
-		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
+		(struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
 	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
 
 	if (pos < region->vfio_edid_regs.edid_offset) {
@@ -544,32 +583,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
 		const struct intel_vgpu_regops *ops,
 		size_t size, u32 flags, void *data)
 {
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	struct vfio_region *region;
 
-	region = krealloc(vgpu->vdev.region,
-			(vgpu->vdev.num_regions + 1) * sizeof(*region),
+	region = krealloc(vdev->region,
+			(vdev->num_regions + 1) * sizeof(*region),
 			GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
 
-	vgpu->vdev.region = region;
-	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
-	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
-	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
-	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
-	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
-	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
-	vgpu->vdev.num_regions++;
+	vdev->region = region;
+	vdev->region[vdev->num_regions].type = type;
+	vdev->region[vdev->num_regions].subtype = subtype;
+	vdev->region[vdev->num_regions].ops = ops;
+	vdev->region[vdev->num_regions].size = size;
+	vdev->region[vdev->num_regions].flags = flags;
+	vdev->region[vdev->num_regions].data = data;
+	vdev->num_regions++;
 	return 0;
 }
 
 static int kvmgt_get_vfio_device(void *p_vgpu)
 {
 	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
-	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
-		mdev_dev(vgpu->vdev.mdev));
-	if (!vgpu->vdev.vfio_device) {
+	vdev->vfio_device = vfio_device_get_from_dev(
+		mdev_dev(vdev->mdev));
+	if (!vdev->vfio_device) {
 		gvt_vgpu_err("failed to get vfio device\n");
 		return -ENODEV;
 	}
@@ -637,10 +678,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
 
 static void kvmgt_put_vfio_device(void *vgpu)
 {
-	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+	struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+	if (WARN_ON(!vdev->vfio_device))
 		return;
 
-	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+	vfio_device_put(vdev->vfio_device);
 }
 
 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -669,9 +712,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 		goto out;
 	}
 
-	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+	INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
 
-	vgpu->vdev.mdev = mdev;
+	kvmgt_vdev(vgpu)->mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
 
 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -696,9 +739,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 				     unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-					struct intel_vgpu,
-					vdev.iommu_notifier);
+	struct kvmgt_vdev *vdev = container_of(nb,
+					       struct kvmgt_vdev,
+					       iommu_notifier);
+	struct intel_vgpu *vgpu = vdev->vgpu;
 
 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -708,7 +752,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 		iov_pfn = unmap->iova >> PAGE_SHIFT;
 		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
 
-		mutex_lock(&vgpu->vdev.cache_lock);
+		mutex_lock(&vdev->cache_lock);
 		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
 			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
 			if (!entry)
@@ -718,7 +762,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 					   entry->size);
 			__gvt_cache_remove_entry(vgpu, entry);
 		}
-		mutex_unlock(&vgpu->vdev.cache_lock);
+		mutex_unlock(&vdev->cache_lock);
 	}
 
 	return NOTIFY_OK;
@@ -727,16 +771,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 static int intel_vgpu_group_notifier(struct notifier_block *nb,
 				     unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-					struct intel_vgpu,
-					vdev.group_notifier);
+	struct kvmgt_vdev *vdev = container_of(nb,
+					       struct kvmgt_vdev,
+					       group_notifier);
 
 	/* the only action we care about */
 	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
-		vgpu->vdev.kvm = data;
+		vdev->kvm = data;
 
 		if (!data)
-			schedule_work(&vgpu->vdev.release_work);
+			schedule_work(&vdev->release_work);
 	}
 
 	return NOTIFY_OK;
@@ -745,15 +789,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
 static int intel_vgpu_open(struct mdev_device *mdev)
 {
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned long events;
 	int ret;
 
-	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
-	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+	vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+	vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
 
 	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
-				&vgpu->vdev.iommu_notifier);
+				&vdev->iommu_notifier);
 	if (ret != 0) {
 		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
 			ret);
@@ -762,7 +807,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 
 	events = VFIO_GROUP_NOTIFY_SET_KVM;
 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
-				&vgpu->vdev.group_notifier);
+				&vdev->group_notifier);
 	if (ret != 0) {
 		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
 			ret);
@@ -781,51 +826,56 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 
 	intel_gvt_ops->vgpu_activate(vgpu);
 
-	atomic_set(&vgpu->vdev.released, 0);
+	atomic_set(&vdev->released, 0);
 	return ret;
 
 undo_group:
 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
-					&vgpu->vdev.group_notifier);
+					&vdev->group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
-					&vgpu->vdev.iommu_notifier);
+					&vdev->iommu_notifier);
 out:
 	return ret;
 }
 
 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
 {
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	struct eventfd_ctx *trigger;
 
-	trigger = vgpu->vdev.msi_trigger;
+	trigger = vdev->msi_trigger;
 	if (trigger) {
 		eventfd_ctx_put(trigger);
-		vgpu->vdev.msi_trigger = NULL;
+		vdev->msi_trigger = NULL;
 	}
 }
 
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct kvmgt_guest_info *info;
 	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+	if (atomic_cmpxchg(&vdev->released, 0, 1))
 		return;
 
 	intel_gvt_ops->vgpu_release(vgpu);
 
-	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
-					&vgpu->vdev.iommu_notifier);
-	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+					&vdev->iommu_notifier);
+	drm_WARN(&i915->drm, ret,
+		 "vfio_unregister_notifier for iommu failed: %d\n", ret);
 
-	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
-					&vgpu->vdev.group_notifier);
-	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+	ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+					&vdev->group_notifier);
+	drm_WARN(&i915->drm, ret,
+		 "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	/* dereference module reference taken at open */
 	module_put(THIS_MODULE);
@@ -835,7 +885,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 
 	intel_vgpu_release_msi_eventfd_ctx(vgpu);
 
-	vgpu->vdev.kvm = NULL;
+	vdev->kvm = NULL;
 	vgpu->handle = 0;
 }
 
@@ -848,10 +898,10 @@ static void intel_vgpu_release(struct mdev_device *mdev)
 
 static void intel_vgpu_release_work(struct work_struct *work)
 {
-	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-					vdev.release_work);
+	struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+					       release_work);
 
-	__intel_vgpu_release(vgpu);
+	__intel_vgpu_release(vdev->vgpu);
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -913,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		return -EINVAL;
 	}
 
-	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
 					ALIGN_DOWN(off, PAGE_SIZE),
 					count + offset_in_page(off));
 	if (!aperture_va)
@@ -933,12 +983,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 			size_t count, loff_t *ppos, bool is_write)
 {
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
 	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
 	int ret = -EINVAL;
 
 
-	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
 		gvt_vgpu_err("invalid index: %u\n", index);
 		return -EINVAL;
 	}
@@ -967,11 +1018,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 	case VFIO_PCI_ROM_REGION_INDEX:
 		break;
 	default:
-		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+		if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 			return -EINVAL;
 
 		index -= VFIO_PCI_NUM_REGIONS;
-		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+		return vdev->region[index].ops->rw(vgpu, buf, count,
 				ppos, is_write);
 	}
 
@@ -1224,7 +1275,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
 			return PTR_ERR(trigger);
 		}
-		vgpu->vdev.msi_trigger = trigger;
+		kvmgt_vdev(vgpu)->msi_trigger = trigger;
 	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
 		intel_vgpu_release_msi_eventfd_ctx(vgpu);
 
@@ -1276,6 +1327,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			     unsigned long arg)
 {
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 	unsigned long minsz;
 
 	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1294,7 +1346,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		info.flags = VFIO_DEVICE_FLAGS_PCI;
 		info.flags |= VFIO_DEVICE_FLAGS_RESET;
 		info.num_regions = VFIO_PCI_NUM_REGIONS +
-				vgpu->vdev.num_regions;
+				vdev->num_regions;
 		info.num_irqs = VFIO_PCI_NUM_IRQS;
 
 		return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1385,22 +1437,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 					.header.version = 1 };
 
 				if (info.index >= VFIO_PCI_NUM_REGIONS +
-						vgpu->vdev.num_regions)
+						vdev->num_regions)
 					return -EINVAL;
 				info.index =
 					array_index_nospec(info.index,
 							VFIO_PCI_NUM_REGIONS +
-							vgpu->vdev.num_regions);
+							vdev->num_regions);
 
 				i = info.index - VFIO_PCI_NUM_REGIONS;
 
 				info.offset =
 					VFIO_PCI_INDEX_TO_OFFSET(info.index);
-				info.size = vgpu->vdev.region[i].size;
-				info.flags = vgpu->vdev.region[i].flags;
+				info.size = vdev->region[i].size;
+				info.flags = vdev->region[i].flags;
 
-				cap_type.type = vgpu->vdev.region[i].type;
-				cap_type.subtype = vgpu->vdev.region[i].subtype;
+				cap_type.type = vdev->region[i].type;
+				cap_type.subtype = vdev->region[i].subtype;
 
 				ret = vfio_info_add_capability(&caps,
 							&cap_type.header,
@@ -1597,12 +1649,10 @@ static struct mdev_parent_ops intel_vgpu_ops = {
 
 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 {
-	struct attribute **kvm_type_attrs;
 	struct attribute_group **kvm_vgpu_type_groups;
 
 	intel_gvt_ops = ops;
-	if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
-			&kvm_vgpu_type_groups))
+	if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
 		return -EFAULT;
 	intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
 
@@ -1742,13 +1792,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 {
 	struct kvmgt_guest_info *info;
 	struct intel_vgpu *vgpu;
+	struct kvmgt_vdev *vdev;
 	struct kvm *kvm;
 
 	vgpu = mdev_get_drvdata(mdev);
 	if (handle_valid(vgpu->handle))
 		return -EEXIST;
 
-	kvm = vgpu->vdev.kvm;
+	vdev = kvmgt_vdev(vgpu);
+	kvm = vdev->kvm;
 	if (!kvm || kvm->mm != current->mm) {
 		gvt_vgpu_err("KVM is required to use Intel vGPU\n");
 		return -ESRCH;
@@ -1769,8 +1821,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	kvmgt_protect_table_init(info);
 	gvt_cache_init(vgpu);
 
-	init_completion(&vgpu->vblank_done);
-
 	info->track_node.track_write = kvmgt_page_track_write;
 	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
 	kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1778,7 +1828,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 	info->debugfs_cache_entries = debugfs_create_ulong(
 						"kvmgt_nr_cache_entries",
 						0444, vgpu->debugfs,
-						&vgpu->vdev.nr_cache_entries);
+						&vdev->nr_cache_entries);
 	return 0;
 }
 
@@ -1795,9 +1845,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 	return true;
 }
 
-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
 {
-	/* nothing to do here */
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+	vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+	if (!vgpu->vdev)
+		return -ENOMEM;
+
+	kvmgt_vdev(vgpu)->vgpu = vgpu;
+
 	return 0;
 }
 
@@ -1805,29 +1863,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu)
 {
 	int i;
 	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
 
-	if (!vgpu->vdev.region)
+	if (!vdev->region)
 		return;
 
-	for (i = 0; i < vgpu->vdev.num_regions; i++)
-		if (vgpu->vdev.region[i].ops->release)
-			vgpu->vdev.region[i].ops->release(vgpu,
-					&vgpu->vdev.region[i]);
-	vgpu->vdev.num_regions = 0;
-	kfree(vgpu->vdev.region);
-	vgpu->vdev.region = NULL;
+	for (i = 0; i < vdev->num_regions; i++)
+		if (vdev->region[i].ops->release)
+			vdev->region[i].ops->release(vgpu,
+					&vdev->region[i]);
+	vdev->num_regions = 0;
+	kfree(vdev->region);
+	vdev->region = NULL;
+
+	kfree(vdev);
 }
 
 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 {
 	struct kvmgt_guest_info *info;
 	struct intel_vgpu *vgpu;
+	struct kvmgt_vdev *vdev;
 
 	if (!handle_valid(handle))
 		return -ESRCH;
 
 	info = (struct kvmgt_guest_info *)handle;
 	vgpu = info->vgpu;
+	vdev = kvmgt_vdev(vgpu);
 
 	/*
 	 * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
@@ -1838,10 +1901,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 	 * enabled by guest. So if msi_trigger is NULL, success is still
 	 * returned and no interrupt is injected into the guest.
 	 */
-	if (vgpu->vdev.msi_trigger == NULL)
+	if (vdev->msi_trigger == NULL)
 		return 0;
 
-	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+	if (eventfd_signal(vdev->msi_trigger, 1) == 1)
 		return 0;
 
 	return -EFAULT;
@@ -1867,26 +1930,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 		unsigned long size, dma_addr_t *dma_addr)
 {
-	struct kvmgt_guest_info *info;
 	struct intel_vgpu *vgpu;
+	struct kvmgt_vdev *vdev;
 	struct gvt_dma *entry;
 	int ret;
 
 	if (!handle_valid(handle))
 		return -EINVAL;
 
-	info = (struct kvmgt_guest_info *)handle;
-	vgpu = info->vgpu;
+	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+	vdev = kvmgt_vdev(vgpu);
 
-	mutex_lock(&info->vgpu->vdev.cache_lock);
+	mutex_lock(&vdev->cache_lock);
 
-	entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+	entry = __gvt_cache_find_gfn(vgpu, gfn);
 	if (!entry) {
 		ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
 		if (ret)
 			goto err_unlock;
 
-		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
 		if (ret)
 			goto err_unmap;
 	} else if (entry->size != size) {
@@ -1898,7 +1961,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 		if (ret)
 			goto err_unlock;
 
-		ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+		ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
 		if (ret)
 			goto err_unmap;
 	} else {
@@ -1906,19 +1969,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 		*dma_addr = entry->dma_addr;
 	}
 
-	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	mutex_unlock(&vdev->cache_lock);
 	return 0;
 
 err_unmap:
 	gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
 err_unlock:
-	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	mutex_unlock(&vdev->cache_lock);
 	return ret;
 }
 
 static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
 	struct kvmgt_guest_info *info;
+	struct kvmgt_vdev *vdev;
 	struct gvt_dma *entry;
 	int ret = 0;
 
@@ -1926,14 +1990,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
 		return -ENODEV;
 
 	info = (struct kvmgt_guest_info *)handle;
+	vdev = kvmgt_vdev(info->vgpu);
 
-	mutex_lock(&info->vgpu->vdev.cache_lock);
+	mutex_lock(&vdev->cache_lock);
 	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
 	if (entry)
 		kref_get(&entry->ref);
 	else
 		ret = -ENOMEM;
-	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	mutex_unlock(&vdev->cache_lock);
 
 	return ret;
 }
@@ -1949,19 +2014,21 @@ static void __gvt_dma_release(struct kref *ref)
 
 static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
 {
-	struct kvmgt_guest_info *info;
+	struct intel_vgpu *vgpu;
+	struct kvmgt_vdev *vdev;
 	struct gvt_dma *entry;
 
 	if (!handle_valid(handle))
 		return;
 
-	info = (struct kvmgt_guest_info *)handle;
+	vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+	vdev = kvmgt_vdev(vgpu);
 
-	mutex_lock(&info->vgpu->vdev.cache_lock);
-	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+	mutex_lock(&vdev->cache_lock);
+	entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
 	if (entry)
 		kref_put(&entry->ref, __gvt_dma_release);
-	mutex_unlock(&info->vgpu->vdev.cache_lock);
+	mutex_unlock(&vdev->cache_lock);
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
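
The kvmgt changes above follow a common kernel pattern: hypervisor-specific
state is split out of the core object into a separately allocated private
struct, reached through a small accessor and carrying a back-pointer, with
the allocation done in the attach hook and the free in the detach hook. A
minimal userspace sketch of that shape (all names here are illustrative,
not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct vgpu {
            int id;
            void *vdev;             /* opaque pointer to backend-private state */
    };

    struct kvmgt_vdev {
            struct vgpu *vgpu;      /* back-pointer to the owning object */
            unsigned long nr_cache_entries;
    };

    static struct kvmgt_vdev *kvmgt_vdev(struct vgpu *vgpu)
    {
            return vgpu->vdev;
    }

    static int attach_vgpu(struct vgpu *vgpu)
    {
            struct kvmgt_vdev *vdev = calloc(1, sizeof(*vdev));

            if (!vdev)
                    return -1;      /* -ENOMEM in the kernel */
            vdev->vgpu = vgpu;
            vgpu->vdev = vdev;
            return 0;
    }

    static void detach_vgpu(struct vgpu *vgpu)
    {
            free(vgpu->vdev);       /* kfree(vdev) above */
            vgpu->vdev = NULL;
    }

    int main(void)
    {
            struct vgpu v = { .id = 1 };

            if (attach_vgpu(&v))
                    return 1;
            kvmgt_vdev(&v)->nr_cache_entries = 42;
            printf("vgpu %d: %lu cache entries\n",
                   v.id, kvmgt_vdev(&v)->nr_cache_entries);
            detach_vgpu(&v);
            return 0;
    }
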
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a551788..2919936 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
@@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
-	if (WARN_ON(bytes > 8))
+	if (drm_WARN_ON(&i915->drm, bytes > 8))
 		goto err;
 
 	if (reg_is_gtt(gvt, offset)) {
-		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+				!IS_ALIGNED(offset, 8)))
 			goto err;
-		if (WARN_ON(bytes != 4 && bytes != 8))
+		if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
 			goto err;
-		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+		if (drm_WARN_ON(&i915->drm,
+				!reg_is_gtt(gvt, offset + bytes - 1)))
 			goto err;
 
 		ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
@@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 		goto out;
 	}
 
-	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
 		goto out;
 	}
 
-	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+	if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
 		goto err;
 
 	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
-		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
 			goto err;
 	}
 
@@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
@@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
-	if (WARN_ON(bytes > 8))
+	if (drm_WARN_ON(&i915->drm, bytes > 8))
 		goto err;
 
 	if (reg_is_gtt(gvt, offset)) {
-		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+				!IS_ALIGNED(offset, 8)))
 			goto err;
-		if (WARN_ON(bytes != 4 && bytes != 8))
+		if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
 			goto err;
-		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+		if (drm_WARN_ON(&i915->drm,
+				!reg_is_gtt(gvt, offset + bytes - 1)))
 			goto err;
 
 		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
@@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 		goto out;
 	}
 
-	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
 		goto out;
 	}
@@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		/* set the bit 0:2(Core C-State ) to C0 */
 		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
-		if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+		if (IS_BROXTON(vgpu->gvt->gt->i915)) {
 			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
 				    ~(BIT(0) | BIT(1));
 			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
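
The WARN_ON to drm_WARN_ON conversions above attach each warning to a
specific drm device, so on multi-GPU systems the log says which device
tripped the check. A rough userspace approximation of a device-scoped warn
macro, assuming GCC statement expressions (the names are invented for the
sketch):

    #include <stdio.h>

    struct dev { const char *name; };

    /* Evaluate cond once, log with the device name if it is true, and
     * yield the condition so the macro can sit inside an if (). */
    #define dev_warn_on(d, cond) ({                                 \
            int __ret = !!(cond);                                   \
            if (__ret)                                              \
                    fprintf(stderr, "%s: WARN_ON(%s) at %s:%d\n",   \
                            (d)->name, #cond, __FILE__, __LINE__);  \
            __ret;                                                  \
    })

    int main(void)
    {
            struct dev gpu = { .name = "card0" };
            int bytes = 16;

            if (dev_warn_on(&gpu, bytes > 8))
                    return 1;       /* mirrors the 'goto err' paths above */
            return 0;
    }
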
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 2e68f4b..cc48126 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
 	struct hlist_node node;
 };
 
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
-		unsigned int reg);
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
 unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
 bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
 
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index aaf1591..2ccaf78 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
 	[VECS0] = 0xcb00,
 };
 
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(const struct intel_engine_cs *engine)
 {
-	struct intel_gvt *gvt = dev_priv->gvt;
-	i915_reg_t offset;
+	struct intel_gvt *gvt = engine->i915->gvt;
+	struct intel_uncore *uncore = engine->uncore;
 	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
 	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
+	i915_reg_t offset;
 	int ring_id, i;
 
 	/* Platform doesn't have mocs mmios. */
@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 		return;
 
 	for (ring_id = 0; ring_id < cnt; ring_id++) {
-		if (!HAS_ENGINE(dev_priv, ring_id))
+		if (!HAS_ENGINE(engine->i915, ring_id))
 			continue;
+
 		offset.reg = regs[ring_id];
 		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
-				I915_READ_FW(offset);
+				intel_uncore_read_fw(uncore, offset);
 			offset.reg += 4;
 		}
 	}
@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	offset.reg = 0xb020;
 	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 		gen9_render_mocs.l3cc_table[i] =
-			I915_READ_FW(offset);
+			intel_uncore_read_fw(uncore, offset);
 		offset.reg += 4;
 	}
 	gen9_render_mocs.initialized = true;
@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
 	*cs++ = MI_LOAD_REGISTER_IMM(count);
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->ring_id != ring_id ||
-		    !mmio->in_context)
+		if (mmio->id != ring_id || !mmio->in_context)
 			continue;
 
 		*cs++ = i915_mmio_reg_offset(mmio->reg);
-		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
-				(mmio->mask << 16);
+		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
 		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
 			      *(cs-2), *(cs-1), vgpu->id, ring_id);
 	}
@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
 	[VECS0] = 0x4270,
 };
 
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
+				     const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct intel_uncore *uncore = engine->uncore;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
 	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (!regs)
 		return;
 
-	if (WARN_ON(ring_id >= cnt))
+	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
 		return;
 
-	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
 		return;
 
-	reg = _MMIO(regs[ring_id]);
+	reg = _MMIO(regs[engine->id]);
 
 	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
 	 * we need to put a forcewake when invalidating RCS TLB caches,
@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+	if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
 		fw |= FORCEWAKE_RENDER;
 
 	intel_uncore_forcewake_get(uncore, fw);
 
 	intel_uncore_write_fw(uncore, reg, 0x1);
 
-	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
-		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
+		gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
+			     engine->name);
 	else
 		vgpu_vreg_t(vgpu, reg) = 0;
 
 	intel_uncore_forcewake_put(uncore, fw);
 
-	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
 }
 
 static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
-			int ring_id)
+			const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv;
-	i915_reg_t offset, l3_offset;
-	u32 old_v, new_v;
-
 	u32 regs[] = {
 		[RCS0]  = 0xc800,
 		[VCS0]  = 0xc900,
@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 		[BCS0]  = 0xcc00,
 		[VECS0] = 0xcb00,
 	};
+	struct intel_uncore *uncore = engine->uncore;
+	i915_reg_t offset, l3_offset;
+	u32 old_v, new_v;
 	int i;
 
-	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+	if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
 		return;
 
 	if (!pre && !gen9_render_mocs.initialized)
-		load_render_mocs(dev_priv);
+		load_render_mocs(engine);
 
-	offset.reg = regs[ring_id];
+	offset.reg = regs[engine->id];
 	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
 		if (pre)
 			old_v = vgpu_vreg_t(pre, offset);
 		else
-			old_v = gen9_render_mocs.control_table[ring_id][i];
+			old_v = gen9_render_mocs.control_table[engine->id][i];
 		if (next)
 			new_v = vgpu_vreg_t(next, offset);
 		else
-			new_v = gen9_render_mocs.control_table[ring_id][i];
+			new_v = gen9_render_mocs.control_table[engine->id][i];
 
 		if (old_v != new_v)
-			I915_WRITE_FW(offset, new_v);
+			intel_uncore_write_fw(uncore, offset, new_v);
 
 		offset.reg += 4;
 	}
 
-	if (ring_id == RCS0) {
+	if (engine->id == RCS0) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
 			if (pre)
@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 				new_v = gen9_render_mocs.l3cc_table[i];
 
 			if (old_v != new_v)
-				I915_WRITE_FW(l3_offset, new_v);
+				intel_uncore_write_fw(uncore, l3_offset, new_v);
 
 			l3_offset.reg += 4;
 		}
@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
 /* Switch ring mmio values (context). */
 static void switch_mmio(struct intel_vgpu *pre,
 			struct intel_vgpu *next,
-			int ring_id)
+			const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv;
+	struct intel_uncore *uncore = engine->uncore;
 	struct intel_vgpu_submission *s;
 	struct engine_mmio *mmio;
 	u32 old_v, new_v;
 
-	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (INTEL_GEN(dev_priv) >= 9)
-		switch_mocs(pre, next, ring_id);
+	if (INTEL_GEN(engine->i915) >= 9)
+		switch_mocs(pre, next, engine);
 
-	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->ring_id != ring_id)
+		if (mmio->id != engine->id)
 			continue;
 		/*
 		 * No need to save or restore the mmio which is in the context
 		 * state image on gen9; it's initialized by the lri command and
 		 * saved or restored together with the context.
 		 */
-		if (IS_GEN(dev_priv, 9) && mmio->in_context)
+		if (IS_GEN(engine->i915, 9) && mmio->in_context)
 			continue;
 
 		// save
 		if (pre) {
-			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+			vgpu_vreg_t(pre, mmio->reg) =
+				intel_uncore_read_fw(uncore, mmio->reg);
 			if (mmio->mask)
 				vgpu_vreg_t(pre, mmio->reg) &=
-						~(mmio->mask << 16);
+					~(mmio->mask << 16);
 			old_v = vgpu_vreg_t(pre, mmio->reg);
-		} else
-			old_v = mmio->value = I915_READ_FW(mmio->reg);
+		} else {
+			old_v = mmio->value =
+				intel_uncore_read_fw(uncore, mmio->reg);
+		}
 
 		// restore
 		if (next) {
@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
 			 * itself.
 			 */
 			if (mmio->in_context &&
-			    !is_inhibit_context(s->shadow[ring_id]))
+			    !is_inhibit_context(s->shadow[engine->id]))
 				continue;
 
 			if (mmio->mask)
 				new_v = vgpu_vreg_t(next, mmio->reg) |
-							(mmio->mask << 16);
+					(mmio->mask << 16);
 			else
 				new_v = vgpu_vreg_t(next, mmio->reg);
 		} else {
@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 				new_v = mmio->value;
 		}
 
-		I915_WRITE_FW(mmio->reg, new_v);
+		intel_uncore_write_fw(uncore, mmio->reg, new_v);
 
 		trace_render_mmio(pre ? pre->id : 0,
 				  next ? next->id : 0,
@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
 	}
 
 	if (next)
-		handle_tlb_pending_event(next, ring_id);
+		handle_tlb_pending_event(next, engine);
 }
 
 /**
  * intel_gvt_switch_render_mmio - switch mmio context of specific engine
  * @pre: the last vGPU that own the engine
  * @next: the vGPU to switch to
- * @ring_id: specify the engine
+ * @engine: the engine
  *
  * If pre is null, it indicates that the host owns the engine. If next
  * is null, it indicates that we are switching to a host workload.
  */
 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
-			   struct intel_vgpu *next, int ring_id)
+			   struct intel_vgpu *next,
+			   const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv;
-
-	if (WARN_ON(!pre && !next))
+	if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
+		 engine->name))
 		return;
 
-	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+	gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
 		       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
 
-	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
 	/**
 	 * We are using the raw mmio access wrapper to improve the
 	 * performance of batch mmio read/write, so we need to
 	 * handle forcewake manually.
 	 */
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-	switch_mmio(pre, next, ring_id);
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+	switch_mmio(pre, next, engine);
+	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
 }
 
 /**
@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
 	struct engine_mmio *mmio;
 
-	if (INTEL_GEN(gvt->dev_priv) >= 9) {
+	if (INTEL_GEN(gvt->gt->i915) >= 9) {
 		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
 		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->in_context) {
-			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+			gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
 			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
 		}
 	}
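
Two details of the mmio_context.c rework above are worth calling out:
switch_mocs() and switch_mmio() compare the old and new values and only
issue a register write when they differ, and the whole burst runs with
forcewake held so each access can use the cheap _fw variants. A toy
version of the write-if-changed loop (plain arrays standing in for MMIO;
names are invented):

    #include <stdio.h>

    #define NREGS 4

    static unsigned int hw_regs[NREGS];     /* pretend MMIO space */
    static unsigned int writes;

    static void reg_write(int i, unsigned int v)
    {
            hw_regs[i] = v;
            writes++;               /* writes are the expensive part */
    }

    static void switch_values(const unsigned int *old_v,
                              const unsigned int *new_v)
    {
            for (int i = 0; i < NREGS; i++)
                    if (old_v[i] != new_v[i])
                            reg_write(i, new_v[i]);
    }

    int main(void)
    {
            unsigned int pre[NREGS]  = { 1, 2, 3, 4 };
            unsigned int next[NREGS] = { 1, 9, 3, 8 };

            switch_values(pre, next);
            printf("issued %u of %d possible writes\n", writes, NREGS);
            return 0;
    }
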
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index f7eaa44..970704b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -37,7 +37,7 @@
 #define __GVT_RENDER_H__
 
 struct engine_mmio {
-	int ring_id;
+	enum intel_engine_id id;
 	i915_reg_t reg;
 	u32 mask;
 	bool in_context;
@@ -45,7 +45,8 @@ struct engine_mmio {
 };
 
 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
-			   struct intel_vgpu *next, int ring_id);
+			   struct intel_vgpu *next,
+			   const struct intel_engine_cs *engine);
 
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
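
The header change above is the crux of the series: struct engine_mmio and
the mmio-switch entry points take an engine object (or a typed enum id)
instead of a bare int, so callees get the engine's name, uncore and mmio
base for free and the compiler can catch index/engine mixups. A small
sketch of why passing the object beats passing an integer (illustrative
names only):

    #include <stdio.h>

    enum engine_id { RCS0, BCS0, NUM_ENGINES };

    struct engine {
            enum engine_id id;
            const char *name;
            unsigned int mmio_base;
    };

    /* With the object in hand there is nothing left to look up. */
    static void describe(const struct engine *engine)
    {
            printf("%s (id %d) at mmio base 0x%x\n",
                   engine->name, engine->id, engine->mmio_base);
    }

    int main(void)
    {
            struct engine rcs0 = { RCS0, "rcs0", 0x2000 };

            describe(&rcs0);
            return 0;
    }
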
 
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 2369d4a..036b74f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (!list_empty(workload_q_head(vgpu, i)))
+	for_each_engine(engine, vgpu->gvt->gt, i) {
+		if (!list_empty(workload_q_head(vgpu, engine)))
 			return true;
 	}
 
@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for_each_engine(engine, gvt->dev_priv, i) {
-		if (scheduler->current_workload[i])
+	for_each_engine(engine, gvt->gt, i) {
+		if (scheduler->current_workload[engine->id])
 			return;
 	}
 
@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for_each_engine(engine, gvt->dev_priv, i)
-		wake_up(&scheduler->waitq[i]);
+	for_each_engine(engine, gvt->gt, i)
+		wake_up(&scheduler->waitq[engine->id]);
 }
 
 static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
-	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (!vgpu_data->active)
 		return;
@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
 	intel_runtime_pm_get(&dev_priv->runtime_pm);
 	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
+	for_each_engine(engine, vgpu->gvt->gt, id) {
+		if (scheduler->engine_owner[engine->id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, engine);
+			scheduler->engine_owner[engine->id] = NULL;
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
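
The sched_policy.c loops above switch from counting 0..I915_NUM_ENGINES to
for_each_engine(), which only visits engines that actually exist and hands
back the engine pointer, so per-engine state is indexed via engine->id. A
toy version of iterating a sparse engine table (the macro trick mirrors
the kernel's for_each_if pattern; names are invented):

    #include <stdio.h>

    #define MAX_ENGINES 5

    struct engine { int id; const char *name; };

    static struct engine *engines[MAX_ENGINES]; /* NULL = fused-off slot */

    #define for_each_engine(e, i)                           \
            for ((i) = 0; (i) < MAX_ENGINES; (i)++)         \
                    if (!((e) = engines[(i)])) {} else

    int main(void)
    {
            static struct engine rcs0 = { 0, "rcs0" };
            static struct engine vcs0 = { 2, "vcs0" };
            struct engine *e;
            int i;

            engines[0] = &rcs0;
            engines[2] = &vcs0;     /* slots 1, 3 and 4 are absent */

            for_each_engine(e, i)
                    printf("engine %d is %s\n", e->id, e->name);
            return 0;
    }
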
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 685d1e0..1c95bf8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 static void sr_oa_regs(struct intel_vgpu_workload *workload,
 		u32 *reg_state, bool save)
 {
-	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
 	u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
 	u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
 	int i = 0;
@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
 		i915_mmio_reg_offset(EU_PERF_CNTL6),
 	};
 
-	if (workload->ring_id != RCS0)
+	if (workload->engine->id != RCS0)
 		return;
 
 	if (save) {
@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
 		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	COPY_REG_MASKED(ctx_ctrl);
 	COPY_REG(ctx_timestamp);
 
-	if (ring_id == RCS0) {
+	if (workload->engine->id == RCS0) {
 		COPY_REG(bb_per_ctx_ptr);
 		COPY_REG(rcs_indirect_ctx);
 		COPY_REG(rcs_indirect_ctx_offset);
@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
 		return 0;
 
-	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-			workload->ctx_desc.lrca);
+	gvt_dbg_sched("ring %s workload lrca %x",
+		      workload->engine->name,
+		      workload->ctx_desc.lrca);
 
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
+	context_page_num = workload->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
 		context_page_num = 19;
 
 	i = 2;
@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
 	return intel_context_force_single_submission(rq->context);
 }
 
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+	struct intel_uncore *uncore = engine->uncore;
 	i915_reg_t reg;
 
-	reg = RING_INSTDONE(ring_base);
-	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
-	reg = RING_ACTHD(ring_base);
-	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
-	reg = RING_ACTHD_UDW(ring_base);
-	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_INSTDONE(engine->mmio_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+		intel_uncore_read(uncore, reg);
+
+	reg = RING_ACTHD(engine->mmio_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+		intel_uncore_read(uncore, reg);
+
+	reg = RING_ACTHD_UDW(engine->mmio_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+		intel_uncore_read(uncore, reg);
 }
 
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct i915_request *req = data;
+	struct i915_request *rq = data;
 	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
-				shadow_ctx_notifier_block[req->engine->id]);
+				shadow_ctx_notifier_block[rq->engine->id]);
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	enum intel_engine_id ring_id = req->engine->id;
+	enum intel_engine_id ring_id = rq->engine->id;
 	struct intel_vgpu_workload *workload;
 	unsigned long flags;
 
-	if (!is_gvt_request(req)) {
+	if (!is_gvt_request(rq)) {
 		spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 		if (action == INTEL_CONTEXT_SCHEDULE_IN &&
 		    scheduler->engine_owner[ring_id]) {
 			/* Switch ring from vGPU to host. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-					      NULL, ring_id);
+					      NULL, rq->engine);
 			scheduler->engine_owner[ring_id] = NULL;
 		}
 		spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 			/* Switch ring from host to vGPU or vGPU to vGPU. */
 			intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-					      workload->vgpu, ring_id);
+					      workload->vgpu, rq->engine);
 			scheduler->engine_owner[ring_id] = workload->vgpu;
 		} else
 			gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		save_ring_hw_state(workload->vgpu, ring_id);
+		save_ring_hw_state(workload->vgpu, rq->engine);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
-		save_ring_hw_state(workload->vgpu, ring_id);
+		save_ring_hw_state(workload->vgpu, rq->engine);
 		break;
 	default:
 		WARN_ON(1);
@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
 	if (workload->req)
 		return 0;
 
-	rq = i915_request_create(s->shadow[workload->ring_id]);
+	rq = i915_request_create(s->shadow[workload->engine->id]);
 	if (IS_ERR(rq)) {
 		gvt_vgpu_err("fail to allocate gem request\n");
 		return PTR_ERR(rq);
@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->shadow)
 		return 0;
 
-	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
-		shadow_context_descriptor_update(s->shadow[workload->ring_id],
+	if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+		shadow_context_descriptor_update(s->shadow[workload->engine->id],
 						 workload);
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
 		return ret;
 
-	if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+	if (workload->engine->id == RCS0 &&
+	    workload->wa_ctx.indirect_ctx.size) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
 			goto err_shadow;
@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 	workload->shadow = true;
 	return 0;
+
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 	return ret;
@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
 {
-	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 ring_base;
-
-	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
-	vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+	vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+		workload->rb_start;
 }
 
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	int ring = workload->ring_id;
 	int ret = 0;
 
 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+	set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct i915_request *rq;
-	int ring_id = workload->ring_id;
 	int ret;
 
-	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-		ring_id, workload);
+	gvt_dbg_sched("ring %s prepare to dispatch workload %p\n",
+		      workload->engine->name, workload);
 
 	mutex_lock(&vgpu->vgpu_lock);
 
@@ -710,8 +710,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	}
 
 	if (!IS_ERR_OR_NULL(workload->req)) {
-		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
-				ring_id, workload->req);
+		gvt_dbg_sched("ring %s submit workload to i915 %p\n",
+			      workload->engine->name, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
@@ -722,8 +722,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	return ret;
 }
 
-static struct intel_vgpu_workload *pick_next_workload(
-		struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload(
 	 * bail out
 	 */
 	if (!scheduler->current_vgpu) {
-		gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+		gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
 		goto out;
 	}
 
 	if (scheduler->need_reschedule) {
-		gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+		gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
 		goto out;
 	}
 
 	if (!scheduler->current_vgpu->active ||
-	    list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+	    list_empty(workload_q_head(scheduler->current_vgpu, engine)))
 		goto out;
 
 	/*
 	 * still have current workload, maybe the workload dispatcher
 	 * failed to submit it for some reason, resubmit it.
 	 */
-	if (scheduler->current_workload[ring_id]) {
-		workload = scheduler->current_workload[ring_id];
-		gvt_dbg_sched("ring id %d still have current workload %p\n",
-				ring_id, workload);
+	if (scheduler->current_workload[engine->id]) {
+		workload = scheduler->current_workload[engine->id];
+		gvt_dbg_sched("ring %s still have current workload %p\n",
+			      engine->name, workload);
 		goto out;
 	}
 
@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload(
 	 * will wait until the current workload is finished when trying to
 	 * schedule out a vgpu.
 	 */
-	scheduler->current_workload[ring_id] = container_of(
-			workload_q_head(scheduler->current_vgpu, ring_id)->next,
-			struct intel_vgpu_workload, list);
+	scheduler->current_workload[engine->id] =
+		list_first_entry(workload_q_head(scheduler->current_vgpu,
+						 engine),
+				 struct intel_vgpu_workload, list);
 
-	workload = scheduler->current_workload[ring_id];
+	workload = scheduler->current_workload[engine->id];
 
-	gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+	gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
 
 	atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
-	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	u32 ring_base;
 	u32 head, tail;
 	u16 wrap_count;
@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 
 	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
 
-	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	ring_base = rq->engine->mmio_base;
 	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
 	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
 
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+	if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
 		context_page_num = 19;
 
 	i = 2;
@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
 				intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
 	intel_engine_mask_t tmp;
@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	mutex_unlock(&vgpu->vgpu_lock);
 }
 
-struct workload_thread_param {
-	struct intel_gvt *gvt;
-	int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
 {
-	struct workload_thread_param *p = (struct workload_thread_param *)priv;
-	struct intel_gvt *gvt = p->gvt;
-	int ring_id = p->ring_id;
+	struct intel_engine_cs *engine = arg;
+	const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+	struct intel_gvt *gvt = engine->i915->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
 
-	kfree(p);
-
-	gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+	gvt_dbg_core("workload thread for ring %s started\n", engine->name);
 
 	while (!kthread_should_stop()) {
-		add_wait_queue(&scheduler->waitq[ring_id], &wait);
+		intel_wakeref_t wakeref;
+
+		add_wait_queue(&scheduler->waitq[engine->id], &wait);
 		do {
-			workload = pick_next_workload(gvt, ring_id);
+			workload = pick_next_workload(gvt, engine);
 			if (workload)
 				break;
 			wait_woken(&wait, TASK_INTERRUPTIBLE,
 				   MAX_SCHEDULE_TIMEOUT);
 		} while (!kthread_should_stop());
-		remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+		remove_wait_queue(&scheduler->waitq[engine->id], &wait);
 
 		if (!workload)
 			break;
 
-		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
-				workload->ring_id, workload,
-				workload->vgpu->id);
+		gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+			      engine->name, workload,
+			      workload->vgpu->id);
 
-		intel_runtime_pm_get(rpm);
+		wakeref = intel_runtime_pm_get(engine->uncore->rpm);
 
-		gvt_dbg_sched("ring id %d will dispatch workload %p\n",
-				workload->ring_id, workload);
+		gvt_dbg_sched("ring %s will dispatch workload %p\n",
+			      engine->name, workload);
 
 		if (need_force_wake)
-			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
-					FORCEWAKE_ALL);
+			intel_uncore_forcewake_get(engine->uncore,
+						   FORCEWAKE_ALL);
 		/*
 		 * Update the vReg of the vGPU which submitted this
 		 * workload. The vGPU may use these registers for checking
@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv)
 			goto complete;
 		}
 
-		gvt_dbg_sched("ring id %d wait workload %p\n",
-				workload->ring_id, workload);
+		gvt_dbg_sched("ring %s wait workload %p\n",
+			      engine->name, workload);
 		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
 		gvt_dbg_sched("will complete workload %p, status: %d\n",
-				workload, workload->status);
+			      workload, workload->status);
 
-		complete_current_workload(gvt, ring_id);
+		complete_current_workload(gvt, engine->id);
 
 		if (need_force_wake)
-			intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
-					FORCEWAKE_ALL);
+			intel_uncore_forcewake_put(engine->uncore,
+						   FORCEWAKE_ALL);
 
-		intel_runtime_pm_put_unchecked(rpm);
+		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
 		if (ret && (vgpu_is_vm_unhealthy(ret)))
 			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 	}
@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for_each_engine(engine, gvt->dev_priv, i) {
+	for_each_engine(engine, gvt->gt, i) {
 		atomic_notifier_chain_unregister(
 					&engine->context_status_notifier,
 					&gvt->shadow_ctx_notifier_block[i]);
@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	struct workload_thread_param *param = NULL;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id i;
 	int ret;
@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for_each_engine(engine, gvt->dev_priv, i) {
+	for_each_engine(engine, gvt->gt, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
-		param = kzalloc(sizeof(*param), GFP_KERNEL);
-		if (!param) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		param->gvt = gvt;
-		param->ring_id = i;
-
-		scheduler->thread[i] = kthread_run(workload_thread, param,
-			"gvt workload %d", i);
+		scheduler->thread[i] = kthread_run(workload_thread, engine,
+						   "gvt:%s", engine->name);
 		if (IS_ERR(scheduler->thread[i])) {
 			gvt_err("fail to create workload thread\n");
 			ret = PTR_ERR(scheduler->thread[i]);
@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 		atomic_notifier_chain_register(&engine->context_status_notifier,
 					&gvt->shadow_ctx_notifier_block[i]);
 	}
+
 	return 0;
+
 err:
 	intel_gvt_clean_workload_scheduler(gvt);
-	kfree(param);
-	param = NULL;
 	return ret;
 }
 
@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 
 	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
-	for_each_engine(engine, vgpu->gvt->dev_priv, id)
+	for_each_engine(engine, vgpu->gvt->gt, id)
 		intel_context_unpin(s->shadow[id]);
 
 	kmem_cache_destroy(s->workloads);
@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_engine_cs *engine;
 	struct i915_ppgtt *ppgtt;
@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	i915_context_ppgtt_root_save(s, ppgtt);
 
-	for_each_engine(engine, i915, i) {
+	for_each_engine(engine, vgpu->gvt->gt, i) {
 		struct intel_context *ce;
 
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1246,7 +1228,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		ce->vm = i915_vm_get(&ppgtt->vm);
 		intel_context_set_single_submission(ce);
 
-		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+		/* Max ring buffer size */
+		if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
 			const unsigned int ring_size = 512 * SZ_4K;
 
 			ce->ring = __intel_context_ring_size(ring_size);
@@ -1282,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 out_shadow_ctx:
 	i915_context_ppgtt_root_restore(s, ppgtt);
-	for_each_engine(engine, i915, i) {
+	for_each_engine(engine, vgpu->gvt->gt, i) {
 		if (IS_ERR(s->shadow[i]))
 			break;
 
@@ -1309,6 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 				     intel_engine_mask_t engine_mask,
 				     unsigned int interface)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	const struct intel_vgpu_submission_ops *ops[] = {
 		[INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1316,10 +1300,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	};
 	int ret;
 
-	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+	if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+	if (drm_WARN_ON(&i915->drm,
+			interface == 0 && engine_mask != ALL_ENGINES))
 		return -EINVAL;
 
 	if (s->active)
@@ -1441,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
- * @ring_id: ring index
+ * @engine: the engine
  * @desc: a guest context descriptor
  *
  * This function is called when creating a vGPU workload.
@@ -1452,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
  *
  */
 struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+			   const struct intel_engine_cs *engine,
 			   struct execlist_ctx_descriptor_format *desc)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct list_head *q = workload_q_head(vgpu, ring_id);
+	struct list_head *q = workload_q_head(vgpu, engine);
 	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
 	u32 guest_head;
@@ -1486,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	list_for_each_entry_reverse(last_workload, q, list) {
 
 		if (same_context(&last_workload->ctx_desc, desc)) {
-			gvt_dbg_el("ring id %d cur workload == last\n",
-					ring_id);
+			gvt_dbg_el("ring %s cur workload == last\n",
+				   engine->name);
 			gvt_dbg_el("ctx head %x real head %lx\n", head,
-					last_workload->rb_tail);
+				   last_workload->rb_tail);
 			/*
 			 * cannot use guest context head pointer here,
 			 * as it might not be updated at this time
@@ -1499,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		}
 	}
 
-	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+	gvt_dbg_el("ring %s begin a new workload\n", engine->name);
 
 	/* record some ring buffer register values for scan and shadow */
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1519,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	if (IS_ERR(workload))
 		return workload;
 
-	workload->ring_id = ring_id;
+	workload->engine = engine;
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
 	workload->rb_head = head;
@@ -1528,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
 
-	if (ring_id == RCS0) {
+	if (engine->id == RCS0) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1566,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		}
 	}
 
-	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
-			workload, ring_id, head, tail, start, ctl);
+	gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+		   workload, engine->name, head, tail, start, ctl);
 
 	ret = prepare_mm(workload);
 	if (ret) {
@@ -1578,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	/* Only scan and shadow the first workload in the queue
 	 * as there is only one pre-allocated buf-obj for shadow.
 	 */
-	if (list_empty(workload_q_head(vgpu, ring_id))) {
-		intel_runtime_pm_get(&dev_priv->runtime_pm);
-		ret = intel_gvt_scan_and_shadow_workload(workload);
-		intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+	if (list_empty(q)) {
+		intel_wakeref_t wakeref;
+
+		with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+			ret = intel_gvt_scan_and_shadow_workload(workload);
 	}
 
 	if (ret) {
@@ -1601,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
 {
 	list_add_tail(&workload->list,
-		workload_q_head(workload->vgpu, workload->ring_id));
+		      workload_q_head(workload->vgpu, workload->engine));
 	intel_gvt_kick_schedule(workload->vgpu->gvt);
-	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+	wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
 }
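
A tidy simplification in scheduler.c above: the workload thread now takes
the engine pointer itself as its kthread argument, so the heap-allocated
workload_thread_param, its kfree() in the thread, and the error-path
cleanup all disappear. The same idea with POSIX threads, assuming the
passed object outlives the thread (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct engine { const char *name; };

    static void *workload_thread(void *arg)
    {
            struct engine *engine = arg;    /* no wrapper struct to free */

            printf("workload thread for %s started\n", engine->name);
            return NULL;
    }

    int main(void)
    {
            static struct engine engines[] = { { "rcs0" }, { "bcs0" } };
            pthread_t tid[2];

            for (int i = 0; i < 2; i++)
                    pthread_create(&tid[i], NULL, workload_thread,
                                   &engines[i]);
            for (int i = 0; i < 2; i++)
                    pthread_join(tid[i], NULL);
            return 0;
    }
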
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c50d14a..bf7fc0ca 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
 
 struct intel_vgpu_workload {
 	struct intel_vgpu *vgpu;
-	int ring_id;
+	const struct intel_engine_cs *engine;
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
 	bool ppgtt;
 };
 
-#define workload_q_head(vgpu, ring_id) \
-	(&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+	(&(vgpu)->submission.workload_q_head[(e)->id])
 
 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
 
@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
 intel_vgpu_execlist_submission_ops;
 
 struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+			   const struct intel_engine_cs *engine,
 			   struct execlist_ctx_descriptor_format *desc);
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
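
Note the quiet hygiene fix in workload_q_head() above: both macro
arguments are now parenthesized, so an expression argument cannot change
the meaning via operator precedence. The classic demonstration:

    #include <stdio.h>

    #define SQUARE_BAD(x)  (x * x)
    #define SQUARE_GOOD(x) ((x) * (x))

    int main(void)
    {
            /* 1 + 2 * 1 + 2 == 5, not 9, without the parentheses */
            printf("bad:  %d\n", SQUARE_BAD(1 + 2));
            printf("good: %d\n", SQUARE_GOOD(1 + 2));
            return 0;
    }
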
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 345c2aa..1d5ff88 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
 
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 	/* setup the ballooning information */
 	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
 	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
 	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
 
-	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
 #define VGPU_MAX_WEIGHT 16
@@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
 						   high_avail / vgpu_types[i].high_mm);
 
-		if (IS_GEN(gvt->dev_priv, 8))
+		if (IS_GEN(gvt->gt->i915, 8))
 			sprintf(gvt->types[i].name, "GVTg_V4_%s",
-						vgpu_types[i].name);
-		else if (IS_GEN(gvt->dev_priv, 9))
+				vgpu_types[i].name);
+		else if (IS_GEN(gvt->gt->i915, 9))
 			sprintf(gvt->types[i].name, "GVTg_V5_%s",
-						vgpu_types[i].name);
+				vgpu_types[i].name);
 
 		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
 			     i, gvt->types[i].name,
@@ -271,8 +272,9 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *i915 = gvt->gt->i915;
 
-	WARN(vgpu->active, "vGPU is still active!\n");
+	drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
 
 	/*
 	 * remove idr first so later clean can judge if need to stop
@@ -432,9 +434,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
-	/*TODO: add more platforms support */
-	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
-		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+	ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 	if (ret)
 		goto out_clean_sched_policy;
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index b0a4997..c404862 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
 #include <linux/debugobjects.h>
 
 #include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_pm.h"
 #include "gt/intel_ring.h"
 
@@ -390,13 +391,23 @@ int i915_active_ref(struct i915_active *ref,
 	return err;
 }
 
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
 {
+	struct dma_fence *prev;
+
 	/* We expect the caller to manage the exclusive timeline ordering */
 	GEM_BUG_ON(i915_active_is_idle(ref));
 
-	if (!__i915_active_fence_set(&ref->excl, f))
+	rcu_read_lock();
+	prev = __i915_active_fence_set(&ref->excl, f);
+	if (prev)
+		prev = dma_fence_get_rcu(prev);
+	else
 		atomic_inc(&ref->count);
+	rcu_read_unlock();
+
+	return prev;
 }
 
 bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -442,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
 {
 	struct dma_fence *fence;
 
+	if (unlikely(is_barrier(active)))
+		return;
+
 	fence = i915_active_fence_get(active);
 	if (!fence)
 		return;
@@ -450,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
 	dma_fence_put(fence);
 }
 
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
+{
+	struct intel_engine_cs *engine;
+
+	if (likely(!is_barrier(&it->base)))
+		return 0;
+
+	engine = __barrier_to_engine(it);
+	smp_rmb(); /* serialise with add_active_barriers */
+	if (!is_barrier(&it->base))
+		return 0;
+
+	return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
 {
 	struct active_node *it, *n;
 	int err = 0;
 
+	enable_signaling(&ref->excl);
+	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+		err = flush_barrier(it); /* unconnected idle barrier? */
+		if (err)
+			break;
+
+		enable_signaling(&it->base);
+	}
+
+	return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+	int err;
+
 	might_sleep();
 
 	if (!i915_active_acquire_if_busy(ref))
 		return 0;
 
-	/* Flush lazy signals */
-	enable_signaling(&ref->excl);
-	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		if (is_barrier(&it->base)) /* unconnected idle barrier */
-			continue;
-
-		enable_signaling(&it->base);
-	}
 	/* Any fence added after the wait begins will not be auto-signaled */
-
+	err = flush_lazy_signals(ref);
 	i915_active_release(ref);
 	if (err)
 		return err;
@@ -481,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
 	return 0;
 }
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+			  int (*fn)(void *arg, struct dma_fence *fence),
+			  void *arg)
+{
+	struct dma_fence *fence;
+
+	if (is_barrier(active)) /* XXX flush the barrier? */
+		return 0;
+
+	fence = i915_active_fence_get(active);
+	if (fence) {
+		int err;
+
+		err = fn(arg, fence);
+		dma_fence_put(fence);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+static int await_active(struct i915_active *ref,
+			unsigned int flags,
+			int (*fn)(void *arg, struct dma_fence *fence),
+			void *arg)
 {
 	int err = 0;
 
+	/* We must always wait for the exclusive fence! */
 	if (rcu_access_pointer(ref->excl.fence)) {
-		struct dma_fence *fence;
-
-		rcu_read_lock();
-		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
-		rcu_read_unlock();
-		if (fence) {
-			err = i915_request_await_dma_fence(rq, fence);
-			dma_fence_put(fence);
-		}
+		err = __await_active(&ref->excl, fn, arg);
+		if (err)
+			return err;
 	}
 
-	/* In the future we may choose to await on all fences */
+	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
 
-	return err;
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			err = __await_active(&it->base, fn, arg);
+			if (err)
+				break;
+		}
+		i915_active_release(ref);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+	return i915_request_await_dma_fence(arg, fence);
+}
+
+int i915_request_await_active(struct i915_request *rq,
+			      struct i915_active *ref,
+			      unsigned int flags)
+{
+	return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+	return i915_sw_fence_await_dma_fence(arg, fence, 0,
+					     GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+			       struct i915_active *ref,
+			       unsigned int flags)
+{
+	return await_active(ref, flags, sw_await_fence, fence);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -623,6 +716,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	 * We can then use the preallocated nodes in
 	 * i915_active_acquire_barrier()
 	 */
+	GEM_BUG_ON(!mask);
 	for_each_engine_masked(engine, gt, mask, tmp) {
 		u64 idx = engine->kernel_context->timeline->fence_context;
 		struct llist_node *prev = first;
@@ -812,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active,
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
 	}
-	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
 	list_add_tail(&active->cb.node, &fence->cb_list);
 	spin_unlock_irqrestore(fence->lock, flags);
 
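[ i915_active_set_exclusive() now hands back a reference to the fence it
  displaced, taken under rcu_read_lock() so the extra reference is acquired
  while the pointer is still guaranteed valid; callers that manage the
  exclusive timeline can chain onto it. A sketch, with ref and rq assumed
  in scope:

	struct dma_fence *prev;
	int err = 0;

	prev = i915_active_set_exclusive(ref, &rq->fence);
	if (prev) {
		/* preserve ordering against the fence we just replaced */
		err = i915_request_await_dma_fence(rq, prev);
		dma_fence_put(prev);
	}

  The wait path also stops skipping unconnected idle barriers and instead
  flushes them through intel_engine_flush_barriers(), and the await logic
  is generalised over a callback so requests and sw fences share one
  await_active() walk. ]
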
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 51e1e854..b3282ae 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -173,7 +173,8 @@ i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 	return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
 }
 
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
 
 static inline bool i915_active_has_exclusive(struct i915_active *ref)
 {
@@ -182,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
 
 int i915_active_wait(struct i915_active *ref);
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+			       struct i915_active *ref,
+			       unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+			      struct i915_active *ref,
+			      unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
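
[ Both awaiters now take a flags word: 0 keeps the old behaviour of
  waiting only on the exclusive fence, while I915_ACTIVE_AWAIT_ALL also
  walks every tracked timeline in the tree. Usage sketch:

	/* exclusive fence only (previous behaviour) */
	err = i915_request_await_active(rq, ref, 0);

	/* exclusive fence plus every tracked timeline */
	err = i915_request_await_active(rq, ref, I915_ACTIVE_AWAIT_ALL);
]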
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af..20babbd 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
 	return block;
 
 out_free:
-	__i915_buddy_free(mm, block);
+	if (i != order)
+		__i915_buddy_free(mm, block);
 	return ERR_PTR(err);
 }
 
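[ The buddy allocator's error path gains a guard: the block is handed to
  __i915_buddy_free() only when the allocation loop has actually descended
  below the order the block was found at (i != order). The plausible
  reading is that a failure at the requested order itself means the
  allocator never took ownership of a freshly split block, so freeing it
  there would corrupt the free lists. ]
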
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a0e437a..189b573 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -803,10 +803,11 @@ static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
 			if (curr < previous) {
-				DRM_ERROR("CMD: %s [%d] command table not sorted: "
-					  "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
-					  engine->name, engine->id,
-					  i, j, curr, previous);
+				drm_err(&engine->i915->drm,
+					"CMD: %s [%d] command table not sorted: "
+					"table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+					engine->name, engine->id,
+					i, j, curr, previous);
 				ret = false;
 			}
 
@@ -829,10 +830,11 @@ static bool check_sorted(const struct intel_engine_cs *engine,
 		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
 
 		if (curr < previous) {
-			DRM_ERROR("CMD: %s [%d] register table not sorted: "
-				  "entry=%d reg=0x%08X prev=0x%08X\n",
-				  engine->name, engine->id,
-				  i, curr, previous);
+			drm_err(&engine->i915->drm,
+				"CMD: %s [%d] register table not sorted: "
+				"entry=%d reg=0x%08X prev=0x%08X\n",
+				engine->name, engine->id,
+				i, curr, previous);
 			ret = false;
 		}
 
@@ -1010,18 +1012,21 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 	}
 
 	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
-		DRM_ERROR("%s: command descriptions are not sorted\n",
-			  engine->name);
+		drm_err(&engine->i915->drm,
+			"%s: command descriptions are not sorted\n",
+			engine->name);
 		return;
 	}
 	if (!validate_regs_sorted(engine)) {
-		DRM_ERROR("%s: registers are not sorted\n", engine->name);
+		drm_err(&engine->i915->drm,
+			"%s: registers are not sorted\n", engine->name);
 		return;
 	}
 
 	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
 	if (ret) {
-		DRM_ERROR("%s: initialised failed!\n", engine->name);
+		drm_err(&engine->i915->drm,
+			"%s: initialisation failed!\n", engine->name);
 		fini_hash_table(engine);
 		return;
 	}
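
[ The command-parser diagnostics move from the global DRM_ERROR() to the
  device-aware drm_err(), for the same reason as the drm_WARN conversion
  above. The shape of the new call, with illustrative message text:

	drm_err(&engine->i915->drm,
		"%s: command parser tables rejected\n", engine->name);
]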
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d5a9b8a..6ca7971 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,14 +30,6 @@
 #include <linux/sort.h>
 
 #include <drm/drm_debugfs.h>
-#include <drm/drm_fourcc.h>
-
-#include "display/intel_display_types.h"
-#include "display/intel_dp.h"
-#include "display/intel_fbc.h"
-#include "display/intel_hdcp.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_psr.h"
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_gt_pm.h"
@@ -48,9 +40,9 @@
 #include "gt/uc/intel_guc_submission.h"
 
 #include "i915_debugfs.h"
+#include "i915_debugfs_params.h"
 #include "i915_irq.h"
 #include "i915_trace.h"
-#include "intel_csr.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
 
@@ -127,8 +119,8 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
 	}
 }
 
-static void
-describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+void
+i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
@@ -673,7 +665,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 		if (!vma)
 			seq_puts(m, "unused");
 		else
-			describe_obj(m, vma->obj);
+			i915_debugfs_describe_obj(m, vma->obj);
 		seq_putc(m, '\n');
 	}
 	rcu_read_unlock();
@@ -1004,367 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return ret;
 }
 
-static int ilk_drpc_info(struct seq_file *m)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	struct intel_uncore *uncore = &i915->uncore;
-	u32 rgvmodectl, rstdbyctl;
-	u16 crstandvid;
-
-	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
-	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
-	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
-	seq_printf(m, "Boost freq: %d\n",
-		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
-		   MEMMODE_BOOST_FREQ_SHIFT);
-	seq_printf(m, "HW control enabled: %s\n",
-		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
-	seq_printf(m, "SW control enabled: %s\n",
-		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
-	seq_printf(m, "Gated voltage change: %s\n",
-		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
-	seq_printf(m, "Starting frequency: P%d\n",
-		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
-	seq_printf(m, "Max P-state: P%d\n",
-		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
-	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
-	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
-	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
-	seq_printf(m, "Render standby enabled: %s\n",
-		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
-	seq_puts(m, "Current RS state: ");
-	switch (rstdbyctl & RSX_STATUS_MASK) {
-	case RSX_STATUS_ON:
-		seq_puts(m, "on\n");
-		break;
-	case RSX_STATUS_RC1:
-		seq_puts(m, "RC1\n");
-		break;
-	case RSX_STATUS_RC1E:
-		seq_puts(m, "RC1E\n");
-		break;
-	case RSX_STATUS_RS1:
-		seq_puts(m, "RS1\n");
-		break;
-	case RSX_STATUS_RS2:
-		seq_puts(m, "RS2 (RC6)\n");
-		break;
-	case RSX_STATUS_RS3:
-		seq_puts(m, "RC3 (RC6+)\n");
-		break;
-	default:
-		seq_puts(m, "unknown\n");
-		break;
-	}
-
-	return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	struct intel_uncore *uncore = &i915->uncore;
-	struct intel_uncore_forcewake_domain *fw_domain;
-	unsigned int tmp;
-
-	seq_printf(m, "user.bypass_count = %u\n",
-		   uncore->user_forcewake_count);
-
-	for_each_fw_domain(fw_domain, uncore, tmp)
-		seq_printf(m, "%s.wake_count = %u\n",
-			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
-			   READ_ONCE(fw_domain->wake_count));
-
-	return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
-			  const char *title,
-			  const i915_reg_t reg)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-		seq_printf(m, "%s %u (%llu us)\n", title,
-			   intel_uncore_read(&i915->uncore, reg),
-			   intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	u32 rcctl1, pw_status;
-
-	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
-	rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
-	seq_printf(m, "RC6 Enabled: %s\n",
-		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
-					GEN6_RC_CTL_EI_MODE(1))));
-	seq_printf(m, "Render Power Well: %s\n",
-		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
-	seq_printf(m, "Media Power Well: %s\n",
-		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
-	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
-	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
-	return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	u32 gt_core_status, rcctl1, rc6vids = 0;
-	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
-	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
-	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
-	rcctl1 = I915_READ(GEN6_RC_CONTROL);
-	if (INTEL_GEN(dev_priv) >= 9) {
-		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
-		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
-	}
-
-	if (INTEL_GEN(dev_priv) <= 7)
-		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
-				       &rc6vids, NULL);
-
-	seq_printf(m, "RC1e Enabled: %s\n",
-		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
-	seq_printf(m, "RC6 Enabled: %s\n",
-		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-	if (INTEL_GEN(dev_priv) >= 9) {
-		seq_printf(m, "Render Well Gating Enabled: %s\n",
-			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
-		seq_printf(m, "Media Well Gating Enabled: %s\n",
-			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
-	}
-	seq_printf(m, "Deep RC6 Enabled: %s\n",
-		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
-	seq_printf(m, "Deepest RC6 Enabled: %s\n",
-		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
-	seq_puts(m, "Current RC state: ");
-	switch (gt_core_status & GEN6_RCn_MASK) {
-	case GEN6_RC0:
-		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
-			seq_puts(m, "Core Power Down\n");
-		else
-			seq_puts(m, "on\n");
-		break;
-	case GEN6_RC3:
-		seq_puts(m, "RC3\n");
-		break;
-	case GEN6_RC6:
-		seq_puts(m, "RC6\n");
-		break;
-	case GEN6_RC7:
-		seq_puts(m, "RC7\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_printf(m, "Core Power Down: %s\n",
-		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
-	if (INTEL_GEN(dev_priv) >= 9) {
-		seq_printf(m, "Render Power Well: %s\n",
-			(gen9_powergate_status &
-			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
-		seq_printf(m, "Media Power Well: %s\n",
-			(gen9_powergate_status &
-			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
-	}
-
-	/* Not exactly sure what this is */
-	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
-		      GEN6_GT_GFX_RC6_LOCKED);
-	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
-	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
-	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
-	if (INTEL_GEN(dev_priv) <= 7) {
-		seq_printf(m, "RC6   voltage: %dmV\n",
-			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
-		seq_printf(m, "RC6+  voltage: %dmV\n",
-			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
-		seq_printf(m, "RC6++ voltage: %dmV\n",
-			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
-	}
-
-	return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-	int err = -ENODEV;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			err = vlv_drpc_info(m);
-		else if (INTEL_GEN(dev_priv) >= 6)
-			err = gen6_drpc_info(m);
-		else
-			err = ilk_drpc_info(m);
-	}
-
-	return err;
-}
-
-static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-
-	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
-		   dev_priv->fb_tracking.busy_bits);
-
-	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
-		   dev_priv->fb_tracking.flip_bits);
-
-	return 0;
-}
-
-static int i915_fbc_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_fbc *fbc = &dev_priv->fbc;
-	intel_wakeref_t wakeref;
-
-	if (!HAS_FBC(dev_priv))
-		return -ENODEV;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-	mutex_lock(&fbc->lock);
-
-	if (intel_fbc_is_active(dev_priv))
-		seq_puts(m, "FBC enabled\n");
-	else
-		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
-
-	if (intel_fbc_is_active(dev_priv)) {
-		u32 mask;
-
-		if (INTEL_GEN(dev_priv) >= 8)
-			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
-		else if (INTEL_GEN(dev_priv) >= 7)
-			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
-		else if (INTEL_GEN(dev_priv) >= 5)
-			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
-		else if (IS_G4X(dev_priv))
-			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
-		else
-			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
-							FBC_STAT_COMPRESSED);
-
-		seq_printf(m, "Compressing: %s\n", yesno(mask));
-	}
-
-	mutex_unlock(&fbc->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static int i915_fbc_false_color_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
-		return -ENODEV;
-
-	*val = dev_priv->fbc.false_color;
-
-	return 0;
-}
-
-static int i915_fbc_false_color_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	u32 reg;
-
-	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
-		return -ENODEV;
-
-	mutex_lock(&dev_priv->fbc.lock);
-
-	reg = I915_READ(ILK_DPFC_CONTROL);
-	dev_priv->fbc.false_color = val;
-
-	I915_WRITE(ILK_DPFC_CONTROL, val ?
-		   (reg | FBC_CTL_FALSE_COLOR) :
-		   (reg & ~FBC_CTL_FALSE_COLOR));
-
-	mutex_unlock(&dev_priv->fbc.lock);
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
-			i915_fbc_false_color_get, i915_fbc_false_color_set,
-			"%llu\n");
-
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-
-	if (!HAS_IPS(dev_priv))
-		return -ENODEV;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	seq_printf(m, "Enabled by kernel parameter: %s\n",
-		   yesno(i915_modparams.enable_ips));
-
-	if (INTEL_GEN(dev_priv) >= 8) {
-		seq_puts(m, "Currently: unknown\n");
-	} else {
-		if (I915_READ(IPS_CTL) & IPS_ENABLE)
-			seq_puts(m, "Currently: enabled\n");
-		else
-			seq_puts(m, "Currently: disabled\n");
-	}
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static int i915_sr_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-	bool sr_enabled = false;
-
-	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		/* no global SR status; inspect per-plane WM */;
-	else if (HAS_PCH_SPLIT(dev_priv))
-		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
-		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
-		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
-	else if (IS_I915GM(dev_priv))
-		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
-	else if (IS_PINEVIEW(dev_priv))
-		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
-	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
-
-	return 0;
-}
-
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
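
[ This block of removals takes the power- and display-centric entries
  (drpc, forcewake, rc6 residency, frontbuffer tracking, fbc, ips, sr)
  out of i915_debugfs.c. These line up with the debugfs reorganisation
  elsewhere in this series, which splits such entries into more specific
  files (display and gt debugfs) rather than deleting them outright. ]
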
@@ -1406,70 +1037,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_opregion(struct seq_file *m, void *unused)
-{
-	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
-	if (opregion->header)
-		seq_write(m, opregion->header, OPREGION_SIZE);
-
-	return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
-	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
-	if (opregion->vbt)
-		seq_write(m, opregion->vbt, opregion->vbt_size);
-
-	return 0;
-}
-
-static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_framebuffer *fbdev_fb = NULL;
-	struct drm_framebuffer *drm_fb;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
-		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
-		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-			   fbdev_fb->base.width,
-			   fbdev_fb->base.height,
-			   fbdev_fb->base.format->depth,
-			   fbdev_fb->base.format->cpp[0] * 8,
-			   fbdev_fb->base.modifier,
-			   drm_framebuffer_read_refcount(&fbdev_fb->base));
-		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
-		seq_putc(m, '\n');
-	}
-#endif
-
-	mutex_lock(&dev->mode_config.fb_lock);
-	drm_for_each_fb(drm_fb, dev) {
-		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
-		if (fb == fbdev_fb)
-			continue;
-
-		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-			   fb->base.width,
-			   fb->base.height,
-			   fb->base.format->depth,
-			   fb->base.format->cpp[0] * 8,
-			   fb->base.modifier,
-			   drm_framebuffer_read_refcount(&fb->base));
-		describe_obj(m, intel_fb_obj(&fb->base));
-		seq_putc(m, '\n');
-	}
-	mutex_unlock(&dev->mode_config.fb_lock);
-
-	return 0;
-}
-
 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
@@ -1515,7 +1082,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (intel_context_pin_if_active(ce)) {
 				seq_printf(m, "%s: ", ce->engine->name);
 				if (ce->state)
-					describe_obj(m, ce->state->obj);
+					i915_debugfs_describe_obj(m, ce->state->obj);
 				describe_ctx_ring(m, ce->ring);
 				seq_putc(m, '\n');
 				intel_context_unpin(ce);
@@ -1752,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
 	return "";
 }
 
-static void i915_guc_log_info(struct seq_file *m,
-			      struct drm_i915_private *dev_priv)
+static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
 {
-	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
 	enum guc_log_buffer_type type;
 
 	if (!intel_guc_log_relay_created(log)) {
@@ -1779,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m,
 static int i915_guc_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_uc *uc = &dev_priv->gt.uc;
 
-	if (!USES_GUC(dev_priv))
+	if (!intel_uc_uses_guc(uc))
 		return -ENODEV;
 
-	i915_guc_log_info(m, dev_priv);
+	i915_guc_log_info(m, &uc->guc.log);
 
 	/* Add more as required ... */
 
@@ -1793,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
 static int i915_guc_stage_pool(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
-	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+	struct intel_uc *uc = &dev_priv->gt.uc;
+	struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
 	int index;
 
-	if (!USES_GUC_SUBMISSION(dev_priv))
+	if (!intel_uc_uses_guc_submission(uc))
 		return -ENODEV;
 
 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -1884,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 static int i915_guc_log_level_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_uc *uc = &dev_priv->gt.uc;
 
-	if (!USES_GUC(dev_priv))
+	if (!intel_uc_uses_guc(uc))
 		return -ENODEV;
 
-	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
+	*val = intel_guc_log_get_level(&uc->guc.log);
 
 	return 0;
 }
@@ -1896,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val)
 static int i915_guc_log_level_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_uc *uc = &dev_priv->gt.uc;
 
-	if (!USES_GUC(dev_priv))
+	if (!intel_uc_uses_guc(uc))
 		return -ENODEV;
 
-	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
+	return intel_guc_log_set_level(&uc->guc.log, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -1913,7 +1481,7 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
 	struct intel_guc *guc = &i915->gt.uc.guc;
 	struct intel_guc_log *log = &guc->log;
 
-	if (!intel_guc_is_running(guc))
+	if (!intel_guc_is_ready(guc))
 		return -ENODEV;
 
 	file->private_data = log;
@@ -1963,253 +1531,6 @@ static const struct file_operations i915_guc_log_relay_fops = {
 	.release = i915_guc_log_relay_release,
 };
 
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
-	u8 val;
-	static const char * const sink_status[] = {
-		"inactive",
-		"transition to active, capture and display",
-		"active, display from RFB",
-		"active, capture and display on sink device timings",
-		"transition to inactive, capture and display, timing re-sync",
-		"reserved",
-		"reserved",
-		"sink internal error",
-	};
-	struct drm_connector *connector = m->private;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	struct intel_dp *intel_dp =
-		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-	int ret;
-
-	if (!CAN_PSR(dev_priv)) {
-		seq_puts(m, "PSR Unsupported\n");
-		return -ENODEV;
-	}
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
-	if (ret == 1) {
-		const char *str = "unknown";
-
-		val &= DP_PSR_SINK_STATE_MASK;
-		if (val < ARRAY_SIZE(sink_status))
-			str = sink_status[val];
-		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
-	} else {
-		return ret;
-	}
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
-{
-	u32 val, status_val;
-	const char *status = "unknown";
-
-	if (dev_priv->psr.psr2_enabled) {
-		static const char * const live_status[] = {
-			"IDLE",
-			"CAPTURE",
-			"CAPTURE_FS",
-			"SLEEP",
-			"BUFON_FW",
-			"ML_UP",
-			"SU_STANDBY",
-			"FAST_SLEEP",
-			"DEEP_SLEEP",
-			"BUF_ON",
-			"TG_ON"
-		};
-		val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
-		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
-			      EDP_PSR2_STATUS_STATE_SHIFT;
-		if (status_val < ARRAY_SIZE(live_status))
-			status = live_status[status_val];
-	} else {
-		static const char * const live_status[] = {
-			"IDLE",
-			"SRDONACK",
-			"SRDENT",
-			"BUFOFF",
-			"BUFON",
-			"AUXACK",
-			"SRDOFFACK",
-			"SRDENT_ON",
-		};
-		val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
-		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
-			      EDP_PSR_STATUS_STATE_SHIFT;
-		if (status_val < ARRAY_SIZE(live_status))
-			status = live_status[status_val];
-	}
-
-	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_psr *psr = &dev_priv->psr;
-	intel_wakeref_t wakeref;
-	const char *status;
-	bool enabled;
-	u32 val;
-
-	if (!HAS_PSR(dev_priv))
-		return -ENODEV;
-
-	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
-	if (psr->dp)
-		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
-	seq_puts(m, "\n");
-
-	if (!psr->sink_support)
-		return 0;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-	mutex_lock(&psr->lock);
-
-	if (psr->enabled)
-		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
-	else
-		status = "disabled";
-	seq_printf(m, "PSR mode: %s\n", status);
-
-	if (!psr->enabled) {
-		seq_printf(m, "PSR sink not reliable: %s\n",
-			   yesno(psr->sink_not_reliable));
-
-		goto unlock;
-	}
-
-	if (psr->psr2_enabled) {
-		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
-		enabled = val & EDP_PSR2_ENABLE;
-	} else {
-		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
-		enabled = val & EDP_PSR_ENABLE;
-	}
-	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
-		   enableddisabled(enabled), val);
-	psr_source_status(dev_priv, m);
-	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
-		   psr->busy_frontbuffer_bits);
-
-	/*
-	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
-	 */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
-		val &= EDP_PSR_PERF_CNT_MASK;
-		seq_printf(m, "Performance counter: %u\n", val);
-	}
-
-	if (psr->debug & I915_PSR_DEBUG_IRQ) {
-		seq_printf(m, "Last attempted entry at: %lld\n",
-			   psr->last_entry_attempt);
-		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
-	}
-
-	if (psr->psr2_enabled) {
-		u32 su_frames_val[3];
-		int frame;
-
-		/*
-		 * Reading all 3 registers before hand to minimize crossing a
-		 * frame boundary between register reads
-		 */
-		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
-			val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
-						       frame));
-			su_frames_val[frame / 3] = val;
-		}
-
-		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
-		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
-			u32 su_blocks;
-
-			su_blocks = su_frames_val[frame / 3] &
-				    PSR2_SU_STATUS_MASK(frame);
-			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
-			seq_printf(m, "%d\t%d\n", frame, su_blocks);
-		}
-	}
-
-unlock:
-	mutex_unlock(&psr->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	intel_wakeref_t wakeref;
-	int ret;
-
-	if (!CAN_PSR(dev_priv))
-		return -ENODEV;
-
-	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	ret = intel_psr_debug_set(dev_priv, val);
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
-	struct drm_i915_private *dev_priv = data;
-
-	if (!CAN_PSR(dev_priv))
-		return -ENODEV;
-
-	*val = READ_ONCE(dev_priv->psr.debug);
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
-			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
-			"%llu\n");
-
-static int i915_energy_uJ(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	unsigned long long power;
-	intel_wakeref_t wakeref;
-	u32 units;
-
-	if (INTEL_GEN(dev_priv) < 6)
-		return -ENODEV;
-
-	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
-		return -ENODEV;
-
-	units = (power & 0x1f00) >> 8;
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
-		power = I915_READ(MCH_SECP_NRG_STTS);
-
-	power = (1000000 * power) >> units; /* convert to uJ */
-	seq_printf(m, "%llu", power);
-
-	return 0;
-}
-
 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2243,452 +1564,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_power_domain_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	int i;
-
-	mutex_lock(&power_domains->lock);
-
-	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
-	for (i = 0; i < power_domains->power_well_count; i++) {
-		struct i915_power_well *power_well;
-		enum intel_display_power_domain power_domain;
-
-		power_well = &power_domains->power_wells[i];
-		seq_printf(m, "%-25s %d\n", power_well->desc->name,
-			   power_well->count);
-
-		for_each_power_domain(power_domain, power_well->desc->domains)
-			seq_printf(m, "  %-23s %d\n",
-				 intel_display_power_domain_str(power_domain),
-				 power_domains->domain_use_count[power_domain]);
-	}
-
-	mutex_unlock(&power_domains->lock);
-
-	return 0;
-}
-
-static int i915_dmc_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	intel_wakeref_t wakeref;
-	struct intel_csr *csr;
-	i915_reg_t dc5_reg, dc6_reg = {};
-
-	if (!HAS_CSR(dev_priv))
-		return -ENODEV;
-
-	csr = &dev_priv->csr;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
-	seq_printf(m, "path: %s\n", csr->fw_path);
-
-	if (!csr->dmc_payload)
-		goto out;
-
-	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
-		   CSR_VERSION_MINOR(csr->version));
-
-	if (INTEL_GEN(dev_priv) >= 12) {
-		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
-		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
-		/*
-		 * NOTE: DMC_DEBUG3 is a general purpose reg.
-		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
-		 * reg for DC3CO debugging and validation,
-		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
-		 */
-		seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
-	} else {
-		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
-						 SKL_CSR_DC3_DC5_COUNT;
-		if (!IS_GEN9_LP(dev_priv))
-			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
-	}
-
-	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
-	if (dc6_reg.reg)
-		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
-
-out:
-	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
-	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
-	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
-static void intel_seq_print_mode(struct seq_file *m, int tabs,
-				 const struct drm_display_mode *mode)
-{
-	int i;
-
-	for (i = 0; i < tabs; i++)
-		seq_putc(m, '\t');
-
-	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_encoder_info(struct seq_file *m,
-			       struct intel_crtc *crtc,
-			       struct intel_encoder *encoder)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_connector_list_iter conn_iter;
-	struct drm_connector *connector;
-
-	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
-		   encoder->base.base.id, encoder->base.name);
-
-	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		const struct drm_connector_state *conn_state =
-			connector->state;
-
-		if (conn_state->best_encoder != &encoder->base)
-			continue;
-
-		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
-			   connector->base.id, connector->name);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-}
-
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
-{
-	const struct drm_display_mode *mode = panel->fixed_mode;
-
-	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_hdcp_info(struct seq_file *m,
-			    struct intel_connector *intel_connector)
-{
-	bool hdcp_cap, hdcp2_cap;
-
-	hdcp_cap = intel_hdcp_capable(intel_connector);
-	hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
-	if (hdcp_cap)
-		seq_puts(m, "HDCP1.4 ");
-	if (hdcp2_cap)
-		seq_puts(m, "HDCP2.2 ");
-
-	if (!hdcp_cap && !hdcp2_cap)
-		seq_puts(m, "None");
-
-	seq_puts(m, "\n");
-}
-
-static void intel_dp_info(struct seq_file *m,
-			  struct intel_connector *intel_connector)
-{
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-
-	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
-	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
-	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
-		intel_panel_info(m, &intel_connector->panel);
-
-	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
-				&intel_dp->aux);
-	if (intel_connector->hdcp.shim) {
-		seq_puts(m, "\tHDCP version: ");
-		intel_hdcp_info(m, intel_connector);
-	}
-}
-
-static void intel_dp_mst_info(struct seq_file *m,
-			  struct intel_connector *intel_connector)
-{
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_dp_mst_encoder *intel_mst =
-		enc_to_mst(intel_encoder);
-	struct intel_digital_port *intel_dig_port = intel_mst->primary;
-	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-					intel_connector->port);
-
-	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
-static void intel_hdmi_info(struct seq_file *m,
-			    struct intel_connector *intel_connector)
-{
-	struct intel_encoder *intel_encoder = intel_connector->encoder;
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
-
-	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
-	if (intel_connector->hdcp.shim) {
-		seq_puts(m, "\tHDCP version: ");
-		intel_hdcp_info(m, intel_connector);
-	}
-}
-
-static void intel_lvds_info(struct seq_file *m,
-			    struct intel_connector *intel_connector)
-{
-	intel_panel_info(m, &intel_connector->panel);
-}
-
-static void intel_connector_info(struct seq_file *m,
-				 struct drm_connector *connector)
-{
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-	const struct drm_connector_state *conn_state = connector->state;
-	struct intel_encoder *encoder =
-		to_intel_encoder(conn_state->best_encoder);
-	const struct drm_display_mode *mode;
-
-	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
-		   connector->base.id, connector->name,
-		   drm_get_connector_status_name(connector->status));
-
-	if (connector->status == connector_status_disconnected)
-		return;
-
-	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
-		   connector->display_info.width_mm,
-		   connector->display_info.height_mm);
-	seq_printf(m, "\tsubpixel order: %s\n",
-		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
-	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
-
-	if (!encoder)
-		return;
-
-	switch (connector->connector_type) {
-	case DRM_MODE_CONNECTOR_DisplayPort:
-	case DRM_MODE_CONNECTOR_eDP:
-		if (encoder->type == INTEL_OUTPUT_DP_MST)
-			intel_dp_mst_info(m, intel_connector);
-		else
-			intel_dp_info(m, intel_connector);
-		break;
-	case DRM_MODE_CONNECTOR_LVDS:
-		if (encoder->type == INTEL_OUTPUT_LVDS)
-			intel_lvds_info(m, intel_connector);
-		break;
-	case DRM_MODE_CONNECTOR_HDMIA:
-		if (encoder->type == INTEL_OUTPUT_HDMI ||
-		    encoder->type == INTEL_OUTPUT_DDI)
-			intel_hdmi_info(m, intel_connector);
-		break;
-	default:
-		break;
-	}
-
-	seq_printf(m, "\tmodes:\n");
-	list_for_each_entry(mode, &connector->modes, head)
-		intel_seq_print_mode(m, 2, mode);
-}
-
-static const char *plane_type(enum drm_plane_type type)
-{
-	switch (type) {
-	case DRM_PLANE_TYPE_OVERLAY:
-		return "OVL";
-	case DRM_PLANE_TYPE_PRIMARY:
-		return "PRI";
-	case DRM_PLANE_TYPE_CURSOR:
-		return "CUR";
-	/*
-	 * Deliberately omitting default: to generate compiler warnings
-	 * when a new drm_plane_type gets added.
-	 */
-	}
-
-	return "unknown";
-}
-
-static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
-{
-	/*
-	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
-	 * will print them all to visualize if the values are misused
-	 */
-	snprintf(buf, bufsize,
-		 "%s%s%s%s%s%s(0x%08x)",
-		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
-		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
-		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
-		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
-		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
-		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
-		 rotation);
-}
-
-static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
-{
-	const struct intel_plane_state *plane_state =
-		to_intel_plane_state(plane->base.state);
-	const struct drm_framebuffer *fb = plane_state->uapi.fb;
-	struct drm_format_name_buf format_name;
-	struct drm_rect src, dst;
-	char rot_str[48];
-
-	src = drm_plane_state_src(&plane_state->uapi);
-	dst = drm_plane_state_dest(&plane_state->uapi);
-
-	if (fb)
-		drm_get_format_name(fb->format->format, &format_name);
-
-	plane_rotation(rot_str, sizeof(rot_str),
-		       plane_state->uapi.rotation);
-
-	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
-		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
-		   fb ? fb->width : 0, fb ? fb->height : 0,
-		   DRM_RECT_FP_ARG(&src),
-		   DRM_RECT_ARG(&dst),
-		   rot_str);
-}
-
-static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
-{
-	const struct intel_plane_state *plane_state =
-		to_intel_plane_state(plane->base.state);
-	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct drm_format_name_buf format_name;
-	char rot_str[48];
-
-	if (!fb)
-		return;
-
-	drm_get_format_name(fb->format->format, &format_name);
-
-	plane_rotation(rot_str, sizeof(rot_str),
-		       plane_state->hw.rotation);
-
-	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
-		   fb->base.id, format_name.str,
-		   fb->width, fb->height,
-		   yesno(plane_state->uapi.visible),
-		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
-		   DRM_RECT_ARG(&plane_state->uapi.dst),
-		   rot_str);
-}
-
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_plane *plane;
-
-	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
-		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
-			   plane->base.base.id, plane->base.name,
-			   plane_type(plane->base.type));
-		intel_plane_uapi_info(m, plane);
-		intel_plane_hw_info(m, plane);
-	}
-}
-
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
-{
-	const struct intel_crtc_state *crtc_state =
-		to_intel_crtc_state(crtc->base.state);
-	int num_scalers = crtc->num_scalers;
-	int i;
-
-	/* Not all platformas have a scaler */
-	if (num_scalers) {
-		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
-			   num_scalers,
-			   crtc_state->scaler_state.scaler_users,
-			   crtc_state->scaler_state.scaler_id);
-
-		for (i = 0; i < num_scalers; i++) {
-			const struct intel_scaler *sc =
-				&crtc_state->scaler_state.scalers[i];
-
-			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
-				   i, yesno(sc->in_use), sc->mode);
-		}
-		seq_puts(m, "\n");
-	} else {
-		seq_puts(m, "\tNo scalers available on this platform\n");
-	}
-}
-
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_crtc_state *crtc_state =
-		to_intel_crtc_state(crtc->base.state);
-	struct intel_encoder *encoder;
-
-	seq_printf(m, "[CRTC:%d:%s]:\n",
-		   crtc->base.base.id, crtc->base.name);
-
-	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
-		   yesno(crtc_state->uapi.enable),
-		   yesno(crtc_state->uapi.active),
-		   DRM_MODE_ARG(&crtc_state->uapi.mode));
-
-	if (crtc_state->hw.enable) {
-		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
-			   yesno(crtc_state->hw.active),
-			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
-
-		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
-			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
-			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
-
-		intel_scaler_info(m, crtc);
-	}
-
-	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
-				    crtc_state->uapi.encoder_mask)
-		intel_encoder_info(m, crtc, encoder);
-
-	intel_plane_info(m, crtc);
-
-	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
-		   yesno(!crtc->cpu_fifo_underrun_disabled),
-		   yesno(!crtc->pch_fifo_underrun_disabled));
-}
-
-static int i915_display_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	intel_wakeref_t wakeref;
-
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
-	drm_modeset_lock_all(dev);
-
-	seq_printf(m, "CRTC info\n");
-	seq_printf(m, "---------\n");
-	for_each_intel_crtc(dev, crtc)
-		intel_crtc_info(m, crtc);
-
-	seq_printf(m, "\n");
-	seq_printf(m, "Connector info\n");
-	seq_printf(m, "--------------\n");
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter)
-		intel_connector_info(m, connector);
-	drm_connector_list_iter_end(&conn_iter);
-
-	drm_modeset_unlock_all(dev);
-
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
-	return 0;
-}
-
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2733,55 +1608,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_shared_dplls_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	int i;
-
-	drm_modeset_lock_all(dev);
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
-			   pll->info->id);
-		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
-			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
-		seq_printf(m, " tracked hardware state:\n");
-		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
-		seq_printf(m, " dpll_md: 0x%08x\n",
-			   pll->state.hw_state.dpll_md);
-		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
-		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
-		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
-		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
-		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
-		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
-			   pll->state.hw_state.mg_refclkin_ctl);
-		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
-			   pll->state.hw_state.mg_clktop2_coreclkctl1);
-		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
-			   pll->state.hw_state.mg_clktop2_hsclkctl);
-		seq_printf(m, " mg_pll_div0:  0x%08x\n",
-			   pll->state.hw_state.mg_pll_div0);
-		seq_printf(m, " mg_pll_div1:  0x%08x\n",
-			   pll->state.hw_state.mg_pll_div1);
-		seq_printf(m, " mg_pll_lf:    0x%08x\n",
-			   pll->state.hw_state.mg_pll_lf);
-		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
-			   pll->state.hw_state.mg_pll_frac_lock);
-		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
-			   pll->state.hw_state.mg_pll_ssc);
-		seq_printf(m, " mg_pll_bias:  0x%08x\n",
-			   pll->state.hw_state.mg_pll_bias);
-		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
-			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
-	}
-	drm_modeset_unlock_all(dev);
-
-	return 0;
-}
-
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -2802,7 +1628,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 		for (wa = wal->list; count--; wa++)
 			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
 				   i915_mmio_reg_offset(wa->reg),
-				   wa->val, wa->mask);
+				   wa->set, wa->clr);
 
 		seq_printf(m, "\n");
 	}
@@ -2810,646 +1636,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-
-	seq_printf(m, "Isochronous Priority Control: %s\n",
-			yesno(dev_priv->ipc_enabled));
-	return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (!HAS_IPC(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
-				     size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	intel_wakeref_t wakeref;
-	bool enable;
-	int ret;
-
-	ret = kstrtobool_from_user(ubuf, len, &enable);
-	if (ret < 0)
-		return ret;
-
-	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
-		if (!dev_priv->ipc_enabled && enable)
-			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
-		dev_priv->wm.distrust_bios_wm = true;
-		dev_priv->ipc_enabled = enable;
-		intel_enable_ipc(dev_priv);
-	}
-
-	return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_ipc_status_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_ipc_status_write
-};
-
-static int i915_ddb_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct skl_ddb_entry *entry;
-	struct intel_crtc *crtc;
-
-	if (INTEL_GEN(dev_priv) < 9)
-		return -ENODEV;
-
-	drm_modeset_lock_all(dev);
-
-	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		struct intel_crtc_state *crtc_state =
-			to_intel_crtc_state(crtc->base.state);
-		enum pipe pipe = crtc->pipe;
-		enum plane_id plane_id;
-
-		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
-
-		for_each_plane_id_on_crtc(crtc, plane_id) {
-			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
-			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
-				   entry->start, entry->end,
-				   skl_ddb_entry_size(entry));
-		}
-
-		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
-		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
-			   entry->end, skl_ddb_entry_size(entry));
-	}
-
-	drm_modeset_unlock_all(dev);
-
-	return 0;
-}
-
-static void drrs_status_per_crtc(struct seq_file *m,
-				 struct drm_device *dev,
-				 struct intel_crtc *intel_crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_drrs *drrs = &dev_priv->drrs;
-	int vrefresh = 0;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		if (connector->state->crtc != &intel_crtc->base)
-			continue;
-
-		seq_printf(m, "%s:\n", connector->name);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
-		seq_puts(m, "\tVBT: DRRS_type: Static");
-	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
-		seq_puts(m, "\tVBT: DRRS_type: Seamless");
-	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
-		seq_puts(m, "\tVBT: DRRS_type: None");
-	else
-		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
-
-	seq_puts(m, "\n\n");
-
-	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
-		struct intel_panel *panel;
-
-		mutex_lock(&drrs->mutex);
-		/* DRRS Supported */
-		seq_puts(m, "\tDRRS Supported: Yes\n");
-
-		/* disable_drrs() will make drrs->dp NULL */
-		if (!drrs->dp) {
-			seq_puts(m, "Idleness DRRS: Disabled\n");
-			if (dev_priv->psr.enabled)
-				seq_puts(m,
-				"\tAs PSR is enabled, DRRS is not enabled\n");
-			mutex_unlock(&drrs->mutex);
-			return;
-		}
-
-		panel = &drrs->dp->attached_connector->panel;
-		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
-					drrs->busy_frontbuffer_bits);
-
-		seq_puts(m, "\n\t\t");
-		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
-			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
-			vrefresh = panel->fixed_mode->vrefresh;
-		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
-			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
-			vrefresh = panel->downclock_mode->vrefresh;
-		} else {
-			seq_printf(m, "DRRS_State: Unknown(%d)\n",
-						drrs->refresh_rate_type);
-			mutex_unlock(&drrs->mutex);
-			return;
-		}
-		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
-
-		seq_puts(m, "\n\t\t");
-		mutex_unlock(&drrs->mutex);
-	} else {
-		/* DRRS not supported. Print the VBT parameter*/
-		seq_puts(m, "\tDRRS Supported : No");
-	}
-	seq_puts(m, "\n");
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *intel_crtc;
-	int active_crtc_cnt = 0;
-
-	drm_modeset_lock_all(dev);
-	for_each_intel_crtc(dev, intel_crtc) {
-		if (intel_crtc->base.state->active) {
-			active_crtc_cnt++;
-			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
-
-			drrs_status_per_crtc(m, dev, intel_crtc);
-		}
-	}
-	drm_modeset_unlock_all(dev);
-
-	if (!active_crtc_cnt)
-		seq_puts(m, "No active crtc found\n");
-
-	return 0;
-}
-
-static int i915_dp_mst_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_encoder *intel_encoder;
-	struct intel_digital_port *intel_dig_port;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
-		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		intel_dig_port = enc_to_dig_port(intel_encoder);
-		if (!intel_dig_port->dp.can_mst)
-			continue;
-
-		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
-			   intel_dig_port->base.base.base.id,
-			   intel_dig_port->base.base.name);
-		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-
-static ssize_t i915_displayport_test_active_write(struct file *file,
-						  const char __user *ubuf,
-						  size_t len, loff_t *offp)
-{
-	char *input_buffer;
-	int status = 0;
-	struct drm_device *dev;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-	int val = 0;
-
-	dev = ((struct seq_file *)file->private_data)->private;
-
-	if (len == 0)
-		return 0;
-
-	input_buffer = memdup_user_nul(ubuf, len);
-	if (IS_ERR(input_buffer))
-		return PTR_ERR(input_buffer);
-
-	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(encoder);
-			status = kstrtoint(input_buffer, 10, &val);
-			if (status < 0)
-				break;
-			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
-			/* To prevent erroneous activation of the compliance
-			 * testing code, only accept an actual value of 1 here
-			 */
-			if (val == 1)
-				intel_dp->compliance.test_active = true;
-			else
-				intel_dp->compliance.test_active = false;
-		}
-	}
-	drm_connector_list_iter_end(&conn_iter);
-	kfree(input_buffer);
-	if (status < 0)
-		return status;
-
-	*offp += len;
-	return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(encoder);
-			if (intel_dp->compliance.test_active)
-				seq_puts(m, "1");
-			else
-				seq_puts(m, "0");
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
-					     struct file *file)
-{
-	return single_open(file, i915_displayport_test_active_show,
-			   inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_displayport_test_active_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(encoder);
-			if (intel_dp->compliance.test_type ==
-			    DP_TEST_LINK_EDID_READ)
-				seq_printf(m, "%lx",
-					   intel_dp->compliance.test_data.edid);
-			else if (intel_dp->compliance.test_type ==
-				 DP_TEST_LINK_VIDEO_PATTERN) {
-				seq_printf(m, "hdisplay: %d\n",
-					   intel_dp->compliance.test_data.hdisplay);
-				seq_printf(m, "vdisplay: %d\n",
-					   intel_dp->compliance.test_data.vdisplay);
-				seq_printf(m, "bpc: %u\n",
-					   intel_dp->compliance.test_data.bpc);
-			}
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_connector *connector;
-	struct drm_connector_list_iter conn_iter;
-	struct intel_dp *intel_dp;
-
-	drm_connector_list_iter_begin(dev, &conn_iter);
-	drm_for_each_connector_iter(connector, &conn_iter) {
-		struct intel_encoder *encoder;
-
-		if (connector->connector_type !=
-		    DRM_MODE_CONNECTOR_DisplayPort)
-			continue;
-
-		encoder = to_intel_encoder(connector->encoder);
-		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		if (encoder && connector->status == connector_status_connected) {
-			intel_dp = enc_to_intel_dp(encoder);
-			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
-		} else
-			seq_puts(m, "0");
-	}
-	drm_connector_list_iter_end(&conn_iter);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	int level;
-	int num_levels;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
-	drm_modeset_lock_all(dev);
-
-	for (level = 0; level < num_levels; level++) {
-		unsigned int latency = wm[level];
-
-		/*
-		 * - WM1+ latency values are in 0.5us units
-		 * - all latencies are in 1us units on gen9/vlv/chv
-		 */
-		if (INTEL_GEN(dev_priv) >= 9 ||
-		    IS_VALLEYVIEW(dev_priv) ||
-		    IS_CHERRYVIEW(dev_priv) ||
-		    IS_G4X(dev_priv))
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
-		seq_printf(m, "WM%d %u (%u.%u usec)\n",
-			   level, wm[level], latency / 10, latency % 10);
-	}
-
-	drm_modeset_unlock_all(dev);
-}
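
For readers following the unit juggling above: raw latency values arrive in 0.5us steps for WM1+ on older platforms and in 1us steps on gen9/vlv/chv/g4x, and the dumper scales everything to tenths of a microsecond for the "%u.%u" print. A minimal standalone sketch of just that arithmetic (the sample values are illustrative, not real hardware latencies):

	#include <stdio.h>

	/* Mirror of the scaling in wm_latency_show(): convert a raw value
	 * to tenths of a microsecond, then print as "X.Y usec". */
	static void show_level(int level, unsigned int raw, int us_units)
	{
		unsigned int tenths = raw;

		if (us_units)
			tenths *= 10;	/* already whole microseconds */
		else if (level > 0)
			tenths *= 5;	/* WM1+ comes in 0.5us units */

		printf("WM%d %u (%u.%u usec)\n",
		       level, raw, tenths / 10, tenths % 10);
	}

	int main(void)
	{
		show_level(1, 4, 0);	/* ILK-style: 4 * 0.5us -> "2.0 usec" */
		show_level(1, 4, 1);	/* gen9-style: 4us      -> "4.0 usec" */
		return 0;
	}
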
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	const u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
-
-	wm_latency_show(m, latencies);
-
-	return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (HAS_GMCH(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
-	struct drm_i915_private *dev_priv = inode->i_private;
-
-	if (HAS_GMCH(dev_priv))
-		return -ENODEV;
-
-	return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
-				size_t len, loff_t *offp, u16 wm[8])
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct drm_device *dev = &dev_priv->drm;
-	u16 new[8] = { 0 };
-	int num_levels;
-	int level;
-	int ret;
-	char tmp[32];
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
-	if (len >= sizeof(tmp))
-		return -EINVAL;
-
-	if (copy_from_user(tmp, ubuf, len))
-		return -EFAULT;
-
-	tmp[len] = '\0';
-
-	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
-		     &new[0], &new[1], &new[2], &new[3],
-		     &new[4], &new[5], &new[6], &new[7]);
-	if (ret != num_levels)
-		return -EINVAL;
-
-	drm_modeset_lock_all(dev);
-
-	for (level = 0; level < num_levels; level++)
-		wm[level] = new[level];
-
-	drm_modeset_unlock_all(dev);
-
-	return len;
-}
-
-
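
The write handler above follows a pattern repeated throughout this file: bound the copy with a small stack buffer, NUL-terminate, then parse with sscanf() and reject short input. A userspace sketch of the same flow, with memcpy() standing in for copy_from_user() and NUM_LEVELS standing in for ilk_wm_max_level() + 1 (both stand-ins are assumptions for illustration):

	#include <stdio.h>
	#include <string.h>

	#define NUM_LEVELS 5	/* stand-in for ilk_wm_max_level() + 1 */

	/* Parse up to eight u16 watermark latencies from a user string,
	 * as wm_latency_write() does after copy_from_user(). */
	static int parse_latencies(const char *ubuf, size_t len,
				   unsigned short wm[8])
	{
		unsigned short new[8] = { 0 };
		char tmp[32];
		int ret, level;

		if (len >= sizeof(tmp))
			return -1;		/* -EINVAL in the kernel */

		memcpy(tmp, ubuf, len);		/* copy_from_user() stand-in */
		tmp[len] = '\0';

		ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
			     &new[0], &new[1], &new[2], &new[3],
			     &new[4], &new[5], &new[6], &new[7]);
		if (ret != NUM_LEVELS)
			return -1;

		for (level = 0; level < NUM_LEVELS; level++)
			wm[level] = new[level];
		return 0;
	}

	int main(void)
	{
		unsigned short wm[8] = { 0 };
		const char *input = "2 4 6 8 10";

		if (!parse_latencies(input, strlen(input), wm))
			printf("WM0..4 = %hu %hu %hu %hu %hu\n",
			       wm[0], wm[1], wm[2], wm[3], wm[4]);
		return 0;
	}
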
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
-				    size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	u16 *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
-
-	return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = pri_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = spr_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
-	.owner = THIS_MODULE,
-	.open = cur_wm_latency_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = cur_wm_latency_write
-};
-
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -3641,7 +1827,8 @@ i915_cache_sharing_set(void *data, u64 val)
 	if (val > 3)
 		return -EINVAL;
 
-	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+	drm_dbg(&dev_priv->drm,
+		"Manually setting uncore sharing to %llu\n", val);
 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
 		u32 snpcr;
 
@@ -3947,292 +2134,6 @@ static const struct file_operations i915_forcewake_fops = {
 	.release = i915_forcewake_release,
 };
 
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
-
-	/* Synchronize with everything first in case there's been an HPD
-	 * storm, but we haven't finished handling it in the kernel yet
-	 */
-	intel_synchronize_irq(dev_priv);
-	flush_work(&dev_priv->hotplug.dig_port_work);
-	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
-
-	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
-	seq_printf(m, "Detected: %s\n",
-		   yesno(delayed_work_pending(&hotplug->reenable_work)));
-
-	return 0;
-}
-
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
-					const char __user *ubuf, size_t len,
-					loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
-	unsigned int new_threshold;
-	int i;
-	char *newline;
-	char tmp[16];
-
-	if (len >= sizeof(tmp))
-		return -EINVAL;
-
-	if (copy_from_user(tmp, ubuf, len))
-		return -EFAULT;
-
-	tmp[len] = '\0';
-
-	/* Strip newline, if any */
-	newline = strchr(tmp, '\n');
-	if (newline)
-		*newline = '\0';
-
-	if (strcmp(tmp, "reset") == 0)
-		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
-	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
-		return -EINVAL;
-
-	if (new_threshold > 0)
-		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
-			      new_threshold);
-	else
-		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	hotplug->hpd_storm_threshold = new_threshold;
-	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
-	for_each_hpd_pin(i)
-		hotplug->stats[i].count = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	/* Re-enable hpd immediately if we were in an irq storm */
-	flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
-	return len;
-}
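
The threshold parser above accepts either the literal "reset" or a decimal number, after stripping the trailing newline that echo leaves behind. The same flow in plain C, with strtoul() standing in for kstrtouint() and an invented DEFAULT_THRESHOLD in place of HPD_STORM_DEFAULT_THRESHOLD:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define DEFAULT_THRESHOLD 5	/* illustrative stand-in */

	static int parse_threshold(char *tmp, unsigned int *out)
	{
		char *newline, *end;

		/* Strip newline, if any */
		newline = strchr(tmp, '\n');
		if (newline)
			*newline = '\0';

		if (strcmp(tmp, "reset") == 0) {
			*out = DEFAULT_THRESHOLD;
			return 0;
		}

		*out = (unsigned int)strtoul(tmp, &end, 10);
		return (end == tmp || *end != '\0') ? -1 : 0;
	}

	int main(void)
	{
		char a[] = "8\n", b[] = "reset\n";
		unsigned int t;

		if (!parse_threshold(a, &t))
			printf("threshold = %u\n", t);
		if (!parse_threshold(b, &t))
			printf("threshold = %u (default)\n", t);
		return 0;
	}
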
-
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
-}
-
-static const struct file_operations i915_hpd_storm_ctl_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_hpd_storm_ctl_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_hpd_storm_ctl_write
-};
-
-static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = m->private;
-
-	seq_printf(m, "Enabled: %s\n",
-		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
-
-	return 0;
-}
-
-static int
-i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, i915_hpd_short_storm_ctl_show,
-			   inode->i_private);
-}
-
-static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
-					      const char __user *ubuf,
-					      size_t len, loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct drm_i915_private *dev_priv = m->private;
-	struct i915_hotplug *hotplug = &dev_priv->hotplug;
-	char *newline;
-	char tmp[16];
-	int i;
-	bool new_state;
-
-	if (len >= sizeof(tmp))
-		return -EINVAL;
-
-	if (copy_from_user(tmp, ubuf, len))
-		return -EFAULT;
-
-	tmp[len] = '\0';
-
-	/* Strip newline, if any */
-	newline = strchr(tmp, '\n');
-	if (newline)
-		*newline = '\0';
-
-	/* Reset to the "default" state for this system */
-	if (strcmp(tmp, "reset") == 0)
-		new_state = !HAS_DP_MST(dev_priv);
-	else if (kstrtobool(tmp, &new_state) != 0)
-		return -EINVAL;
-
-	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
-		      new_state ? "En" : "Dis");
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	hotplug->hpd_short_storm_enabled = new_state;
-	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
-	for_each_hpd_pin(i)
-		hotplug->stats[i].count = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	/* Re-enable hpd immediately if we were in an irq storm */
-	flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
-	return len;
-}
-
-static const struct file_operations i915_hpd_short_storm_ctl_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_hpd_short_storm_ctl_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_hpd_short_storm_ctl_write,
-};
-
-static int i915_drrs_ctl_set(void *data, u64 val)
-{
-	struct drm_i915_private *dev_priv = data;
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_crtc *crtc;
-
-	if (INTEL_GEN(dev_priv) < 7)
-		return -ENODEV;
-
-	for_each_intel_crtc(dev, crtc) {
-		struct drm_connector_list_iter conn_iter;
-		struct intel_crtc_state *crtc_state;
-		struct drm_connector *connector;
-		struct drm_crtc_commit *commit;
-		int ret;
-
-		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
-		if (ret)
-			return ret;
-
-		crtc_state = to_intel_crtc_state(crtc->base.state);
-
-		if (!crtc_state->hw.active ||
-		    !crtc_state->has_drrs)
-			goto out;
-
-		commit = crtc_state->uapi.commit;
-		if (commit) {
-			ret = wait_for_completion_interruptible(&commit->hw_done);
-			if (ret)
-				goto out;
-		}
-
-		drm_connector_list_iter_begin(dev, &conn_iter);
-		drm_for_each_connector_iter(connector, &conn_iter) {
-			struct intel_encoder *encoder;
-			struct intel_dp *intel_dp;
-
-			if (!(crtc_state->uapi.connector_mask &
-			      drm_connector_mask(connector)))
-				continue;
-
-			encoder = intel_attached_encoder(to_intel_connector(connector));
-			if (encoder->type != INTEL_OUTPUT_EDP)
-				continue;
-
-			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
-						val ? "en" : "dis", val);
-
-			intel_dp = enc_to_intel_dp(encoder);
-			if (val)
-				intel_edp_drrs_enable(intel_dp,
-						      crtc_state);
-			else
-				intel_edp_drrs_disable(intel_dp,
-						       crtc_state);
-		}
-		drm_connector_list_iter_end(&conn_iter);
-
-out:
-		drm_modeset_unlock(&crtc->base.mutex);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
-
-static ssize_t
-i915_fifo_underrun_reset_write(struct file *filp,
-			       const char __user *ubuf,
-			       size_t cnt, loff_t *ppos)
-{
-	struct drm_i915_private *dev_priv = filp->private_data;
-	struct intel_crtc *intel_crtc;
-	struct drm_device *dev = &dev_priv->drm;
-	int ret;
-	bool reset;
-
-	ret = kstrtobool_from_user(ubuf, cnt, &reset);
-	if (ret)
-		return ret;
-
-	if (!reset)
-		return cnt;
-
-	for_each_intel_crtc(dev, intel_crtc) {
-		struct drm_crtc_commit *commit;
-		struct intel_crtc_state *crtc_state;
-
-		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
-		if (ret)
-			return ret;
-
-		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
-		commit = crtc_state->uapi.commit;
-		if (commit) {
-			ret = wait_for_completion_interruptible(&commit->hw_done);
-			if (!ret)
-				ret = wait_for_completion_interruptible(&commit->flip_done);
-		}
-
-		if (!ret && crtc_state->hw.active) {
-			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
-				      pipe_name(intel_crtc->pipe));
-
-			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
-		}
-
-		drm_modeset_unlock(&intel_crtc->base.mutex);
-
-		if (ret)
-			return ret;
-	}
-
-	ret = intel_fbc_reset_underrun(dev_priv);
-	if (ret)
-		return ret;
-
-	return cnt;
-}
-
-static const struct file_operations i915_fifo_underrun_reset_ops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.write = i915_fifo_underrun_reset_write,
-	.llseek = default_llseek,
-};
-
 static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4245,34 +2146,16 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
-	{"i915_fbc_status", i915_fbc_status, 0},
-	{"i915_ips_status", i915_ips_status, 0},
-	{"i915_sr_status", i915_sr_status, 0},
-	{"i915_opregion", i915_opregion, 0},
-	{"i915_vbt", i915_vbt, 0},
-	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 	{"i915_context_status", i915_context_status, 0},
-	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_llc", i915_llc, 0},
-	{"i915_edp_psr_status", i915_edp_psr_status, 0},
-	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
-	{"i915_power_domain_info", i915_power_domain_info, 0},
-	{"i915_dmc_info", i915_dmc_info, 0},
-	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
 	{"i915_rcs_topology", i915_rcs_topology, 0},
 	{"i915_shrinker_info", i915_shrinker_info, 0},
-	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
-	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 	{"i915_wa_registers", i915_wa_registers, 0},
-	{"i915_ddb_info", i915_ddb_info, 0},
 	{"i915_sseu_status", i915_sseu_status, 0},
-	{"i915_drrs_status", i915_drrs_status, 0},
 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4289,21 +2172,8 @@ static const struct i915_debugfs_files {
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_gpu_info", &i915_gpu_info_fops},
 #endif
-	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
-	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
-	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
-	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
-	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
-	{"i915_dp_test_data", &i915_displayport_test_data_fops},
-	{"i915_dp_test_type", &i915_displayport_test_type_fops},
-	{"i915_dp_test_active", &i915_displayport_test_active_fops},
 	{"i915_guc_log_level", &i915_guc_log_level_fops},
 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
-	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
-	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
-	{"i915_ipc_status", &i915_ipc_status_fops},
-	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
-	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
 };
 
 int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4311,9 +2181,10 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
 	struct drm_minor *minor = dev_priv->drm.primary;
 	int i;
 
+	i915_debugfs_params(dev_priv);
+
 	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
 			    to_i915(minor->dev), &i915_forcewake_fops);
-
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		debugfs_create_file(i915_debugfs_files[i].name,
 				    S_IRUGO | S_IWUSR,
@@ -4326,254 +2197,3 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
 }
-
-struct dpcd_block {
-	/* DPCD dump start address. */
-	unsigned int offset;
-	/* DPCD dump end address, inclusive. If unset, .size will be used. */
-	unsigned int end;
-	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
-	size_t size;
-	/* Only valid for eDP. */
-	bool edp;
-};
-
-static const struct dpcd_block i915_dpcd_debug[] = {
-	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
-	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
-	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
-	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
-	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
-	{ .offset = DP_SET_POWER },
-	{ .offset = DP_EDP_DPCD_REV },
-	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
-	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
-	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
-};
-
-static int i915_dpcd_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct intel_dp *intel_dp =
-		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-	u8 buf[16];
-	ssize_t err;
-	int i;
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
-		const struct dpcd_block *b = &i915_dpcd_debug[i];
-		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
-
-		if (b->edp &&
-		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
-			continue;
-
-		/* low tech for now */
-		if (WARN_ON(size > sizeof(buf)))
-			continue;
-
-		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
-		if (err < 0)
-			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
-		else
-			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
-	}
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
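
The size computation in i915_dpcd_show() packs three conventions into one expression: an inclusive .end range, an explicit .size, or a default of one byte, using the GNU "?:" shorthand. A standalone illustration (the offsets below are placeholders, not real DPCD addresses):

	#include <stdio.h>
	#include <stddef.h>

	struct dpcd_block {
		unsigned int offset;	/* dump start address */
		unsigned int end;	/* inclusive end; 0 means "use .size" */
		size_t size;		/* used if .end unset; 0 means 1 byte */
	};

	/* Same expression as i915_dpcd_show(); "a ?: b" is the GNU/clang
	 * extension meaning "a ? a : b". */
	static size_t block_size(const struct dpcd_block *b)
	{
		return b->end ? b->end - b->offset + 1 : (b->size ?: 1);
	}

	int main(void)
	{
		const struct dpcd_block blocks[] = {
			{ .offset = 0x000, .size = 15 },	/* explicit size */
			{ .offset = 0x100, .end = 0x108 },	/* inclusive: 9 bytes */
			{ .offset = 0x600 },			/* default: 1 byte */
		};
		size_t i;

		for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
			printf("%04x: %zu bytes\n", blocks[i].offset,
			       block_size(&blocks[i]));
		return 0;
	}
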
-
-static int i915_panel_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct intel_dp *intel_dp =
-		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	seq_printf(m, "Panel power up delay: %d\n",
-		   intel_dp->panel_power_up_delay);
-	seq_printf(m, "Panel power down delay: %d\n",
-		   intel_dp->panel_power_down_delay);
-	seq_printf(m, "Backlight on delay: %d\n",
-		   intel_dp->backlight_on_delay);
-	seq_printf(m, "Backlight off delay: %d\n",
-		   intel_dp->backlight_off_delay);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct intel_connector *intel_connector = to_intel_connector(connector);
-
-	if (connector->status != connector_status_connected)
-		return -ENODEV;
-
-	/* HDCP must be supported by the connector */
-	if (!intel_connector->hdcp.shim)
-		return -EINVAL;
-
-	seq_printf(m, "%s:%d HDCP version: ", connector->name,
-		   connector->base.id);
-	intel_hdcp_info(m, intel_connector);
-
-	return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
-static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
-{
-	struct drm_connector *connector = m->private;
-	struct drm_device *dev = connector->dev;
-	struct drm_crtc *crtc;
-	struct intel_dp *intel_dp;
-	struct drm_modeset_acquire_ctx ctx;
-	struct intel_crtc_state *crtc_state = NULL;
-	int ret = 0;
-	bool try_again = false;
-
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-	do {
-		try_again = false;
-		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
-				       &ctx);
-		if (ret) {
-			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
-				try_again = true;
-				continue;
-			}
-			break;
-		}
-		crtc = connector->state->crtc;
-		if (connector->status != connector_status_connected || !crtc) {
-			ret = -ENODEV;
-			break;
-		}
-		ret = drm_modeset_lock(&crtc->mutex, &ctx);
-		if (ret == -EDEADLK) {
-			ret = drm_modeset_backoff(&ctx);
-			if (!ret) {
-				try_again = true;
-				continue;
-			}
-			break;
-		} else if (ret) {
-			break;
-		}
-		intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-		crtc_state = to_intel_crtc_state(crtc->state);
-		seq_printf(m, "DSC_Enabled: %s\n",
-			   yesno(crtc_state->dsc.compression_enable));
-		seq_printf(m, "DSC_Sink_Support: %s\n",
-			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
-		seq_printf(m, "Force_DSC_Enable: %s\n",
-			   yesno(intel_dp->force_dsc_en));
-		if (!intel_dp_is_edp(intel_dp))
-			seq_printf(m, "FEC_Sink_Support: %s\n",
-				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
-	} while (try_again);
-
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	return ret;
-}
-
-static ssize_t i915_dsc_fec_support_write(struct file *file,
-					  const char __user *ubuf,
-					  size_t len, loff_t *offp)
-{
-	bool dsc_enable = false;
-	int ret;
-	struct drm_connector *connector =
-		((struct seq_file *)file->private_data)->private;
-	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-	if (len == 0)
-		return 0;
-
-	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
-			 len);
-
-	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
-	if (ret < 0)
-		return ret;
-
-	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
-			 (dsc_enable) ? "true" : "false");
-	intel_dp->force_dsc_en = dsc_enable;
-
-	*offp += len;
-	return len;
-}
-
-static int i915_dsc_fec_support_open(struct inode *inode,
-				     struct file *file)
-{
-	return single_open(file, i915_dsc_fec_support_show,
-			   inode->i_private);
-}
-
-static const struct file_operations i915_dsc_fec_support_fops = {
-	.owner = THIS_MODULE,
-	.open = i915_dsc_fec_support_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-	.write = i915_dsc_fec_support_write
-};
-
-/**
- * i915_debugfs_connector_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
- *
- * Cleanup will be done by drm_connector_unregister() through a call to
- * drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
- */
-int i915_debugfs_connector_add(struct drm_connector *connector)
-{
-	struct dentry *root = connector->debugfs_entry;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
-	/* The connector must have been registered beforehand. */
-	if (!root)
-		return -ENODEV;
-
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		debugfs_create_file("i915_dpcd", S_IRUGO, root,
-				    connector, &i915_dpcd_fops);
-
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
-		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
-				    connector, &i915_panel_fops);
-		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
-				    connector, &i915_psr_sink_status_fops);
-	}
-
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
-		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
-				    connector, &i915_hdcp_sink_capability_fops);
-	}
-
-	if (INTEL_GEN(dev_priv) >= 10 &&
-	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
-		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
-				    connector, &i915_dsc_fec_support_fops);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index c0cd22e..6da39c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -6,15 +6,17 @@
 #ifndef __I915_DEBUGFS_H__
 #define __I915_DEBUGFS_H__
 
-struct drm_i915_private;
 struct drm_connector;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct seq_file;
 
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
+void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
 #else
 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
 #endif
 
 #endif /* __I915_DEBUGFS_H__ */
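
The header change keeps the usual CONFIG_DEBUG_FS idiom: real prototypes when debugfs is built in, static inline no-ops otherwise, so callers never need their own #ifdefs. A toy version of the pattern (FEATURE_DEBUG and debug_register() are invented names for illustration):

	#include <stdio.h>

	#define FEATURE_DEBUG 1		/* flip to 0 to compile the stub */

	#if FEATURE_DEBUG
	static int debug_register(const char *name)
	{
		printf("registered debug hooks for %s\n", name);
		return 0;
	}
	#else
	/* Stub keeps the same signature at zero cost, so callers stay
	 * unconditional. */
	static inline int debug_register(const char *name)
	{
		(void)name;
		return 0;
	}
	#endif

	int main(void)
	{
		return debug_register("i915");
	}
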
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
new file mode 100644
index 0000000..62b2c5f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "i915_debugfs_params.h"
+#include "i915_drv.h"
+#include "i915_params.h"
+
+/* int param */
+static int i915_param_int_show(struct seq_file *m, void *data)
+{
+	int *value = m->private;
+
+	seq_printf(m, "%d\n", *value);
+
+	return 0;
+}
+
+static int i915_param_int_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_param_int_show, inode->i_private);
+}
+
+static ssize_t i915_param_int_write(struct file *file,
+				    const char __user *ubuf, size_t len,
+				    loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	int *value = m->private;
+	int ret;
+
+	ret = kstrtoint_from_user(ubuf, len, 0, value);
+	if (ret) {
+		/* support boolean values too */
+		bool b;
+
+		ret = kstrtobool_from_user(ubuf, len, &b);
+		if (!ret)
+			*value = b;
+	}
+
+	return ret ?: len;
+}
+
+static const struct file_operations i915_param_int_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_param_int_open,
+	.read = seq_read,
+	.write = i915_param_int_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations i915_param_int_fops_ro = {
+	.owner = THIS_MODULE,
+	.open = i915_param_int_open,
+	.read = seq_read,
+	.llseek = default_llseek,
+	.release = single_release,
+};
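
The write path above is deliberately forgiving: it first attempts a full integer parse and, failing that, accepts boolean spellings, so both "echo 2" and "echo on" work against an int param. A userspace model of the fallback, with strtol() and string matching standing in for kstrtoint_from_user()/kstrtobool_from_user() (the accepted boolean words below are a partial, illustrative list):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Accept an integer, or fall back to boolean words, like
	 * i915_param_int_write(). Returns 0 on success. */
	static int parse_int_param(const char *buf, int *value)
	{
		char *end;
		long v = strtol(buf, &end, 0);

		if (end != buf && (*end == '\0' || *end == '\n')) {
			*value = (int)v;
			return 0;
		}

		/* support boolean values too */
		if (!strcmp(buf, "on") || !strcmp(buf, "yes") ||
		    !strcmp(buf, "true") || !strcmp(buf, "y")) {
			*value = 1;
			return 0;
		}
		if (!strcmp(buf, "off") || !strcmp(buf, "no") ||
		    !strcmp(buf, "false") || !strcmp(buf, "n")) {
			*value = 0;
			return 0;
		}
		return -1;
	}

	int main(void)
	{
		int v;

		if (!parse_int_param("0x10", &v))
			printf("0x10 -> %d\n", v);	/* 16: base 0 honors 0x */
		if (!parse_int_param("on", &v))
			printf("on   -> %d\n", v);	/* 1 */
		return 0;
	}
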
+
+/* unsigned int param */
+static int i915_param_uint_show(struct seq_file *m, void *data)
+{
+	unsigned int *value = m->private;
+
+	seq_printf(m, "%u\n", *value);
+
+	return 0;
+}
+
+static int i915_param_uint_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_param_uint_show, inode->i_private);
+}
+
+static ssize_t i915_param_uint_write(struct file *file,
+				     const char __user *ubuf, size_t len,
+				     loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	unsigned int *value = m->private;
+	int ret;
+
+	ret = kstrtouint_from_user(ubuf, len, 0, value);
+	if (ret) {
+		/* support boolean values too */
+		bool b;
+
+		ret = kstrtobool_from_user(ubuf, len, &b);
+		if (!ret)
+			*value = b;
+	}
+
+	return ret ?: len;
+}
+
+static const struct file_operations i915_param_uint_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_param_uint_open,
+	.read = seq_read,
+	.write = i915_param_uint_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations i915_param_uint_fops_ro = {
+	.owner = THIS_MODULE,
+	.open = i915_param_uint_open,
+	.read = seq_read,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+/* char * param */
+static int i915_param_charp_show(struct seq_file *m, void *data)
+{
+	const char **s = m->private;
+
+	seq_printf(m, "%s\n", *s);
+
+	return 0;
+}
+
+static int i915_param_charp_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, i915_param_charp_show, inode->i_private);
+}
+
+static ssize_t i915_param_charp_write(struct file *file,
+				      const char __user *ubuf, size_t len,
+				      loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	char **s = m->private;
+	char *new, *old;
+
+	/* FIXME: remove locking once the params are no longer module params */
+	kernel_param_lock(THIS_MODULE);
+
+	old = *s;
+	new = strndup_user(ubuf, PAGE_SIZE);
+	if (IS_ERR(new)) {
+		len = PTR_ERR(new);
+		goto out;
+	}
+
+	*s = new;
+
+	kfree(old);
+out:
+	kernel_param_unlock(THIS_MODULE);
+
+	return len;
+}
+
+static const struct file_operations i915_param_charp_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_param_charp_open,
+	.read = seq_read,
+	.write = i915_param_charp_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations i915_param_charp_fops_ro = {
+	.owner = THIS_MODULE,
+	.open = i915_param_charp_open,
+	.read = seq_read,
+	.llseek = default_llseek,
+	.release = single_release,
+};
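
Replacing the char * param is a classic swap-then-free: duplicate the new string, publish the pointer, free the old one, all under a lock so a concurrent reader of *s never sees a freed pointer (kernel_param_lock() in the patch, a plain pthread mutex in this userspace sketch; strdup() stands in for strndup_user()):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static pthread_mutex_t param_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Mirror of i915_param_charp_write(): dup, swap, free old. */
	static int set_charp_param(char **s, const char *input)
	{
		char *new, *old;

		pthread_mutex_lock(&param_lock);
		old = *s;
		new = strdup(input);		/* strndup_user() stand-in */
		if (!new) {
			pthread_mutex_unlock(&param_lock);
			return -1;
		}
		*s = new;
		free(old);
		pthread_mutex_unlock(&param_lock);
		return 0;
	}

	int main(void)
	{
		char *param = strdup("default");

		set_charp_param(&param, "override");
		printf("param = %s\n", param);
		free(param);
		return 0;
	}
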
+
+#define RO(mode) (((mode) & 0222) == 0)
+
+static struct dentry *
+i915_debugfs_create_int(const char *name, umode_t mode,
+			struct dentry *parent, int *value)
+{
+	return debugfs_create_file_unsafe(name, mode, parent, value,
+					  RO(mode) ? &i915_param_int_fops_ro :
+					  &i915_param_int_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_uint(const char *name, umode_t mode,
+			 struct dentry *parent, unsigned int *value)
+{
+	return debugfs_create_file_unsafe(name, mode, parent, value,
+					  RO(mode) ? &i915_param_uint_fops_ro :
+					  &i915_param_uint_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_charp(const char *name, umode_t mode,
+			  struct dentry *parent, char **value)
+{
+	return debugfs_create_file(name, mode, parent, value,
+				   RO(mode) ? &i915_param_charp_fops_ro :
+				   &i915_param_charp_fops);
+}
+
+static __always_inline void
+_i915_param_create_file(struct dentry *parent, const char *name,
+			const char *type, int mode, void *value)
+{
+	if (!mode)
+		return;
+
+	if (!__builtin_strcmp(type, "bool"))
+		debugfs_create_bool(name, mode, parent, value);
+	else if (!__builtin_strcmp(type, "int"))
+		i915_debugfs_create_int(name, mode, parent, value);
+	else if (!__builtin_strcmp(type, "unsigned int"))
+		i915_debugfs_create_uint(name, mode, parent, value);
+	else if (!__builtin_strcmp(type, "unsigned long"))
+		debugfs_create_ulong(name, mode, parent, value);
+	else if (!__builtin_strcmp(type, "char *"))
+		i915_debugfs_create_charp(name, mode, parent, value);
+	else
+		WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
+		     type, name);
+}
+
+/* add a subdirectory with files for each i915 param */
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
+{
+	struct drm_minor *minor = i915->drm.primary;
+	struct i915_params *params = &i915_modparams;
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+	if (IS_ERR(dir))
+		return dir;
+
+	/*
+	 * Note: We could create files for params needing special handling
+	 * here. Set mode in params to 0 to skip the generic create file, or
+	 * just let the generic create file fail silently with -EEXIST.
+	 */
+
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+	I915_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+
+	return dir;
+}
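
The REGISTER trick at the end is an X-macro: I915_PARAMS_FOR_EACH() applies a caller-supplied macro to every parameter tuple, and _i915_param_create_file() dispatches on the stringified type. Because __builtin_strcmp() against string literals folds to a constant, each expansion compiles down to a single debugfs_create_*() call. A self-contained miniature of the same machinery (the parameter list and create_file() below are invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* One X-macro drives both the struct definition and registration. */
	#define MY_PARAMS_FOR_EACH(F) \
		F(int, verbosity, 2) \
		F(unsigned int, timeout_ms, 500U) \
		F(bool, enabled, true)

	struct my_params {
	#define MEMBER(T, x, d) T x;
		MY_PARAMS_FOR_EACH(MEMBER)
	#undef MEMBER
	};

	/* Dispatch on the stringified type, like _i915_param_create_file().
	 * With constant arguments the strcmp()s fold away at compile time. */
	static void create_file(const char *name, const char *type, void *value)
	{
		if (!strcmp(type, "int"))
			printf("int  file %-10s = %d\n", name, *(int *)value);
		else if (!strcmp(type, "unsigned int"))
			printf("uint file %-10s = %u\n", name,
			       *(unsigned int *)value);
		else if (!strcmp(type, "bool"))
			printf("bool file %-10s = %d\n", name, *(bool *)value);
		else
			printf("no handler for type %s (%s)\n", type, name);
	}

	int main(void)
	{
		struct my_params params = {
	#define INIT(T, x, d) .x = d,
			MY_PARAMS_FOR_EACH(INIT)
	#undef INIT
		};

	#define REGISTER(T, x, d) create_file(#x, #T, &params.x);
		MY_PARAMS_FOR_EACH(REGISTER)
	#undef REGISTER
		return 0;
	}
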
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.h b/drivers/gpu/drm/i915/i915_debugfs_params.h
new file mode 100644
index 0000000..6656707
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_PARAMS__
+#define __I915_DEBUGFS_PARAMS__
+
+struct dentry;
+struct drm_i915_private;
+
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __I915_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index eec1d6a..81a4621 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,12 +44,13 @@
 #include <drm/drm_ioctl.h>
 #include <drm/drm_irq.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_acpi.h"
 #include "display/intel_audio.h"
 #include "display/intel_bw.h"
 #include "display/intel_cdclk.h"
+#include "display/intel_csr.h"
+#include "display/intel_display_debugfs.h"
 #include "display/intel_display_types.h"
 #include "display/intel_dp.h"
 #include "display/intel_fbdev.h"
@@ -69,6 +70,7 @@
 
 #include "i915_debugfs.h"
 #include "i915_drv.h"
+#include "i915_ioc32.h"
 #include "i915_irq.h"
 #include "i915_memcpy.h"
 #include "i915_perf.h"
@@ -78,74 +80,14 @@
 #include "i915_sysfs.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
-#include "intel_csr.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
 #include "intel_memory_region.h"
 #include "intel_pm.h"
+#include "vlv_suspend.h"
 
 static struct drm_driver driver;
 
-struct vlv_s0ix_state {
-	/* GAM */
-	u32 wr_watermark;
-	u32 gfx_prio_ctrl;
-	u32 arb_mode;
-	u32 gfx_pend_tlb0;
-	u32 gfx_pend_tlb1;
-	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
-	u32 media_max_req_count;
-	u32 gfx_max_req_count;
-	u32 render_hwsp;
-	u32 ecochk;
-	u32 bsd_hwsp;
-	u32 blt_hwsp;
-	u32 tlb_rd_addr;
-
-	/* MBC */
-	u32 g3dctl;
-	u32 gsckgctl;
-	u32 mbctl;
-
-	/* GCP */
-	u32 ucgctl1;
-	u32 ucgctl3;
-	u32 rcgctl1;
-	u32 rcgctl2;
-	u32 rstctl;
-	u32 misccpctl;
-
-	/* GPM */
-	u32 gfxpause;
-	u32 rpdeuhwtc;
-	u32 rpdeuc;
-	u32 ecobus;
-	u32 pwrdwnupctl;
-	u32 rp_down_timeout;
-	u32 rp_deucsw;
-	u32 rcubmabdtmr;
-	u32 rcedata;
-	u32 spare2gh;
-
-	/* Display 1 CZ domain */
-	u32 gt_imr;
-	u32 gt_ier;
-	u32 pm_imr;
-	u32 pm_ier;
-	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
-	/* GT SA CZ domain */
-	u32 tilectl;
-	u32 gt_fifoctl;
-	u32 gtlc_wake_ctrl;
-	u32 gtlc_survive;
-	u32 pmwgicz;
-
-	/* Display 2 CZ domain */
-	u32 gu_ctl0;
-	u32 gu_ctl1;
-	u32 pcbr;
-	u32 clock_gate_dis2;
-};
-
 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
 {
 	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
@@ -153,7 +95,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
 	dev_priv->bridge_dev =
 		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
 	if (!dev_priv->bridge_dev) {
-		DRM_ERROR("bridge device not found\n");
+		drm_err(&dev_priv->drm, "bridge device not found\n");
 		return -1;
 	}
 	return 0;
@@ -190,7 +132,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
 				     0, pcibios_align_resource,
 				     dev_priv->bridge_dev);
 	if (ret) {
-		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
 		return ret;
 	}
@@ -273,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
 		release_resource(&dev_priv->mch_res);
 }
 
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
 {
 	int ret;
 
@@ -293,25 +236,32 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
 	if (ret)
 		goto out;
 
-	intel_register_dsm_handler();
-
-	ret = i915_switcheroo_register(i915);
-	if (ret)
-		goto cleanup_vga_client;
-
 	intel_power_domains_init_hw(i915, false);
 
 	intel_csr_ucode_init(i915);
 
-	ret = intel_irq_install(i915);
+	ret = intel_modeset_init_noirq(i915);
 	if (ret)
-		goto cleanup_csr;
+		goto cleanup_vga_client;
+
+	return 0;
+
+cleanup_vga_client:
+	intel_vga_unregister(i915);
+out:
+	return ret;
+}
+
+/* part #2: call after irq install */
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+{
+	int ret;
 
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	ret = intel_modeset_init(i915);
 	if (ret)
-		goto cleanup_irq;
+		goto out;
 
 	ret = i915_gem_init(i915);
 	if (ret)
@@ -340,29 +290,27 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
 	i915_gem_driver_remove(i915);
 	i915_gem_driver_release(i915);
 cleanup_modeset:
+	/* FIXME */
 	intel_modeset_driver_remove(i915);
-cleanup_irq:
 	intel_irq_uninstall(i915);
-cleanup_csr:
-	intel_csr_ucode_fini(i915);
-	intel_power_domains_driver_remove(i915);
-	i915_switcheroo_unregister(i915);
-cleanup_vga_client:
-	intel_vga_unregister(i915);
+	intel_modeset_driver_remove_noirq(i915);
 out:
 	return ret;
 }
 
+/* part #1: call before irq uninstall */
 static void i915_driver_modeset_remove(struct drm_i915_private *i915)
 {
 	intel_modeset_driver_remove(i915);
+}
 
-	intel_irq_uninstall(i915);
+/* part #2: call after irq uninstall */
+static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
+{
+	intel_modeset_driver_remove_noirq(i915);
 
 	intel_bios_driver_remove(i915);
 
-	i915_switcheroo_unregister(i915);
-
 	intel_vga_unregister(i915);
 
 	intel_csr_ucode_fini(i915);
@@ -412,7 +360,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 out_free_wq:
 	destroy_workqueue(dev_priv->wq);
 out_err:
-	DRM_ERROR("Failed to allocate workqueues.\n");
+	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
 
 	return -ENOMEM;
 }
@@ -441,37 +389,15 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
 	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
 
 	if (pre) {
-		DRM_ERROR("This is a pre-production stepping. "
+		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
 			  "It may not be fully functional.\n");
 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
 	}
 }
 
-static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
-{
-	if (!IS_VALLEYVIEW(i915))
-		return 0;
-
-	/* we write all the values in the struct, so no need to zero it out */
-	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
-				       GFP_KERNEL);
-	if (!i915->vlv_s0ix_state)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void vlv_free_s0ix_state(struct drm_i915_private *i915)
-{
-	if (!i915->vlv_s0ix_state)
-		return;
-
-	kfree(i915->vlv_s0ix_state);
-	i915->vlv_s0ix_state = NULL;
-}
-
 static void sanitize_gpu(struct drm_i915_private *i915)
 {
 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -519,7 +445,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		return ret;
 
-	ret = vlv_alloc_s0ix_state(dev_priv);
+	ret = vlv_suspend_init(dev_priv);
 	if (ret < 0)
 		goto err_workqueues;
 
@@ -541,7 +467,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 	intel_init_display_hooks(dev_priv);
 	intel_init_clock_gating_hooks(dev_priv);
 	intel_init_audio_hooks(dev_priv);
-	intel_display_crc_init(dev_priv);
 
 	intel_detect_preproduction_hw(dev_priv);
 
@@ -550,7 +475,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 err_gem:
 	i915_gem_cleanup_early(dev_priv);
 	intel_gt_driver_late_release(&dev_priv->gt);
-	vlv_free_s0ix_state(dev_priv);
+	vlv_suspend_cleanup(dev_priv);
 err_workqueues:
 	i915_workqueues_cleanup(dev_priv);
 	return ret;
@@ -567,7 +492,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
 	intel_power_domains_cleanup(dev_priv);
 	i915_gem_cleanup_early(dev_priv);
 	intel_gt_driver_late_release(&dev_priv->gt);
-	vlv_free_s0ix_state(dev_priv);
+	vlv_suspend_cleanup(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
 
 	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
@@ -640,487 +565,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 	intel_gvt_sanitize_options(dev_priv);
 }
 
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
-	static const char * const str[] = {
-		DRAM_TYPE_STR(UNKNOWN),
-		DRAM_TYPE_STR(DDR3),
-		DRAM_TYPE_STR(DDR4),
-		DRAM_TYPE_STR(LPDDR3),
-		DRAM_TYPE_STR(LPDDR4),
-	};
-
-	if (type >= ARRAY_SIZE(str))
-		type = INTEL_DRAM_UNKNOWN;
-
-	return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
-	return dimm->ranks * 64 / (dimm->width ?: 1);
-}
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
-	return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
-	if (skl_get_dimm_size(val) == 0)
-		return 0;
-
-	switch (val & SKL_DRAM_WIDTH_MASK) {
-	case SKL_DRAM_WIDTH_X8:
-	case SKL_DRAM_WIDTH_X16:
-	case SKL_DRAM_WIDTH_X32:
-		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
-		return 8 << val;
-	default:
-		MISSING_CASE(val);
-		return 0;
-	}
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
-	if (skl_get_dimm_size(val) == 0)
-		return 0;
-
-	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
-	return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
-	return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
-	if (cnl_get_dimm_size(val) == 0)
-		return 0;
-
-	switch (val & CNL_DRAM_WIDTH_MASK) {
-	case CNL_DRAM_WIDTH_X8:
-	case CNL_DRAM_WIDTH_X16:
-	case CNL_DRAM_WIDTH_X32:
-		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
-		return 8 << val;
-	default:
-		MISSING_CASE(val);
-		return 0;
-	}
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
-	if (cnl_get_dimm_size(val) == 0)
-		return 0;
-
-	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
-	return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
-	/* Convert total GB to Gb per DRAM device */
-	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
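
The 16Gb-DIMM test hides two conversions: intel_dimm_num_devices() derives the DRAM device count from ranks * 64 data bits / device width, and skl_is_16gb_dimm() converts the DIMM's total GB back to Gb per device. Worked through for a plausible part (numbers illustrative): a 16 GB single-rank x8 DIMM has 64/8 = 8 devices, and 8 * 16 GB / 8 devices = 16 Gb per device, so it trips the watermark workaround. As a runnable check:

	#include <assert.h>
	#include <stdio.h>

	struct dram_dimm_info {
		unsigned int size;	/* total GB */
		unsigned int width;	/* device width: x8/x16/x32 */
		unsigned int ranks;
	};

	static int num_devices(const struct dram_dimm_info *d)
	{
		return d->ranks * 64 / (d->width ? d->width : 1);
	}

	static int is_16gb_dimm(const struct dram_dimm_info *d)
	{
		/* total GB -> Gb per device: the factor 8 is GB -> Gb */
		return 8 * d->size / num_devices(d) == 16;
	}

	int main(void)
	{
		struct dram_dimm_info a = { .size = 16, .width = 8, .ranks = 1 };
		struct dram_dimm_info b = { .size = 8,  .width = 8, .ranks = 1 };

		assert(num_devices(&a) == 8);
		assert(is_16gb_dimm(&a));	/* 16 Gb devices */
		assert(!is_16gb_dimm(&b));	/* 8 Gb devices */
		printf("DIMM a: %d devices, 16Gb: %s\n",
		       num_devices(&a), is_16gb_dimm(&a) ? "yes" : "no");
		return 0;
	}
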
-
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
-		       struct dram_dimm_info *dimm,
-		       int channel, char dimm_name, u16 val)
-{
-	if (INTEL_GEN(dev_priv) >= 10) {
-		dimm->size = cnl_get_dimm_size(val);
-		dimm->width = cnl_get_dimm_width(val);
-		dimm->ranks = cnl_get_dimm_ranks(val);
-	} else {
-		dimm->size = skl_get_dimm_size(val);
-		dimm->width = skl_get_dimm_width(val);
-		dimm->ranks = skl_get_dimm_ranks(val);
-	}
-
-	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
-		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
-		      yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
-			  struct dram_channel_info *ch,
-			  int channel, u32 val)
-{
-	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
-			       channel, 'L', val & 0xffff);
-	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
-			       channel, 'S', val >> 16);
-
-	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
-		DRM_DEBUG_KMS("CH%u not populated\n", channel);
-		return -EINVAL;
-	}
-
-	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
-		ch->ranks = 2;
-	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
-		ch->ranks = 2;
-	else
-		ch->ranks = 1;
-
-	ch->is_16gb_dimm =
-		skl_is_16gb_dimm(&ch->dimm_l) ||
-		skl_is_16gb_dimm(&ch->dimm_s);
-
-	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
-		      channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
-	return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
-			const struct dram_channel_info *ch1)
-{
-	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
-		(ch0->dimm_s.size == 0 ||
-		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
-	struct dram_info *dram_info = &dev_priv->dram_info;
-	struct dram_channel_info ch0 = {}, ch1 = {};
-	u32 val;
-	int ret;
-
-	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
-	if (ret == 0)
-		dram_info->num_channels++;
-
-	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
-	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
-	if (ret == 0)
-		dram_info->num_channels++;
-
-	if (dram_info->num_channels == 0) {
-		DRM_INFO("Number of memory channels is zero\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * If either channel is a single-rank channel, the worst case
-	 * behaves like single-rank memory, so treat the whole system as
-	 * single rank.
-	 */
-	if (ch0.ranks == 1 || ch1.ranks == 1)
-		dram_info->ranks = 1;
-	else
-		dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
-	if (dram_info->ranks == 0) {
-		DRM_INFO("couldn't get memory rank information\n");
-		return -EINVAL;
-	}
-
-	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
-	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
-	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
-		      yesno(dram_info->symmetric_memory));
-	return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
-	u32 val;
-
-	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
-	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
-	case SKL_DRAM_DDR_TYPE_DDR3:
-		return INTEL_DRAM_DDR3;
-	case SKL_DRAM_DDR_TYPE_DDR4:
-		return INTEL_DRAM_DDR4;
-	case SKL_DRAM_DDR_TYPE_LPDDR3:
-		return INTEL_DRAM_LPDDR3;
-	case SKL_DRAM_DDR_TYPE_LPDDR4:
-		return INTEL_DRAM_LPDDR4;
-	default:
-		MISSING_CASE(val);
-		return INTEL_DRAM_UNKNOWN;
-	}
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
-	struct dram_info *dram_info = &dev_priv->dram_info;
-	u32 mem_freq_khz, val;
-	int ret;
-
-	dram_info->type = skl_get_dram_type(dev_priv);
-	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
-
-	ret = skl_dram_get_channels_info(dev_priv);
-	if (ret)
-		return ret;
-
-	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
-	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
-				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
-	dram_info->bandwidth_kbps = dram_info->num_channels *
-							mem_freq_khz * 8;
-
-	if (dram_info->bandwidth_kbps == 0) {
-		DRM_INFO("Couldn't get system memory bandwidth\n");
-		return -EINVAL;
-	}
-
-	dram_info->valid = true;
-	return 0;
-}
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
-	switch (val & BXT_DRAM_SIZE_MASK) {
-	case BXT_DRAM_SIZE_4GBIT:
-		return 4;
-	case BXT_DRAM_SIZE_6GBIT:
-		return 6;
-	case BXT_DRAM_SIZE_8GBIT:
-		return 8;
-	case BXT_DRAM_SIZE_12GBIT:
-		return 12;
-	case BXT_DRAM_SIZE_16GBIT:
-		return 16;
-	default:
-		MISSING_CASE(val);
-		return 0;
-	}
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
-	if (!bxt_get_dimm_size(val))
-		return 0;
-
-	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
-	return 8 << val;
-}
-
-static int bxt_get_dimm_ranks(u32 val)
-{
-	if (!bxt_get_dimm_size(val))
-		return 0;
-
-	switch (val & BXT_DRAM_RANK_MASK) {
-	case BXT_DRAM_RANK_SINGLE:
-		return 1;
-	case BXT_DRAM_RANK_DUAL:
-		return 2;
-	default:
-		MISSING_CASE(val);
-		return 0;
-	}
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
-	if (!bxt_get_dimm_size(val))
-		return INTEL_DRAM_UNKNOWN;
-
-	switch (val & BXT_DRAM_TYPE_MASK) {
-	case BXT_DRAM_TYPE_DDR3:
-		return INTEL_DRAM_DDR3;
-	case BXT_DRAM_TYPE_LPDDR3:
-		return INTEL_DRAM_LPDDR3;
-	case BXT_DRAM_TYPE_DDR4:
-		return INTEL_DRAM_DDR4;
-	case BXT_DRAM_TYPE_LPDDR4:
-		return INTEL_DRAM_LPDDR4;
-	default:
-		MISSING_CASE(val);
-		return INTEL_DRAM_UNKNOWN;
-	}
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
-			      u32 val)
-{
-	dimm->width = bxt_get_dimm_width(val);
-	dimm->ranks = bxt_get_dimm_ranks(val);
-
-	/*
-	 * Size in register is Gb per DRAM device. Convert to total
-	 * GB to match the way we report this for non-LP platforms.
-	 */
-	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
-	struct dram_info *dram_info = &dev_priv->dram_info;
-	u32 dram_channels;
-	u32 mem_freq_khz, val;
-	u8 num_active_channels;
-	int i;
-
-	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
-	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
-				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
-	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
-	num_active_channels = hweight32(dram_channels);
-
-	/* Each active bit represents a 4-byte channel */
-	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
-	if (dram_info->bandwidth_kbps == 0) {
-		DRM_INFO("Couldn't get system memory bandwidth\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
-	 */
-	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
-		struct dram_dimm_info dimm;
-		enum intel_dram_type type;
-
-		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
-		if (val == 0xFFFFFFFF)
-			continue;
-
-		dram_info->num_channels++;
-
-		bxt_get_dimm_info(&dimm, val);
-		type = bxt_get_dimm_type(val);
-
-		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
-			dram_info->type != INTEL_DRAM_UNKNOWN &&
-			dram_info->type != type);
-
-		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
-			      i - BXT_D_CR_DRP0_DUNIT_START,
-			      dimm.size, dimm.width, dimm.ranks,
-			      intel_dram_type_str(type));
-
-		/*
-		 * If either channel is a single-rank channel, the
-		 * worst case behaves like single-rank memory, so
-		 * treat the whole system as single rank.
-		 */
-		if (dram_info->ranks == 0)
-			dram_info->ranks = dimm.ranks;
-		else if (dimm.ranks == 1)
-			dram_info->ranks = 1;
-
-		if (type != INTEL_DRAM_UNKNOWN)
-			dram_info->type = type;
-	}
-
-	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
-	    dram_info->ranks == 0) {
-		DRM_INFO("couldn't get memory information\n");
-		return -EINVAL;
-	}
-
-	dram_info->valid = true;
-	return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
-	struct dram_info *dram_info = &dev_priv->dram_info;
-	int ret;
-
-	/*
-	 * Assume 16Gb DIMMs are present until proven otherwise.
-	 * This is only used for the level 0 watermark latency
-	 * w/a which does not apply to bxt/glk.
-	 */
-	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
-	if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
-		return;
-
-	if (IS_GEN9_LP(dev_priv))
-		ret = bxt_get_dram_info(dev_priv);
-	else
-		ret = skl_get_dram_info(dev_priv);
-	if (ret)
-		return;
-
-	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
-		      dram_info->bandwidth_kbps,
-		      dram_info->num_channels);
-
-	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
-		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
-	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
-	static const u8 sets[4] = { 1, 1, 2, 2 };
-
-	return EDRAM_NUM_BANKS(cap) *
-		ways[EDRAM_WAYS_IDX(cap)] *
-		sets[EDRAM_SETS_IDX(cap)];
-}
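
gen9_edram_size_mb() decodes the capability register through two lookup tables: size in MB = banks * ways * sets. A standalone recreation with the same tables, simplified to take the field indices directly rather than extracting them from HSW_EDRAM_CAP with the EDRAM_* macros (that extraction step is elided here):

	#include <stdio.h>

	static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned char sets[4] = { 1, 1, 2, 2 };

	/* Same formula as gen9_edram_size_mb(); indices are fed in
	 * directly instead of being read out of the cap register. */
	static unsigned int edram_size_mb(unsigned int banks,
					  unsigned int ways_idx,
					  unsigned int sets_idx)
	{
		return banks * ways[ways_idx & 7] * sets[sets_idx & 3];
	}

	int main(void)
	{
		/* e.g. 8 banks, 16 ways, 1 set -> 128 MB */
		printf("%u MB\n", edram_size_mb(8, 3, 0));
		return 0;
	}
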
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
-	u32 edram_cap = 0;
-
-	if (!(IS_HASWELL(dev_priv) ||
-	      IS_BROADWELL(dev_priv) ||
-	      INTEL_GEN(dev_priv) >= 9))
-		return;
-
-	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
-	/* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
-	if (!(edram_cap & EDRAM_ENABLED))
-		return;
-
-	/*
-	 * The needed capability bits for size calculation are not there with
-	 * pre gen9 so return 128MB always.
-	 */
-	if (INTEL_GEN(dev_priv) < 9)
-		dev_priv->edram_size_mb = 128;
-	else
-		dev_priv->edram_size_mb =
-			gen9_edram_size_mb(dev_priv, edram_cap);
-
-	dev_info(dev_priv->drm.dev,
-		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
 /**
  * i915_driver_hw_probe - setup state requiring device access
  * @dev_priv: device private
@@ -1164,7 +608,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	intel_sanitize_options(dev_priv);
 
 	/* needs to be done before ggtt probe */
-	edram_detect(dev_priv);
+	intel_dram_edram_detect(dev_priv);
 
 	i915_perf_init(dev_priv);
 
@@ -1188,7 +632,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
-		DRM_ERROR("failed to enable GGTT\n");
+		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
 		goto err_mem_regions;
 	}
 
@@ -1204,7 +648,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	if (IS_GEN(dev_priv, 2)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
 		if (ret) {
-			DRM_ERROR("failed to set DMA mask\n");
+			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
 
 			goto err_mem_regions;
 		}
@@ -1222,7 +666,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 		if (ret) {
-			DRM_ERROR("failed to set DMA mask\n");
+			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
 
 			goto err_mem_regions;
 		}
@@ -1253,7 +697,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	 */
 	if (INTEL_GEN(dev_priv) >= 5) {
 		if (pci_enable_msi(pdev) < 0)
-			DRM_DEBUG_DRIVER("can't enable MSI");
+			drm_dbg(&dev_priv->drm, "can't enable MSI");
 	}
 
 	ret = intel_gvt_init(dev_priv);
@@ -1265,7 +709,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	 * Fill the dram structure to get the system raw bandwidth and
 	 * dram info. This will be used for memory latency calculation.
 	 */
-	intel_get_dram_info(dev_priv);
+	intel_dram_detect(dev_priv);
 
 	intel_bw_init_hw(dev_priv);
 
@@ -1314,22 +758,19 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	i915_gem_driver_register(dev_priv);
 	i915_pmu_register(dev_priv);
 
-	/*
-	 * Notify a valid surface after modesetting,
-	 * when running inside a VM.
-	 */
-	if (intel_vgpu_active(dev_priv))
-		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+	intel_vgpu_register(dev_priv);
 
 	/* Reveal our presence to userspace */
 	if (drm_dev_register(dev, 0) == 0) {
 		i915_debugfs_register(dev_priv);
+		intel_display_debugfs_register(dev_priv);
 		i915_setup_sysfs(dev_priv);
 
 		/* Depends on sysfs having been initialized */
 		i915_perf_register(dev_priv);
 	} else
-		DRM_ERROR("Failed to register driver for userspace access!\n");
+		drm_err(&dev_priv->drm,
+			"Failed to register driver for userspace access!\n");
 
 	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
 		/* Must be done after probing outputs */
@@ -1359,6 +800,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 
 	intel_power_domains_enable(dev_priv);
 	intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+	intel_register_dsm_handler();
+
+	if (i915_switcheroo_register(dev_priv))
+		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
 }
 
 /**
@@ -1367,6 +813,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 {
+	i915_switcheroo_unregister(dev_priv);
+
+	intel_unregister_dsm_handler();
+
 	intel_runtime_pm_disable(&dev_priv->runtime_pm);
 	intel_power_domains_disable(dev_priv);
 
@@ -1411,11 +861,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
-		DRM_INFO("DRM_I915_DEBUG enabled\n");
+		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
-		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+		drm_info(&dev_priv->drm,
+			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
 }
 
 static struct drm_i915_private *
@@ -1437,8 +888,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return ERR_PTR(err);
 	}
 
-	i915->drm.dev_private = i915;
-
 	i915->drm.pdev = pdev;
 	pci_set_drvdata(pdev, i915);
 
@@ -1478,16 +927,16 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct intel_device_info *match_info =
 		(struct intel_device_info *)ent->driver_data;
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *i915;
 	int ret;
 
-	dev_priv = i915_driver_create(pdev, ent);
-	if (IS_ERR(dev_priv))
-		return PTR_ERR(dev_priv);
+	i915 = i915_driver_create(pdev, ent);
+	if (IS_ERR(i915))
+		return PTR_ERR(i915);
 
 	/* Disable nuclear pageflip by default on pre-ILK */
 	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
-		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+		i915->drm.driver_features &= ~DRIVER_ATOMIC;
 
 	/*
 	 * Check if we support fake LMEM -- for now we only unleash this for
@@ -1495,13 +944,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 	if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
-		if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+		if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
 		    i915_modparams.fake_lmem_start) {
-			mkwrite_device_info(dev_priv)->memory_regions =
+			mkwrite_device_info(i915)->memory_regions =
 				REGION_SMEM | REGION_LMEM | REGION_STOLEN;
-			mkwrite_device_info(dev_priv)->is_dgfx = true;
-			GEM_BUG_ON(!HAS_LMEM(dev_priv));
-			GEM_BUG_ON(!IS_DGFX(dev_priv));
+			mkwrite_device_info(i915)->is_dgfx = true;
+			GEM_BUG_ON(!HAS_LMEM(i915));
+			GEM_BUG_ON(!IS_DGFX(i915));
 		}
 	}
 #endif
@@ -1510,48 +959,60 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_fini;
 
-	ret = i915_driver_early_probe(dev_priv);
+	ret = i915_driver_early_probe(i915);
 	if (ret < 0)
 		goto out_pci_disable;
 
-	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+	disable_rpm_wakeref_asserts(&i915->runtime_pm);
 
-	i915_detect_vgpu(dev_priv);
+	intel_vgpu_detect(i915);
 
-	ret = i915_driver_mmio_probe(dev_priv);
+	ret = i915_driver_mmio_probe(i915);
 	if (ret < 0)
 		goto out_runtime_pm_put;
 
-	ret = i915_driver_hw_probe(dev_priv);
+	ret = i915_driver_hw_probe(i915);
 	if (ret < 0)
 		goto out_cleanup_mmio;
 
-	ret = i915_driver_modeset_probe(dev_priv);
+	ret = i915_driver_modeset_probe_noirq(i915);
 	if (ret < 0)
 		goto out_cleanup_hw;
 
-	i915_driver_register(dev_priv);
+	ret = intel_irq_install(i915);
+	if (ret)
+		goto out_cleanup_modeset;
 
-	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+	ret = i915_driver_modeset_probe(i915);
+	if (ret < 0)
+		goto out_cleanup_irq;
 
-	i915_welcome_messages(dev_priv);
+	i915_driver_register(i915);
+
+	enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+	i915_welcome_messages(i915);
 
 	return 0;
 
+out_cleanup_irq:
+	intel_irq_uninstall(i915);
+out_cleanup_modeset:
+	/* FIXME */
 out_cleanup_hw:
-	i915_driver_hw_remove(dev_priv);
-	intel_memory_regions_driver_release(dev_priv);
-	i915_ggtt_driver_release(dev_priv);
+	i915_driver_hw_remove(i915);
+	intel_memory_regions_driver_release(i915);
+	i915_ggtt_driver_release(i915);
 out_cleanup_mmio:
-	i915_driver_mmio_release(dev_priv);
+	i915_driver_mmio_release(i915);
 out_runtime_pm_put:
-	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-	i915_driver_late_release(dev_priv);
+	enable_rpm_wakeref_asserts(&i915->runtime_pm);
+	i915_driver_late_release(i915);
 out_pci_disable:
 	pci_disable_device(pdev);
 out_fini:
-	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
-	i915_driver_destroy(dev_priv);
+	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+	i915_driver_destroy(i915);
 	return ret;
 }
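The rewritten probe path keeps the usual kernel error-unwind ladder: each successful step adds one teardown label, and a failure jumps to the label that undoes exactly what has been set up so far (note the new out_cleanup_irq label pairing intel_irq_install() with intel_irq_uninstall(), while out_cleanup_modeset is still a FIXME). A minimal runnable sketch of the shape, with placeholder steps:

#include <stdio.h>

/* Placeholder steps: 0 on success, negative errno-style code on failure. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force the unwind path */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;

	ret = step_b();
	if (ret)
		goto out_undo_a;

	ret = step_c();
	if (ret)
		goto out_undo_b;

	return 0;

out_undo_b:
	undo_b();	/* teardown runs in reverse order of setup */
out_undo_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}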
 
@@ -1561,13 +1022,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
 	i915_driver_unregister(i915);
 
-	/*
-	 * After unregistering the device to prevent any new users, cancel
-	 * all in-flight requests so that we can quickly unbind the active
-	 * resources.
-	 */
-	intel_gt_set_wedged(&i915->gt);
-
 	/* Flush any external code that still may be under the RCU lock */
 	synchronize_rcu();
 
@@ -1579,6 +1033,10 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
 	i915_driver_modeset_remove(i915);
 
+	intel_irq_uninstall(i915);
+
+	i915_driver_modeset_remove_noirq(i915);
+
 	i915_reset_error_state(i915);
 	i915_gem_driver_remove(i915);
 
@@ -1665,10 +1123,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 	drm_modeset_unlock_all(dev);
 }
 
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
-			      bool rpm_resume);
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
-
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 {
 #if IS_ENABLED(CONFIG_ACPI_SLEEP)
@@ -1720,7 +1174,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	intel_suspend_hw(dev_priv);
 
-	i915_gem_suspend_gtt_mappings(dev_priv);
+	i915_ggtt_suspend(&dev_priv->ggtt);
 
 	i915_save_state(dev_priv);
 
@@ -1755,7 +1209,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-	int ret = 0;
+	int ret;
 
 	disable_rpm_wakeref_asserts(rpm);
 
@@ -1768,11 +1222,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	intel_display_power_suspend_late(dev_priv);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_suspend_complete(dev_priv);
-
+	ret = vlv_suspend_complete(dev_priv);
 	if (ret) {
-		DRM_ERROR("Suspend complete failed: %d\n", ret);
+		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
 		intel_power_domains_resume(dev_priv);
 
 		goto out;
@@ -1806,8 +1258,8 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
 {
 	int error;
 
-	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
-			 state.event != PM_EVENT_FREEZE))
+	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+			     state.event != PM_EVENT_FREEZE))
 		return -EINVAL;
 
 	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1831,9 +1283,9 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret)
-		DRM_ERROR("failed to re-enable GGTT\n");
+		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
 
-	i915_gem_restore_gtt_mappings(dev_priv);
+	i915_ggtt_resume(&dev_priv->ggtt);
 	i915_gem_restore_fences(&dev_priv->ggtt);
 
 	intel_csr_ucode_resume(dev_priv);
@@ -1920,7 +1372,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	 */
 	ret = pci_set_power_state(pdev, PCI_D0);
 	if (ret) {
-		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+		drm_err(&dev_priv->drm,
+			"failed to set PCI D0 power state (%d)\n", ret);
 		return ret;
 	}
 
@@ -1944,11 +1397,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_resume_prepare(dev_priv, false);
+	ret = vlv_resume_prepare(dev_priv, false);
 	if (ret)
-		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
-			  ret);
+		drm_err(&dev_priv->drm,
+			"Resume prepare failed: %d, continuing anyway\n", ret);
 
 	intel_uncore_resume_early(&dev_priv->uncore);
 
@@ -2115,391 +1567,16 @@ static int i915_pm_restore(struct device *kdev)
 	return i915_pm_resume(kdev);
 }
 
-/*
- * Save all Gunit registers that may be lost after a D3 and a subsequent
- * S0i[R123] transition. The list of registers needing a save/restore is
- * defined in the VLV2_S0IXRegs document. This document marks all Gunit
- * registers in the following way:
- * - Driver: saved/restored by the driver
- * - Punit : saved/restored by the Punit firmware
- * - No, w/o marking: no need to save/restore, since the register is R/O or
- *                    used internally by the HW in a way that doesn't depend
- *                    on keeping the content across a suspend/resume.
- * - Debug : used for debugging
- *
- * We save/restore all registers marked with 'Driver', with the following
- * exceptions:
- * - Registers out of use, including also registers marked with 'Debug'.
- *   These have no effect on the driver's operation, so we don't save/restore
- *   them to reduce the overhead.
- * - Registers that are fully setup by an initialization function called from
- *   the resume path. For example many clock gating and RPS/RC6 registers.
- * - Registers that provide the right functionality with their reset defaults.
- *
- * TODO: Except for registers that, based on the above 3 criteria, can be
- * safely ignored, we save/restore all others, practically treating the HW
- * context as a black box for the driver. Further investigation is needed
- * to reduce the saved/restored registers even further, by following the
- * same 3 criteria.
- */
-static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
-	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
-	int i;
-
-	if (!s)
-		return;
-
-	/* GAM 0x4000-0x4770 */
-	s->wr_watermark		= I915_READ(GEN7_WR_WATERMARK);
-	s->gfx_prio_ctrl	= I915_READ(GEN7_GFX_PRIO_CTRL);
-	s->arb_mode		= I915_READ(ARB_MODE);
-	s->gfx_pend_tlb0	= I915_READ(GEN7_GFX_PEND_TLB0);
-	s->gfx_pend_tlb1	= I915_READ(GEN7_GFX_PEND_TLB1);
-
-	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
-
-	s->media_max_req_count	= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
-	s->gfx_max_req_count	= I915_READ(GEN7_GFX_MAX_REQ_COUNT);
-
-	s->render_hwsp		= I915_READ(RENDER_HWS_PGA_GEN7);
-	s->ecochk		= I915_READ(GAM_ECOCHK);
-	s->bsd_hwsp		= I915_READ(BSD_HWS_PGA_GEN7);
-	s->blt_hwsp		= I915_READ(BLT_HWS_PGA_GEN7);
-
-	s->tlb_rd_addr		= I915_READ(GEN7_TLB_RD_ADDR);
-
-	/* MBC 0x9024-0x91D0, 0x8500 */
-	s->g3dctl		= I915_READ(VLV_G3DCTL);
-	s->gsckgctl		= I915_READ(VLV_GSCKGCTL);
-	s->mbctl		= I915_READ(GEN6_MBCTL);
-
-	/* GCP 0x9400-0x9424, 0x8100-0x810C */
-	s->ucgctl1		= I915_READ(GEN6_UCGCTL1);
-	s->ucgctl3		= I915_READ(GEN6_UCGCTL3);
-	s->rcgctl1		= I915_READ(GEN6_RCGCTL1);
-	s->rcgctl2		= I915_READ(GEN6_RCGCTL2);
-	s->rstctl		= I915_READ(GEN6_RSTCTL);
-	s->misccpctl		= I915_READ(GEN7_MISCCPCTL);
-
-	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
-	s->gfxpause		= I915_READ(GEN6_GFXPAUSE);
-	s->rpdeuhwtc		= I915_READ(GEN6_RPDEUHWTC);
-	s->rpdeuc		= I915_READ(GEN6_RPDEUC);
-	s->ecobus		= I915_READ(ECOBUS);
-	s->pwrdwnupctl		= I915_READ(VLV_PWRDWNUPCTL);
-	s->rp_down_timeout	= I915_READ(GEN6_RP_DOWN_TIMEOUT);
-	s->rp_deucsw		= I915_READ(GEN6_RPDEUCSW);
-	s->rcubmabdtmr		= I915_READ(GEN6_RCUBMABDTMR);
-	s->rcedata		= I915_READ(VLV_RCEDATA);
-	s->spare2gh		= I915_READ(VLV_SPAREG2H);
-
-	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
-	s->gt_imr		= I915_READ(GTIMR);
-	s->gt_ier		= I915_READ(GTIER);
-	s->pm_imr		= I915_READ(GEN6_PMIMR);
-	s->pm_ier		= I915_READ(GEN6_PMIER);
-
-	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
-
-	/* GT SA CZ domain, 0x100000-0x138124 */
-	s->tilectl		= I915_READ(TILECTL);
-	s->gt_fifoctl		= I915_READ(GTFIFOCTL);
-	s->gtlc_wake_ctrl	= I915_READ(VLV_GTLC_WAKE_CTRL);
-	s->gtlc_survive		= I915_READ(VLV_GTLC_SURVIVABILITY_REG);
-	s->pmwgicz		= I915_READ(VLV_PMWGICZ);
-
-	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
-	s->gu_ctl0		= I915_READ(VLV_GU_CTL0);
-	s->gu_ctl1		= I915_READ(VLV_GU_CTL1);
-	s->pcbr			= I915_READ(VLV_PCBR);
-	s->clock_gate_dis2	= I915_READ(VLV_GUNIT_CLOCK_GATE2);
-
-	/*
-	 * Not saving any of:
-	 * DFT,		0x9800-0x9EC0
-	 * SARB,	0xB000-0xB1FC
-	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
-	 * PCI CFG
-	 */
-}
-
-static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
-	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
-	u32 val;
-	int i;
-
-	if (!s)
-		return;
-
-	/* GAM 0x4000-0x4770 */
-	I915_WRITE(GEN7_WR_WATERMARK,	s->wr_watermark);
-	I915_WRITE(GEN7_GFX_PRIO_CTRL,	s->gfx_prio_ctrl);
-	I915_WRITE(ARB_MODE,		s->arb_mode | (0xffff << 16));
-	I915_WRITE(GEN7_GFX_PEND_TLB0,	s->gfx_pend_tlb0);
-	I915_WRITE(GEN7_GFX_PEND_TLB1,	s->gfx_pend_tlb1);
-
-	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
-
-	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
-	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
-
-	I915_WRITE(RENDER_HWS_PGA_GEN7,	s->render_hwsp);
-	I915_WRITE(GAM_ECOCHK,		s->ecochk);
-	I915_WRITE(BSD_HWS_PGA_GEN7,	s->bsd_hwsp);
-	I915_WRITE(BLT_HWS_PGA_GEN7,	s->blt_hwsp);
-
-	I915_WRITE(GEN7_TLB_RD_ADDR,	s->tlb_rd_addr);
-
-	/* MBC 0x9024-0x91D0, 0x8500 */
-	I915_WRITE(VLV_G3DCTL,		s->g3dctl);
-	I915_WRITE(VLV_GSCKGCTL,	s->gsckgctl);
-	I915_WRITE(GEN6_MBCTL,		s->mbctl);
-
-	/* GCP 0x9400-0x9424, 0x8100-0x810C */
-	I915_WRITE(GEN6_UCGCTL1,	s->ucgctl1);
-	I915_WRITE(GEN6_UCGCTL3,	s->ucgctl3);
-	I915_WRITE(GEN6_RCGCTL1,	s->rcgctl1);
-	I915_WRITE(GEN6_RCGCTL2,	s->rcgctl2);
-	I915_WRITE(GEN6_RSTCTL,		s->rstctl);
-	I915_WRITE(GEN7_MISCCPCTL,	s->misccpctl);
-
-	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
-	I915_WRITE(GEN6_GFXPAUSE,	s->gfxpause);
-	I915_WRITE(GEN6_RPDEUHWTC,	s->rpdeuhwtc);
-	I915_WRITE(GEN6_RPDEUC,		s->rpdeuc);
-	I915_WRITE(ECOBUS,		s->ecobus);
-	I915_WRITE(VLV_PWRDWNUPCTL,	s->pwrdwnupctl);
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout);
-	I915_WRITE(GEN6_RPDEUCSW,	s->rp_deucsw);
-	I915_WRITE(GEN6_RCUBMABDTMR,	s->rcubmabdtmr);
-	I915_WRITE(VLV_RCEDATA,		s->rcedata);
-	I915_WRITE(VLV_SPAREG2H,	s->spare2gh);
-
-	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
-	I915_WRITE(GTIMR,		s->gt_imr);
-	I915_WRITE(GTIER,		s->gt_ier);
-	I915_WRITE(GEN6_PMIMR,		s->pm_imr);
-	I915_WRITE(GEN6_PMIER,		s->pm_ier);
-
-	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
-
-	/* GT SA CZ domain, 0x100000-0x138124 */
-	I915_WRITE(TILECTL,			s->tilectl);
-	I915_WRITE(GTFIFOCTL,			s->gt_fifoctl);
-	/*
-	 * Preserve the GT allow wake and GFX force clock bits; they are not
-	 * restored here, as they are used by the caller to control the s0ix
-	 * suspend/resume sequence.
-	 */
-	val = I915_READ(VLV_GTLC_WAKE_CTRL);
-	val &= VLV_GTLC_ALLOWWAKEREQ;
-	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
-	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
-
-	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
-	val &= VLV_GFX_CLK_FORCE_ON_BIT;
-	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
-	I915_WRITE(VLV_PMWGICZ,			s->pmwgicz);
-
-	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
-	I915_WRITE(VLV_GU_CTL0,			s->gu_ctl0);
-	I915_WRITE(VLV_GU_CTL1,			s->gu_ctl1);
-	I915_WRITE(VLV_PCBR,			s->pcbr);
-	I915_WRITE(VLV_GUNIT_CLOCK_GATE2,	s->clock_gate_dis2);
-}
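One common way to shrink this kind of field-by-field save/restore code is a table of register offsets iterated in a loop. That is not what i915 does here, but a sketch of the alternative, with a fake MMIO array standing in for I915_READ/I915_WRITE and purely illustrative offsets:

#include <stdint.h>
#include <stdio.h>

/* Fake MMIO space so the sketch runs standalone; real code would go
 * through I915_READ/I915_WRITE-style accessors. */
static uint32_t mmio[0x10000];
static uint32_t read32(uint32_t reg) { return mmio[reg]; }
static void write32(uint32_t reg, uint32_t val) { mmio[reg] = val; }

struct saved_reg {
	uint32_t offset;	/* register offset (illustrative) */
	uint32_t value;		/* captured on suspend */
};

static struct saved_reg s0ix_regs[] = { { 0x4000 }, { 0x4008 }, { 0x9024 } };

static void save_regs(void)
{
	for (size_t i = 0; i < sizeof(s0ix_regs) / sizeof(s0ix_regs[0]); i++)
		s0ix_regs[i].value = read32(s0ix_regs[i].offset);
}

static void restore_regs(void)
{
	for (size_t i = 0; i < sizeof(s0ix_regs) / sizeof(s0ix_regs[0]); i++)
		write32(s0ix_regs[i].offset, s0ix_regs[i].value);
}

int main(void)
{
	write32(0x4000, 0xdead);
	save_regs();
	write32(0x4000, 0);			/* simulated power loss */
	restore_regs();
	printf("0x%x\n", read32(0x4000));	/* prints 0xdead */
	return 0;
}

The explicit version being deleted does keep one advantage a plain table loses: the restore side can special-case registers like VLV_GTLC_WAKE_CTRL, whose bits must be merged with the live value rather than overwritten.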
-
-static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
-				  u32 mask, u32 val)
-{
-	i915_reg_t reg = VLV_GTLC_PW_STATUS;
-	u32 reg_value;
-	int ret;
-
-	/* The HW does not like us polling for PW_STATUS frequently, so
-	 * use the sleeping loop rather than risk the busy spin within
-	 * intel_wait_for_register().
-	 *
-	 * Transitioning between RC6 states should be at most 2ms (see
-	 * valleyview_enable_rps) so use a 3ms timeout.
-	 */
-	ret = wait_for(((reg_value =
-			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
-		       == val, 3);
-
-	/* just trace the final value */
-	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
-
-	return ret;
-}
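vlv_wait_for_pw_status() deliberately sleeps between polls because, per the comment, the hardware dislikes frequent PW_STATUS reads. A standalone userspace sketch of a sleeping poll with a deadline (read_status() is a stand-in for the register read):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical register read; reaches the expected value eventually. */
static unsigned int fake_status;
static unsigned int read_status(void) { return fake_status++; }

/* Sleep between polls instead of busy-spinning, with a hard deadline. */
static int wait_for_status(unsigned int mask, unsigned int val,
			   int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_status() & mask) == val)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* -ETIMEDOUT in kernel terms */

		usleep(500);		/* yield the CPU between polls */
	}
}

int main(void)
{
	printf("%d\n", wait_for_status(0xff, 3, 10));	/* prints 0 */
	return 0;
}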
-
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
-{
-	u32 val;
-	int err;
-
-	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
-	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
-	if (force_on)
-		val |= VLV_GFX_CLK_FORCE_ON_BIT;
-	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
-	if (!force_on)
-		return 0;
-
-	err = intel_wait_for_register(&dev_priv->uncore,
-				      VLV_GTLC_SURVIVABILITY_REG,
-				      VLV_GFX_CLK_STATUS_BIT,
-				      VLV_GFX_CLK_STATUS_BIT,
-				      20);
-	if (err)
-		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
-			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
-
-	return err;
-}
-
-static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
-{
-	u32 mask;
-	u32 val;
-	int err;
-
-	val = I915_READ(VLV_GTLC_WAKE_CTRL);
-	val &= ~VLV_GTLC_ALLOWWAKEREQ;
-	if (allow)
-		val |= VLV_GTLC_ALLOWWAKEREQ;
-	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
-	POSTING_READ(VLV_GTLC_WAKE_CTRL);
-
-	mask = VLV_GTLC_ALLOWWAKEACK;
-	val = allow ? mask : 0;
-
-	err = vlv_wait_for_pw_status(dev_priv, mask, val);
-	if (err)
-		DRM_ERROR("timeout disabling GT waking\n");
-
-	return err;
-}
-
-static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
-				  bool wait_for_on)
-{
-	u32 mask;
-	u32 val;
-
-	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
-	val = wait_for_on ? mask : 0;
-
-	/*
-	 * RC6 transitioning can be delayed up to 2 msec (see
-	 * valleyview_enable_rps), use 3 msec for safety.
-	 *
-	 * This can fail to turn off RC6 if the GPU is stuck after a failed
-	 * reset and we are trying to force the machine to sleep.
-	 */
-	if (vlv_wait_for_pw_status(dev_priv, mask, val))
-		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
-				 onoff(wait_for_on));
-}
-
-static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
-{
-	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
-		return;
-
-	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
-	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
-}
-
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
-{
-	u32 mask;
-	int err;
-
-	/*
-	 * Bspec defines the following GT well on flags as debug only, so
-	 * Bspec defines the following GT well-on flags as debug only, so
-	 */
-	vlv_wait_for_gt_wells(dev_priv, false);
-
-	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
-	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
-
-	vlv_check_no_gt_access(dev_priv);
-
-	err = vlv_force_gfx_clock(dev_priv, true);
-	if (err)
-		goto err1;
-
-	err = vlv_allow_gt_wake(dev_priv, false);
-	if (err)
-		goto err2;
-
-	vlv_save_gunit_s0ix_state(dev_priv);
-
-	err = vlv_force_gfx_clock(dev_priv, false);
-	if (err)
-		goto err2;
-
-	return 0;
-
-err2:
-	/* For safety always re-enable waking and disable gfx clock forcing */
-	vlv_allow_gt_wake(dev_priv, true);
-err1:
-	vlv_force_gfx_clock(dev_priv, false);
-
-	return err;
-}
-
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
-				bool rpm_resume)
-{
-	int err;
-	int ret;
-
-	/*
-	 * If any of the steps fails, just try to continue; that's the best we
-	 * can do at this point. Return the first error code (which will also
-	 * leave RPM permanently disabled).
-	 */
-	ret = vlv_force_gfx_clock(dev_priv, true);
-
-	vlv_restore_gunit_s0ix_state(dev_priv);
-
-	err = vlv_allow_gt_wake(dev_priv, true);
-	if (!ret)
-		ret = err;
-
-	err = vlv_force_gfx_clock(dev_priv, false);
-	if (!ret)
-		ret = err;
-
-	vlv_check_no_gt_access(dev_priv);
-
-	if (rpm_resume)
-		intel_init_clock_gating(dev_priv);
-
-	return ret;
-}
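vlv_resume_prepare() takes the opposite policy from the suspend path: run every step even after a failure, but report only the first error code. A standalone sketch of that first-error-wins pattern:

#include <stdio.h>

static int step1(void) { return 0; }
static int step2(void) { return -5; }	/* fails, but we keep going */
static int step3(void) { return -7; }	/* also fails; code is dropped */

static int resume(void)
{
	int ret, err;

	ret = step1();

	err = step2();
	if (!ret)		/* keep only the first failure */
		ret = err;

	err = step3();
	if (!ret)
		ret = err;

	return ret;		/* -5 here */
}

int main(void)
{
	printf("%d\n", resume());
	return 0;
}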
-
 static int intel_runtime_suspend(struct device *kdev)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-	int ret = 0;
+	int ret;
 
-	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
-	DRM_DEBUG_KMS("Suspending device\n");
+	drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
 
 	disable_rpm_wakeref_asserts(rpm);
 
@@ -2517,11 +1594,10 @@ static int intel_runtime_suspend(struct device *kdev)
 
 	intel_display_power_suspend(dev_priv);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_suspend_complete(dev_priv);
-
+	ret = vlv_suspend_complete(dev_priv);
 	if (ret) {
-		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+		drm_err(&dev_priv->drm,
+			"Runtime suspend failed, disabling it (%d)\n", ret);
 		intel_uncore_runtime_resume(&dev_priv->uncore);
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2539,7 +1615,8 @@ static int intel_runtime_suspend(struct device *kdev)
 	intel_runtime_pm_driver_release(rpm);
 
 	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
-		DRM_ERROR("Unclaimed access detected prior to suspending\n");
+		drm_err(&dev_priv->drm,
+			"Unclaimed access detected prior to suspending\n");
 
 	rpm->suspended = true;
 
@@ -2571,7 +1648,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_poll_init(dev_priv);
 
-	DRM_DEBUG_KMS("Device suspended\n");
+	drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
 	return 0;
 }
 
@@ -2579,25 +1656,25 @@ static int intel_runtime_resume(struct device *kdev)
 {
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
-	int ret = 0;
+	int ret;
 
-	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
-	DRM_DEBUG_KMS("Resuming device\n");
+	drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
 
-	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
 	disable_rpm_wakeref_asserts(rpm);
 
 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 	rpm->suspended = false;
 	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
-		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
+		drm_dbg(&dev_priv->drm,
+			"Unclaimed access during suspend, bios?\n");
 
 	intel_display_power_resume(dev_priv);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		ret = vlv_resume_prepare(dev_priv, true);
+	ret = vlv_resume_prepare(dev_priv, true);
 
 	intel_uncore_runtime_resume(&dev_priv->uncore);
 
@@ -2623,9 +1700,10 @@ static int intel_runtime_resume(struct device *kdev)
 	enable_rpm_wakeref_asserts(rpm);
 
 	if (ret)
-		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+		drm_err(&dev_priv->drm,
+			"Runtime resume failed, disabling it (%d)\n", ret);
 	else
-		DRM_DEBUG_KMS("Device resumed\n");
+		drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
 
 	return ret;
 }
@@ -2673,12 +1751,12 @@ const struct dev_pm_ops i915_pm_ops = {
 static const struct file_operations i915_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
-	.release = drm_release,
+	.release = drm_release_noglobal,
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
-	.compat_ioctl = i915_compat_ioctl,
+	.compat_ioctl = i915_ioc32_compat_ioctl,
 	.llseek = noop_llseek,
 };
 
@@ -2770,9 +1848,6 @@ static struct drm_driver driver = {
 	.gem_prime_export = i915_gem_prime_export,
 	.gem_prime_import = i915_gem_prime_import,
 
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-	.get_scanout_position = i915_get_crtc_scanoutpos,
-
 	.dumb_create = i915_gem_dumb_create,
 	.dumb_map_offset = i915_gem_dumb_mmap_offset,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 810e3cc..1f5b9a5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
 #include <drm/drm_connector.h>
 #include <drm/i915_mei_hdcp_interface.h>
 
-#include "i915_fixed.h"
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_utils.h"
@@ -70,6 +69,7 @@
 #include "display/intel_dpll_mgr.h"
 #include "display/intel_dsb.h"
 #include "display/intel_frontbuffer.h"
+#include "display/intel_global_state.h"
 #include "display/intel_gmbus.h"
 #include "display/intel_opregion.h"
 
@@ -104,18 +104,23 @@
 
 #include "intel_region_lmem.h"
 
-#include "intel_gvt.h"
-
 /* General customization:
  */
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20200114"
-#define DRIVER_TIMESTAMP	1579001978
+#define DRIVER_DATE		"20200313"
+#define DRIVER_TIMESTAMP	1584144591
 
 struct drm_i915_gem_object;
 
+/*
+ * The code assumes that the hpd_pins below have consecutive values,
+ * starting with HPD_PORT_A, so that in most cases the HPD pin associated
+ * with any port can be retrieved by adding the corresponding port (or
+ * phy) enum value to HPD_PORT_A. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
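A tiny illustration of the enum arithmetic the comment above promises, with simplified enums rather than the full kernel definitions:

#include <stdio.h>

enum phy { PHY_A, PHY_B, PHY_C };
enum hpd_pin { HPD_PORT_A = 1, HPD_PORT_B, HPD_PORT_C };

/* Relies on both enums being consecutive, as the comment requires. */
static enum hpd_pin hpd_pin_for_phy(enum phy phy)
{
	return HPD_PORT_A + (phy - PHY_A);
}

int main(void)
{
	printf("%d\n", hpd_pin_for_phy(PHY_C) == HPD_PORT_C);	/* prints 1 */
	return 0;
}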
@@ -203,9 +208,7 @@ struct drm_i915_file_private {
 	} mm;
 
 	struct xarray context_xa;
-
-	struct idr vm_idr;
-	struct mutex vm_idr_lock; /* guards vm_idr */
+	struct xarray vm_xa;
 
 	unsigned int bsd_engine;
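The vm_idr plus vm_idr_lock pair collapses into a single xarray, which carries its own internal locking. A kernel-context sketch of the allocating-xarray idiom (the vm_* helpers are hypothetical names, not i915 functions):

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* XA_FLAGS_ALLOC lets the xarray hand out free indices itself. */
static DEFINE_XARRAY_ALLOC(vm_xa);

static int vm_store(void *vm, u32 *id_out)
{
	/* Finds a free index and stores the entry under the xarray's lock. */
	return xa_alloc(&vm_xa, id_out, vm, xa_limit_32b, GFP_KERNEL);
}

static void *vm_lookup(u32 id)
{
	return xa_load(&vm_xa, id);
}

static void vm_remove(u32 id)
{
	xa_erase(&vm_xa, id);
}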
 
@@ -255,18 +258,19 @@ struct sdvo_device_mapping {
 struct intel_connector;
 struct intel_encoder;
 struct intel_atomic_state;
-struct intel_crtc_state;
+struct intel_cdclk_config;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
 struct intel_initial_plane_config;
 struct intel_crtc;
 struct intel_limit;
 struct dpll;
-struct intel_cdclk_state;
 
 struct drm_i915_display_funcs {
 	void (*get_cdclk)(struct drm_i915_private *dev_priv,
-			  struct intel_cdclk_state *cdclk_state);
+			  struct intel_cdclk_config *cdclk_config);
 	void (*set_cdclk)(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state,
+			  const struct intel_cdclk_config *cdclk_config,
 			  enum pipe pipe);
 	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 			     enum i9xx_plane_id i9xx_plane);
@@ -280,7 +284,7 @@ struct drm_i915_display_funcs {
 				    struct intel_crtc *crtc);
 	int (*compute_global_watermarks)(struct intel_atomic_state *state);
 	void (*update_wm)(struct intel_crtc *crtc);
-	int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+	int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
 	u8 (*calc_voltage_level)(int cdclk);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
@@ -504,7 +508,7 @@ struct i915_psr {
 	u16 su_x_granularity;
 	bool dc3co_enabled;
 	u32 dc3co_exit_delay;
-	struct delayed_work idle_work;
+	struct delayed_work dc3co_work;
 	bool force_mode_changed;
 };
 
@@ -732,19 +736,10 @@ enum intel_ddb_partitioning {
 	INTEL_DDB_PART_5_6, /* IVB+ */
 };
 
-struct intel_wm_level {
-	bool enable;
-	u32 pri_val;
-	u32 spr_val;
-	u32 cur_val;
-	u32 fbc_val;
-};
-
 struct ilk_wm_values {
 	u32 wm_pipe[3];
 	u32 wm_lp[3];
 	u32 wm_lp_spr[3];
-	u32 wm_linetime[3];
 	bool enable_fbc_wm;
 	enum intel_ddb_partitioning partitioning;
 };
@@ -799,65 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 	return false;
 }
 
-struct skl_ddb_allocation {
-	u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
-struct skl_ddb_values {
-	unsigned dirty_pipes;
-	struct skl_ddb_allocation ddb;
-};
-
-struct skl_wm_level {
-	u16 min_ddb_alloc;
-	u16 plane_res_b;
-	u8 plane_res_l;
-	bool plane_en;
-	bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
-	bool x_tiled, y_tiled;
-	bool rc_surface;
-	bool is_planar;
-	u32 width;
-	u8 cpp;
-	u32 plane_pixel_rate;
-	u32 y_min_scanlines;
-	u32 plane_bytes_per_line;
-	uint_fixed_16_16_t plane_blocks_per_line;
-	uint_fixed_16_16_t y_tile_minimum;
-	u32 linetime_us;
-	u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
-	INTEL_PIPE_CRC_SOURCE_NONE,
-	INTEL_PIPE_CRC_SOURCE_PLANE1,
-	INTEL_PIPE_CRC_SOURCE_PLANE2,
-	INTEL_PIPE_CRC_SOURCE_PLANE3,
-	INTEL_PIPE_CRC_SOURCE_PLANE4,
-	INTEL_PIPE_CRC_SOURCE_PLANE5,
-	INTEL_PIPE_CRC_SOURCE_PLANE6,
-	INTEL_PIPE_CRC_SOURCE_PLANE7,
-	INTEL_PIPE_CRC_SOURCE_PIPE,
-	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
-	INTEL_PIPE_CRC_SOURCE_TV,
-	INTEL_PIPE_CRC_SOURCE_DP_B,
-	INTEL_PIPE_CRC_SOURCE_DP_C,
-	INTEL_PIPE_CRC_SOURCE_DP_D,
-	INTEL_PIPE_CRC_SOURCE_AUTO,
-	INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR	128
-struct intel_pipe_crc {
-	spinlock_t lock;
-	int skipped;
-	enum intel_pipe_crc_source source;
-};
-
 struct i915_frontbuffer_tracking {
 	spinlock_t lock;
 
@@ -875,14 +811,7 @@ struct i915_virtual_gpu {
 	u32 caps;
 };
 
-/* used in computing the new watermarks state */
-struct intel_wm_config {
-	unsigned int num_pipes_active;
-	bool sprites_enabled;
-	bool sprites_scaled;
-};
-
-struct intel_cdclk_state {
+struct intel_cdclk_config {
 	unsigned int cdclk, vco, ref, bypass;
 	u8 voltage_level;
 };
@@ -1002,33 +931,18 @@ struct drm_i915_private {
 	unsigned int max_cdclk_freq;
 
 	unsigned int max_dotclk_freq;
-	unsigned int rawclk_freq;
 	unsigned int hpll_freq;
 	unsigned int fdi_pll_freq;
 	unsigned int czclk_freq;
 
-	/*
-	 * For reading holding any crtc lock is sufficient,
-	 * for writing must hold all of them.
-	 */
 	struct {
-		/*
-		 * The current logical cdclk state.
-		 * See intel_atomic_state.cdclk.logical
-		 */
-		struct intel_cdclk_state logical;
-		/*
-		 * The current actual cdclk state.
-		 * See intel_atomic_state.cdclk.actual
-		 */
-		struct intel_cdclk_state actual;
-		/* The current hardware cdclk state */
-		struct intel_cdclk_state hw;
+		/* The current hardware cdclk configuration */
+		struct intel_cdclk_config hw;
 
 		/* cdclk, divider, and ratio table from bspec */
 		const struct intel_cdclk_vals *table;
 
-		int force_min_cdclk;
+		struct intel_global_obj obj;
 	} cdclk;
 
 	/**
@@ -1068,31 +982,32 @@ struct drm_i915_private {
 	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
 	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 
-#ifdef CONFIG_DEBUG_FS
-	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
-
-	/* dpll and cdclk state is protected by connection_mutex */
-	int num_shared_dpll;
-	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-	const struct intel_dpll_mgr *dpll_mgr;
-
-	/*
-	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
-	 * Must be global rather than per dpll, because on some platforms
-	 * plls share registers.
+	/**
+	 * dpll and cdclk state is protected by connection_mutex.
+	 * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+	 * Must be global rather than per dpll, because on some platforms plls
+	 * share registers.
 	 */
-	struct mutex dpll_lock;
+	struct {
+		struct mutex lock;
+
+		int num_shared_dpll;
+		struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+		const struct intel_dpll_mgr *mgr;
+
+		struct {
+			int nssc;
+			int ssc;
+		} ref_clks;
+	} dpll;
+
+	struct list_head global_obj_list;
 
 	/*
-	 * For reading active_pipes, min_cdclk, min_voltage_level holding
-	 * any crtc lock is sufficient, for writing must hold all of them.
+	 * For reading active_pipes, holding any crtc lock is
+	 * sufficient; for writing, all of them must be held.
 	 */
 	u8 active_pipes;
-	/* minimum acceptable cdclk for each pipe */
-	int min_cdclk[I915_MAX_PIPES];
-	/* minimum acceptable voltage level for each pipe */
-	u8 min_voltage_level[I915_MAX_PIPES];
 
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
@@ -1105,8 +1020,6 @@ struct drm_i915_private {
 		struct work_struct free_work;
 	} atomic_helper;
 
-	u16 orig_clock;
-
 	bool mchbar_need_disable;
 
 	struct intel_l3_parity l3_parity;
@@ -1191,7 +1104,6 @@ struct drm_i915_private {
 		/* current hardware state */
 		union {
 			struct ilk_wm_values hw;
-			struct skl_ddb_values skl_hw;
 			struct vlv_wm_values vlv;
 			struct g4x_wm_values g4x;
 		};
@@ -1213,6 +1125,8 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;
 
+	u8 enabled_dbuf_slices_mask; /* GEN11 has 2 configurable slices */
+
 	struct dram_info {
 		bool valid;
 		bool is_16gb_dimm;
@@ -1236,7 +1150,7 @@ struct drm_i915_private {
 		u8 num_planes;
 	} max_bw[6];
 
-	struct drm_private_obj bw_obj;
+	struct intel_global_obj bw_obj;
 
 	struct intel_runtime_pm runtime_pm;
 
@@ -1300,16 +1214,6 @@ struct drm_i915_private {
 	 */
 };
 
-struct dram_dimm_info {
-	u8 size, width, ranks;
-};
-
-struct dram_channel_info {
-	struct dram_dimm_info dimm_l, dimm_s;
-	u8 ranks;
-	bool is_16gb_dimm;
-};
-
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
 	return container_of(dev, struct drm_i915_private, drm);
@@ -1580,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define GLK_REVID_A0		0x0
 #define GLK_REVID_A1		0x1
+#define GLK_REVID_A2		0x2
+#define GLK_REVID_B0		0x3
 
 #define IS_GLK_REVID(dev_priv, since, until) \
 	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1718,10 +1624,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_GT_UC(dev_priv)	(INTEL_INFO(dev_priv)->has_gt_uc)
 
-/* Having GuC is not the same as using GuC */
-#define USES_GUC(dev_priv)		intel_uc_uses_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
 #define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)
 
 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)	(INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1767,11 +1669,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
 }
 
 /* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
 extern const struct dev_pm_ops i915_pm_ops;
 
 int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1780,18 +1677,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
 int i915_resume_switcheroo(struct drm_i915_private *i915);
 int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
 
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->vgpu.active;
-}
-
 int i915_getparam_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 
@@ -1856,12 +1741,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
-	return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -2010,20 +1889,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
 
-/* register wait wrappers for display regs */
-#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
-	intel_wait_for_register(&(dev_priv_)->uncore, \
-				(reg_), (mask_), (value_), (timeout_))
-
-#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({	\
-	u32 mask__ = (mask_);						\
-	intel_de_wait_for_register((dev_priv_), (reg_),			\
-				   mask__, mask__, (timeout_)); \
-})
-
-#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
-	intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
-
 /* i915_mm.c */
 int remap_io_mapping(struct vm_area_struct *vma,
 		     unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2046,10 +1911,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
 	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
 }
 
-static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
-{
-	return intel_guc_is_submission_supported(guc) &&
-		intel_guc_is_running(guc);
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5f6e639..ca54200 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
  */
 
 #include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
 #include <linux/dma-resv.h>
@@ -941,9 +940,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	if (i915_gem_object_never_bind_ggtt(obj))
-		return ERR_PTR(-ENODEV);
-
 	if (flags & PIN_MAPPABLE &&
 	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
 		/*
@@ -1009,6 +1005,12 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ERR_PTR(ret);
 
+	ret = i915_vma_wait_for_bind(vma);
+	if (ret) {
+		i915_vma_unpin(vma);
+		return ERR_PTR(ret);
+	}
+
 	return vma;
 }
 
@@ -1153,7 +1155,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 		/* Minimal basic recovery for KMS */
 		ret = i915_ggtt_enable_hw(dev_priv);
-		i915_gem_restore_gtt_mappings(dev_priv);
+		i915_ggtt_resume(&dev_priv->ggtt);
 		i915_gem_restore_fences(&dev_priv->ggtt);
 		intel_init_clock_gating(dev_priv);
 	}
@@ -1201,7 +1203,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 
 	i915_gem_drain_freed_objects(dev_priv);
 
-	WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
+	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
 }
 
 static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1229,7 +1231,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	i915_gem_drain_freed_objects(dev_priv);
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
-	WARN_ON(dev_priv->mm.shrink_count);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
 }
 
 int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -1269,7 +1271,8 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 
 	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
 		i915_gem_object_lock(obj);
-		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+		drm_WARN_ON(&i915->drm,
+			    i915_gem_object_set_to_cpu_domain(obj, true));
 		i915_gem_object_unlock(obj);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bed..4518b9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
  *
  */
 
-#include <drm/i915_drm.h>
-
 #include "gem/i915_gem_context.h"
 #include "gt/intel_gt_requests.h"
 
@@ -292,7 +290,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 		GEM_BUG_ON(!drm_mm_node_allocated(node));
 		vma = container_of(node, typeof(*vma), node);
 
-		/* If we are using coloring to insert guard pages between
+		/*
+		 * If we are using coloring to insert guard pages between
 		 * different cache domains within the address space, we have
 		 * to check whether the objects on either side of our range
 		 * abut and conflict. If they are in conflict, then we evict
@@ -309,22 +308,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 			}
 		}
 
-		if (flags & PIN_NONBLOCK &&
-		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
-			ret = -ENOSPC;
-			break;
-		}
-
-		/* Overlap of objects in the same batch? */
 		if (i915_vma_is_pinned(vma)) {
 			ret = -ENOSPC;
-			if (vma->exec_flags &&
-			    *vma->exec_flags & EXEC_OBJECT_PINNED)
-				ret = -EINVAL;
 			break;
 		}
 
-		/* Never show fear in the face of dragons!
+		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		/*
+		 * Never show fear in the face of dragons!
 		 *
 		 * We cannot directly remove this node from within this
 		 * iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d9c34a2..d152b64 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
  * IN THE SOFTWARE.
  */
 
-#include <drm/i915_drm.h>
-
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
 #include "i915_vgpu.h"
 
 /**
@@ -237,11 +236,12 @@ static int fence_update(struct i915_fence_reg *fence,
 		if (!i915_vma_is_map_and_fenceable(vma))
 			return -EINVAL;
 
-		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
-			 !i915_gem_object_get_tiling(vma->obj),
-			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-			 i915_gem_object_get_stride(vma->obj),
-			 i915_gem_object_get_tiling(vma->obj)))
+		if (drm_WARN(&uncore->i915->drm,
+			     !i915_gem_object_get_stride(vma->obj) ||
+			     !i915_gem_object_get_tiling(vma->obj),
+			     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+			     i915_gem_object_get_stride(vma->obj),
+			     i915_gem_object_get_tiling(vma->obj)))
 			return -EINVAL;
 
 		ret = i915_vma_sync(vma);
@@ -713,7 +713,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 		}
 
 		if (dcc == 0xffffffff) {
-			DRM_ERROR("Couldn't read from MCHBAR.  "
+			drm_err(&i915->drm, "Couldn't read from MCHBAR.  "
 				  "Disabling tiling.\n");
 			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e039eb5..cb43381 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
 #include <asm/set_memory.h>
 #include <asm/smp.h>
 
-#include <drm/i915_drm.h>
-
 #include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
@@ -63,7 +61,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 		/* XXX This does not prevent more requests being submitted! */
 		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
 						     -MAX_SCHEDULE_TIMEOUT)) {
-			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+			drm_err(&dev_priv->drm,
+				"Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9e401a5..2a4cd0b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -37,6 +37,7 @@
 #include <drm/drm_print.h>
 
 #include "display/intel_atomic.h"
+#include "display/intel_csr.h"
 #include "display/intel_overlay.h"
 
 #include "gem/i915_gem_context.h"
@@ -47,7 +48,6 @@
 #include "i915_gpu_error.h"
 #include "i915_memcpy.h"
 #include "i915_scatterlist.h"
-#include "intel_csr.h"
 
 #define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -450,6 +450,14 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
 		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
 			   slice, subslice,
 			   ee->instdone.row[slice][subslice]);
+
+	if (INTEL_GEN(m->i915) < 12)
+		return;
+
+	err_printf(m, "  SC_INSTDONE_EXTRA: 0x%08x\n",
+		   ee->instdone.slice_common_extra[0]);
+	err_printf(m, "  SC_INSTDONE_EXTRA2: 0x%08x\n",
+		   ee->instdone.slice_common_extra[1]);
 }
 
 static void error_print_request(struct drm_i915_error_state_buf *m,
@@ -473,9 +481,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct i915_gem_context_coredump *ctx)
 {
-	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+	const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+
+	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
 		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
-		   ctx->guilty, ctx->active);
+		   ctx->guilty, ctx->active,
+		   ctx->total_runtime * period,
+		   mul_u32_u32(ctx->avg_runtime, period));
 }
 
 static struct i915_vma_coredump *
@@ -515,6 +527,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 		   (u32)(ee->acthd>>32), (u32)ee->acthd);
 	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
 	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);
+	err_printf(m, "  ESR:   0x%08x\n", ee->esr);
 
 	error_print_instdone(m, ee);
 
@@ -1102,6 +1115,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
 	}
 
 	if (INTEL_GEN(i915) >= 4) {
+		ee->esr = ENGINE_READ(engine, RING_ESR);
 		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
 		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
 		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
@@ -1228,7 +1242,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
 {
 	struct i915_gem_context *ctx;
 	struct task_struct *task;
-	bool capture;
+	bool simulated;
 
 	rcu_read_lock();
 	ctx = rcu_dereference(rq->context->gem_context);
@@ -1236,7 +1250,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
 		ctx = NULL;
 	rcu_read_unlock();
 	if (!ctx)
-		return false;
+		return true;
 
 	rcu_read_lock();
 	task = pid_task(ctx->pid, PIDTYPE_PID);
@@ -1250,10 +1264,13 @@ static bool record_context(struct i915_gem_context_coredump *e,
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
 
-	capture = i915_gem_context_no_error_capture(ctx);
+	e->total_runtime = rq->context->runtime.total;
+	e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+
+	simulated = i915_gem_context_no_error_capture(ctx);
 
 	i915_gem_context_put(ctx);
-	return capture;
+	return simulated;
 }
 
 struct intel_engine_capture_vma {
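record_context() above captures the context runtime in CS timestamp ticks; avg_runtime is only a u32, so the print path widens the multiply with mul_u32_u32() to avoid a 32-bit overflow. A userspace re-creation of that widening multiply (the tick period value is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as the kernel's mul_u32_u32(): widen before multiplying. */
static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t avg_ticks = 3000000000u;	/* would overflow a u32 product */
	uint32_t period_ns = 83;		/* hypothetical tick period */

	printf("%" PRIu64 " ns\n", mul_u32_u32(avg_ticks, period_ns));
	return 0;
}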
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index e4a6afe..0d1f6c8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -75,6 +75,7 @@ struct intel_engine_coredump {
 	u32 hws;
 	u32 ipeir;
 	u32 ipehr;
+	u32 esr;
 	u32 bbstate;
 	u32 instpm;
 	u32 instps;
@@ -87,6 +88,10 @@ struct intel_engine_coredump {
 
 	struct i915_gem_context_coredump {
 		char comm[TASK_COMM_LEN];
+
+		u64 total_runtime;
+		u32 avg_runtime;
+
 		pid_t pid;
 		int active;
 		int guilty;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c100724..8e45ca3 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
  */
 #include <linux/compat.h>
 
-#include <drm/i915_drm.h>
 #include <drm/drm_ioctl.h>
+
 #include "i915_drv.h"
+#include "i915_ioc32.h"
 
 struct drm_i915_getparam32 {
 	s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 };
 
 /**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
  * @filp: the file pointer
  * @cmd: the ioctl command (and encoded flags)
  * @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
  * Called whenever a 32-bit process running under a 64-bit kernel
  * performs an ioctl on /dev/dri/card<n>.
  */
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 0000000..40dcd55
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
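The new header follows the standard compat-ioctl idiom: with CONFIG_COMPAT disabled the symbol collapses to NULL, so the file_operations initializer needs no #ifdef at the use site. A condensed kernel-context sketch of the pattern (the my_* names are hypothetical):

#include <linux/fs.h>

#ifdef CONFIG_COMPAT
long my_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define my_compat_ioctl NULL	/* VFS then fails 32-bit ioctls with -ENOTTY */
#endif

static const struct file_operations my_fops = {
	/* No #ifdef needed here; the name resolves either way. */
	.compat_ioctl = my_compat_ioctl,
};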
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afc6aad..9f0653c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
 
 #include <drm/drm_drv.h>
 #include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
 
 #include "display/intel_display_types.h"
 #include "display/intel_fifo_underrun.h"
@@ -79,7 +78,7 @@ static const u32 hpd_ibx[HPD_NUM_PINS] = {
 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
-	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
 };
 
 static const u32 hpd_cpt[HPD_NUM_PINS] = {
@@ -87,7 +86,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
-	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
 };
 
 static const u32 hpd_spt[HPD_NUM_PINS] = {
@@ -95,7 +94,7 @@ static const u32 hpd_spt[HPD_NUM_PINS] = {
 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
-	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
 };
 
 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
@@ -104,7 +103,7 @@ static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
-	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
 };
 
 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
@@ -113,7 +112,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
 };
 
 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
@@ -122,21 +121,21 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
-	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
 };
 
 /* BXT hpd list */
 static const u32 hpd_bxt[HPD_NUM_PINS] = {
 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
-	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
 };
 
 static const u32 hpd_gen11[HPD_NUM_PINS] = {
 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
-	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
 };
 
 static const u32 hpd_gen12[HPD_NUM_PINS] = {
@@ -145,7 +144,7 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
 	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
 	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
 	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
-	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
 };
 
 static const u32 hpd_icp[HPD_NUM_PINS] = {
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
 	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
 };
 
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+	drm_crtc_handle_vblank(&crtc->base);
+}
+
 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
 		    i915_reg_t iir, i915_reg_t ier)
 {
@@ -208,8 +215,9 @@ static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
 	if (val == 0)
 		return;
 
-	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
-	     i915_mmio_reg_offset(reg), val);
+	drm_WARN(&uncore->i915->drm, 1,
+		 "Interrupt register 0x%x is not zero: 0x%08x\n",
+		 i915_mmio_reg_offset(reg), val);
 	intel_uncore_write(uncore, reg, 0xffffffff);
 	intel_uncore_posting_read(uncore, reg);
 	intel_uncore_write(uncore, reg, 0xffffffff);
@@ -223,8 +231,9 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
 	if (val == 0)
 		return;
 
-	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
-	     i915_mmio_reg_offset(GEN2_IIR), val);
+	drm_WARN(&uncore->i915->drm, 1,
+		 "Interrupt register 0x%x is not zero: 0x%08x\n",
+		 i915_mmio_reg_offset(GEN2_IIR), val);
 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
 	intel_uncore_posting_read16(uncore, GEN2_IIR);
 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
@@ -262,7 +271,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
 	u32 val;
 
 	lockdep_assert_held(&dev_priv->irq_lock);
-	WARN_ON(bits & ~mask);
+	drm_WARN_ON(&dev_priv->drm, bits & ~mask);
 
 	val = I915_READ(PORT_HOTPLUG_EN);
 	val &= ~mask;
@@ -305,9 +314,9 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
 	new_val = dev_priv->irq_mask;
@@ -336,9 +345,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
 	old_val = I915_READ(GEN8_DE_PORT_IMR);
@@ -369,9 +378,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
 	new_val = dev_priv->de_irq_mask[pipe];
@@ -399,11 +408,11 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
 	sdeimr &= ~interrupt_mask;
 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
 
-	WARN_ON(enabled_irq_mask & ~interrupt_mask);
+	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 		return;
 
 	I915_WRITE(SDEIMR, sdeimr);
@@ -425,13 +434,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
 	 * On pipe A we don't support the PSR interrupt yet,
 	 * on pipe B and C the same bit MBZ.
 	 */
-	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm,
+			     status_mask & PIPE_A_PSR_STATUS_VLV))
 		return 0;
 	/*
 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
 	 * A the same bit is for perf counters which we don't use either.
 	 */
-	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+	if (drm_WARN_ON_ONCE(&dev_priv->drm,
+			     status_mask & PIPE_B_PSR_STATUS_VLV))
 		return 0;
 
 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
@@ -443,10 +454,11 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
 
 out:
-	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
-		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
-		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
-		  pipe_name(pipe), enable_mask, status_mask);
+	drm_WARN_ONCE(&dev_priv->drm,
+		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+		      pipe_name(pipe), enable_mask, status_mask);
 
 	return enable_mask;
 }
@@ -457,12 +469,12 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
 	i915_reg_t reg = PIPESTAT(pipe);
 	u32 enable_mask;
 
-	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
-		  "pipe %c: status_mask=0x%x\n",
-		  pipe_name(pipe), status_mask);
+	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: status_mask=0x%x\n",
+		      pipe_name(pipe), status_mask);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
-	WARN_ON(!intel_irqs_enabled(dev_priv));
+	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 
 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
 		return;
@@ -480,12 +492,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
 	i915_reg_t reg = PIPESTAT(pipe);
 	u32 enable_mask;
 
-	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
-		  "pipe %c: status_mask=0x%x\n",
-		  pipe_name(pipe), status_mask);
+	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+		      "pipe %c: status_mask=0x%x\n",
+		      pipe_name(pipe), status_mask);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
-	WARN_ON(!intel_irqs_enabled(dev_priv));
+	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
 
 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
 		return;
@@ -624,9 +636,9 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
 	 * register.
 	 */
 	do {
-		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ_FW(low_frame);
-		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = intel_de_read_fw(dev_priv, low_frame);
+		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -683,15 +695,17 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
 		 * pipe frame time stamp. The time stamp value
 		 * is sampled at every start of vertical blank.
 		 */
-		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+		scan_prev_time = intel_de_read_fw(dev_priv,
+						  PIPE_FRMTMSTMP(crtc->pipe));
 
 		/*
 		 * The TIMESTAMP_CTR register has the current
 		 * time stamp value.
 		 */
-		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
 
-		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+		scan_post_time = intel_de_read_fw(dev_priv,
+						  PIPE_FRMTMSTMP(crtc->pipe));
 	} while (scan_post_time != scan_prev_time);
 
 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
@@ -702,7 +716,10 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
 	return scanline;
 }
 
-/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
+/*
+ * intel_de_read_fw(), only for fast reads of display block, no need for
+ * forcewake etc.
+ */
 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -726,9 +743,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 		vtotal /= 2;
 
 	if (IS_GEN(dev_priv, 2))
-		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
 	else
-		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+		position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
 	/*
 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -747,7 +764,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 
 		for (i = 0; i < 100; i++) {
 			udelay(1);
-			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 			if (temp != position) {
 				position = temp;
 				break;
@@ -762,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 	return (position + crtc->scanline_offset) % vtotal;
 }
 
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
-			      bool in_vblank_irq, int *vpos, int *hpos,
-			      ktime_t *stime, ktime_t *etime,
-			      const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+				     bool in_vblank_irq,
+				     int *vpos, int *hpos,
+				     ktime_t *stime, ktime_t *etime,
+				     const struct drm_display_mode *mode)
 {
+	struct drm_device *dev = _crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+	struct intel_crtc *crtc = to_intel_crtc(_crtc);
 	enum pipe pipe = crtc->pipe;
 	int position;
 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
@@ -777,9 +796,10 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
 
-	if (WARN_ON(!mode->crtc_clock)) {
-		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
-				 "pipe %c\n", pipe_name(pipe));
+	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+		drm_dbg(&dev_priv->drm,
+			"trying to get scanoutpos for disabled "
+			"pipe %c\n", pipe_name(pipe));
 		return false;
 	}
 
@@ -818,7 +838,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
 		/* convert to pixel counts */
 		vbl_start *= htotal;
@@ -879,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
 	return true;
 }
 
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+				     ktime_t *vblank_time, bool in_vblank_irq)
+{
+	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+		crtc, max_error, vblank_time, in_vblank_irq,
+		i915_get_crtc_scanoutpos);
+}
+
 int intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -918,7 +946,7 @@ static void ivb_parity_work(struct work_struct *work)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	/* If we've screwed up tracking, just let the interrupt fire again */
-	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
 		goto out;
 
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -929,7 +957,8 @@ static void ivb_parity_work(struct work_struct *work)
 		i915_reg_t reg;
 
 		slice--;
-		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
+		if (drm_WARN_ON_ONCE(&dev_priv->drm,
+				     slice >= NUM_L3_SLICES(dev_priv)))
 			break;
 
 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -966,7 +995,7 @@ static void ivb_parity_work(struct work_struct *work)
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 out:
-	WARN_ON(dev_priv->l3_parity.which_slice);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
 	spin_lock_irq(&gt->irq_lock);
 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
 	spin_unlock_irq(&gt->irq_lock);
@@ -1165,8 +1194,9 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
 			*long_mask |= BIT(pin);
 	}
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
-			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+	drm_dbg(&dev_priv->drm,
+		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
 
 }
 
@@ -1187,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 					 u32 crc2, u32 crc3,
 					 u32 crc4)
 {
-	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
 
 	trace_intel_pipe_crc(crtc, crcs);
@@ -1351,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1369,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 			blc_event = true;
@@ -1393,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 			blc_event = true;
@@ -1419,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 	for_each_pipe(dev_priv, pipe) {
 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1463,9 +1493,9 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 	}
 
-	WARN_ONCE(1,
-		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
-		  I915_READ(PORT_HOTPLUG_STAT));
+	drm_WARN_ONCE(&dev_priv->drm, 1,
+		      "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+		      I915_READ(PORT_HOTPLUG_STAT));
 
 	return hotplug_status;
 }
@@ -1603,7 +1633,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		u32 master_ctl, iir;
 		u32 pipe_stats[I915_MAX_PIPES] = {};
 		u32 hotplug_status = 0;
-		u32 gt_iir[4];
 		u32 ier = 0;
 
 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -1631,7 +1660,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		ier = I915_READ(VLV_IER);
 		I915_WRITE(VLV_IER, 0);
 
-		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+		gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
 
 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1655,8 +1684,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 		I915_WRITE(VLV_IER, ier);
 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 
-		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
 		if (hotplug_status)
 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
@@ -1710,8 +1737,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
 			       SDE_AUDIO_POWER_SHIFT);
-		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-				 port_name(port));
+		drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+			port_name(port));
 	}
 
 	if (pch_iir & SDE_AUX_MASK)
@@ -1721,25 +1748,27 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		gmbus_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
-		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
 
 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
-		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
 
 	if (pch_iir & SDE_POISON)
-		DRM_ERROR("PCH poison interrupt\n");
+		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
 
-	if (pch_iir & SDE_FDI_MASK)
+	if (pch_iir & SDE_FDI_MASK) {
 		for_each_pipe(dev_priv, pipe)
-			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
-					 pipe_name(pipe),
-					 I915_READ(FDI_RX_IIR(pipe)));
+			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
+				pipe_name(pipe),
+				I915_READ(FDI_RX_IIR(pipe)));
+	}
 
 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
-		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
 
 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
-		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+		drm_dbg(&dev_priv->drm,
+			"PCH transcoder CRC error interrupt\n");
 
 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
@@ -1754,7 +1783,7 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
-		DRM_ERROR("Poison interrupt\n");
+		drm_err(&dev_priv->drm, "Poison interrupt\n");
 
 	for_each_pipe(dev_priv, pipe) {
 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
@@ -1777,7 +1806,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 	enum pipe pipe;
 
 	if (serr_int & SERR_INT_POISON)
-		DRM_ERROR("PCH poison interrupt\n");
+		drm_err(&dev_priv->drm, "PCH poison interrupt\n");
 
 	for_each_pipe(dev_priv, pipe)
 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
@@ -1796,8 +1825,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
 			       SDE_AUDIO_POWER_SHIFT_CPT);
-		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
-				 port_name(port));
+		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+			port_name(port));
 	}
 
 	if (pch_iir & SDE_AUX_MASK_CPT)
@@ -1807,16 +1836,17 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		gmbus_irq_handler(dev_priv);
 
 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
-		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
 
 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
-		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
 
-	if (pch_iir & SDE_FDI_MASK_CPT)
+	if (pch_iir & SDE_FDI_MASK_CPT) {
 		for_each_pipe(dev_priv, pipe)
-			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
-					 pipe_name(pipe),
-					 I915_READ(FDI_RX_IIR(pipe)));
+			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
+				pipe_name(pipe),
+				I915_READ(FDI_RX_IIR(pipe)));
+	}
 
 	if (pch_iir & SDE_ERROR_CPT)
 		cpt_serr_int_handler(dev_priv);
@@ -1844,8 +1874,9 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
 		pins = hpd_icp;
 	} else {
-		WARN(!HAS_PCH_ICP(dev_priv),
-		     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+		drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
+			 "Unrecognized PCH type 0x%x\n",
+			 INTEL_PCH_TYPE(dev_priv));
 
 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -1952,11 +1983,11 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 		intel_opregion_asle_intr(dev_priv);
 
 	if (de_iir & DE_POISON)
-		DRM_ERROR("Poison interrupt\n");
+		drm_err(&dev_priv->drm, "Poison interrupt\n");
 
 	for_each_pipe(dev_priv, pipe) {
 		if (de_iir & DE_PIPE_VBLANK(pipe))
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2009,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 
 	for_each_pipe(dev_priv, pipe) {
 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 	}
 
 	/* check event from PCH */
@@ -2153,7 +2184,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 	if (pin_mask)
 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 	else
-		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+		drm_err(&dev_priv->drm,
+			"Unexpected DE HPD interrupt 0x%08x\n", iir);
 }
 
 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
@@ -2226,7 +2258,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 	}
 
 	if (!found)
-		DRM_ERROR("Unexpected DE Misc interrupt\n");
+		drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
 }
 
 static irqreturn_t
@@ -2243,7 +2275,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			ret = IRQ_HANDLED;
 			gen8_de_misc_irq_handler(dev_priv, iir);
 		} else {
-			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+			drm_err(&dev_priv->drm,
+				"The master control interrupt lied (DE MISC)!\n");
 		}
 	}
 
@@ -2254,7 +2287,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			ret = IRQ_HANDLED;
 			gen11_hpd_irq_handler(dev_priv, iir);
 		} else {
-			DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+			drm_err(&dev_priv->drm,
+				"The master control interrupt lied, (DE HPD)!\n");
 		}
 	}
 
@@ -2294,10 +2328,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			}
 
 			if (!found)
-				DRM_ERROR("Unexpected DE Port interrupt\n");
+				drm_err(&dev_priv->drm,
+					"Unexpected DE Port interrupt\n");
 		}
 		else
-			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+			drm_err(&dev_priv->drm,
+				"The master control interrupt lied (DE PORT)!\n");
 	}
 
 	for_each_pipe(dev_priv, pipe) {
@@ -2308,7 +2344,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 
 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
 		if (!iir) {
-			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+			drm_err(&dev_priv->drm,
+				"The master control interrupt lied (DE PIPE)!\n");
 			continue;
 		}
 
@@ -2316,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
 		if (iir & GEN8_PIPE_VBLANK)
-			drm_handle_vblank(&dev_priv->drm, pipe);
+			intel_handle_vblank(dev_priv, pipe);
 
 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2326,9 +2363,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 
 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
 		if (fault_errors)
-			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
-				  pipe_name(pipe),
-				  fault_errors);
+			drm_err(&dev_priv->drm,
+				"Fault errors on pipe %c: 0x%08x\n",
+				pipe_name(pipe),
+				fault_errors);
 	}
 
 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
@@ -2354,7 +2392,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			 * Like on previous PCH there seems to be something
 			 * fishy going on with forwarding PCH interrupts.
 			 */
-			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+			drm_dbg(&dev_priv->drm,
+				"The master control interrupt lied (SDE)!\n");
 		}
 	}
 
@@ -2384,7 +2423,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	struct drm_i915_private *dev_priv = arg;
 	void __iomem * const regs = dev_priv->uncore.regs;
 	u32 master_ctl;
-	u32 gt_iir[4];
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
@@ -2395,8 +2433,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 		return IRQ_NONE;
 	}
 
-	/* Find, clear, then process each source of interrupt */
-	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+	/* Find, queue (onto bottom-halves), then clear each source */
+	gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
 	if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2407,8 +2445,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 
 	gen8_master_intr_enable(regs);
 
-	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
 	return IRQ_HANDLED;
 }
 
@@ -2491,7 +2527,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
 		return IRQ_NONE;
 	}
 
-	/* Find, clear, then process each source of interrupt. */
+	/* Find, queue (onto bottom-halves), then clear each source */
 	gen11_gt_irq_handler(gt, master_ctl);
 
 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
@@ -2686,7 +2722,7 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
 	if (HAS_PCH_NOP(dev_priv))
 		return;
 
-	WARN_ON(I915_READ(SDEIER) != 0);
+	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
 	I915_WRITE(SDEIER, 0xffffffff);
 	POSTING_READ(SDEIER);
 }
@@ -2733,7 +2769,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
 			I915_LPE_PIPE_C_INTERRUPT;
 
-	WARN_ON(dev_priv->irq_mask != ~0u);
+	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
 
 	dev_priv->irq_mask = ~enable_mask;
 
@@ -3163,8 +3199,9 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
 		   PORTB_HOTPLUG_ENABLE |
 		   PORTC_HOTPLUG_ENABLE;
 
-	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
-		      hotplug, enabled_irqs);
+	drm_dbg_kms(&dev_priv->drm,
+		    "Invert bit setting: hp_ctl:%x hp_port:%x\n",
+		    hotplug, enabled_irqs);
 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
 
 	/*
@@ -3418,7 +3455,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	u32 mask = SDE_GMBUS_ICP;
 
-	WARN_ON(I915_READ(SDEIER) != 0);
+	drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
 	I915_WRITE(SDEIER, 0xffffffff);
 	POSTING_READ(SDEIER);
 
@@ -3547,7 +3584,8 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
 
 	if (eir_stuck)
-		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
+			eir_stuck);
 }
 
 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3584,7 +3622,8 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
 
 	if (eir_stuck)
-		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
+			eir_stuck);
 }
 
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
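
Three mechanical conversions run through the i915_irq.c hunks above: bare WARN*/DRM_DEBUG_DRIVER/DRM_ERROR calls become the device-aware drm_WARN*/drm_dbg/drm_err variants, raw I915_READ_FW accesses in the scanline and frame-counter paths become intel_de_read_fw(), and vblank dispatch switches from a pipe index (drm_handle_vblank) to a crtc pointer (drm_crtc_handle_vblank, via the new intel_handle_vblank() helper). The retry loop in i915_get_vblank_counter() deserves a closer look: the frame counter is split across a high and a low register, so the high half is read twice and the whole read is retried until both samples agree, which guarantees the low half was not sampled across a carry. A minimal standalone sketch of that idiom, with the MMIO reads replaced by a fake counter (all names below are invented for illustration):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Stand-ins for the split hardware registers; in the driver these
   * are the high_frame/low_frame reads in i915_get_vblank_counter().
   */
  static uint64_t fake_counter = 0x1ffffULL;	/* about to carry */

  static uint32_t read_high(void)
  {
  	return (uint32_t)(fake_counter >> 16) & 0xffff;
  }

  static uint32_t read_low(void)
  {
  	uint32_t low = (uint32_t)(fake_counter & 0xffff);

  	fake_counter++;	/* simulate the counter ticking between reads */
  	return low;
  }

  /*
   * Read high, then low, then high again, retrying until the two high
   * samples agree, so the low half is known to belong to that high half.
   */
  static uint32_t read_frame_counter(void)
  {
  	uint32_t high1, high2, low;

  	do {
  		high1 = read_high();
  		low   = read_low();
  		high2 = read_high();
  	} while (high1 != high2);

  	return (high1 << 16) | low;
  }

  int main(void)
  {
  	printf("frame counter: 0x%08x\n", read_frame_counter());
  	return 0;
  }

The same double-read trick appears again in __intel_get_crtc_scanline_from_timestamp() above, where the pipe frame timestamp is sampled before and after reading the free-running clock.
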
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 812c47a..25f25cd 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 				     u8 pipe_mask);
 
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-			      bool in_vblank_irq, int *vpos, int *hpos,
-			      ktime_t *stime, ktime_t *etime,
-			      const struct drm_display_mode *mode);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+				     ktime_t *vblank_time, bool in_vblank_irq);
 
 u32 i915_get_vblank_counter(struct drm_crtc *crtc);
 u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
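
The header change completes the vblank rework: i915_get_crtc_scanoutpos() is no longer exported (it became static above), and the only public entry point is intel_crtc_get_vblank_timestamp(), a thin wrapper that hands the driver's position sampler to the core helper. The shape of that delegation, a shared helper parameterised by a driver-specific callback, can be sketched in standalone C (the names below are illustrative, not the DRM API):

  #include <stdbool.h>
  #include <stdio.h>

  struct crtc {
  	int pipe;
  };

  /* Driver-specific callback: sample the current scanout position. */
  typedef bool (*get_scanout_pos_t)(struct crtc *crtc, int *vpos, int *hpos);

  /*
   * Shared helper: converts a sampled position into a timestamp.
   * (Illustrative stand-in for the DRM vblank helper.)
   */
  static bool helper_get_vblank_timestamp(struct crtc *crtc, long *ts,
  					get_scanout_pos_t sample)
  {
  	int vpos, hpos;

  	if (!sample(crtc, &vpos, &hpos))
  		return false;
  	*ts = 1000000L - vpos * 100L;	/* fake back-calculation */
  	return true;
  }

  /* Driver-private sampler, analogous to the now-static scanoutpos code. */
  static bool my_get_scanout_pos(struct crtc *crtc, int *vpos, int *hpos)
  {
  	*vpos = 42;
  	*hpos = 0;
  	return true;
  }

  /* The only exported hook: a thin wrapper injecting the private sampler. */
  bool my_crtc_get_vblank_timestamp(struct crtc *crtc, long *ts)
  {
  	return helper_get_vblank_timestamp(crtc, ts, my_get_scanout_pos);
  }

  int main(void)
  {
  	struct crtc c = { .pipe = 0 };
  	long ts;

  	if (my_crtc_get_vblank_timestamp(&c, &ts))
  		printf("vblank timestamp: %ld\n", ts);
  	return 0;
  }

Keeping the sampler static and exposing only the timestamp hook narrows the driver's interface to what the vblank core actually consumes.
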
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1dd1f36..add00ec 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@
 	MODULE_PARM_DESC(name, desc)
 
 struct i915_params i915_modparams __read_mostly = {
-#define MEMBER(T, member, value) .member = (value),
+#define MEMBER(T, member, value, ...) .member = (value),
 	I915_PARAMS_FOR_EACH(MEMBER)
 #undef MEMBER
 };
@@ -92,9 +92,6 @@ i915_param_named_unsafe(force_probe, charp, 0400,
 	"Force probe the driver for specified devices. "
 	"See CONFIG_DRM_I915_FORCE_PROBE for details.");
 
-i915_param_named_unsafe(alpha_support, bool, 0400,
-	"Deprecated. See i915.force_probe.");
-
 i915_param_named_unsafe(disable_power_well, int, 0400,
 	"Disable display power wells when possible "
 	"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
@@ -106,10 +103,6 @@ i915_param_named(fastboot, int, 0600,
 	"(0=disabled, 1=enabled) "
 	"Default: -1 (use per-chip default)");
 
-i915_param_named_unsafe(prefault_disable, bool, 0600,
-	"Disable page prefaulting for pread/pwrite/reloc (default:false). "
-	"For developers only.");
-
 i915_param_named_unsafe(load_detect_test, bool, 0600,
 	"Force-enable the VGA load detect code for testing (default:false). "
 	"For developers only.");
@@ -172,7 +165,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
 
 i915_param_named(enable_dpcd_backlight, int, 0600,
 	"Enable support for DPCD backlight control"
-	"(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
+	"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT)
 i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 31b88f2..4532373 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,49 +36,49 @@ struct drm_printer;
 /*
  * Invoke param, a function-like macro, for each i915 param, with arguments:
  *
- * param(type, name, value)
+ * param(type, name, value, mode)
  *
- * type: parameter type, one of {bool, int, unsigned int, char *}
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
  * name: name of the parameter
  * value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ *       debugfs file
  */
 #define I915_PARAMS_FOR_EACH(param) \
-	param(char *, vbt_firmware, NULL) \
-	param(int, modeset, -1) \
-	param(int, lvds_channel_mode, 0) \
-	param(int, panel_use_ssc, -1) \
-	param(int, vbt_sdvo_panel_type, -1) \
-	param(int, enable_dc, -1) \
-	param(int, enable_fbc, -1) \
-	param(int, enable_psr, -1) \
-	param(int, disable_power_well, -1) \
-	param(int, enable_ips, 1) \
-	param(int, invert_brightness, 0) \
-	param(int, enable_guc, 0) \
-	param(int, guc_log_level, -1) \
-	param(char *, guc_firmware_path, NULL) \
-	param(char *, huc_firmware_path, NULL) \
-	param(char *, dmc_firmware_path, NULL) \
-	param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
-	param(int, edp_vswing, 0) \
-	param(int, reset, 3) \
-	param(unsigned int, inject_probe_failure, 0) \
-	param(int, fastboot, -1) \
-	param(int, enable_dpcd_backlight, 0) \
-	param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
-	param(unsigned long, fake_lmem_start, 0) \
+	param(char *, vbt_firmware, NULL, 0400) \
+	param(int, modeset, -1, 0400) \
+	param(int, lvds_channel_mode, 0, 0400) \
+	param(int, panel_use_ssc, -1, 0600) \
+	param(int, vbt_sdvo_panel_type, -1, 0400) \
+	param(int, enable_dc, -1, 0400) \
+	param(int, enable_fbc, -1, 0600) \
+	param(int, enable_psr, -1, 0600) \
+	param(int, disable_power_well, -1, 0400) \
+	param(int, enable_ips, 1, 0600) \
+	param(int, invert_brightness, 0, 0600) \
+	param(int, enable_guc, 0, 0400) \
+	param(int, guc_log_level, -1, 0400) \
+	param(char *, guc_firmware_path, NULL, 0400) \
+	param(char *, huc_firmware_path, NULL, 0400) \
+	param(char *, dmc_firmware_path, NULL, 0400) \
+	param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
+	param(int, edp_vswing, 0, 0400) \
+	param(unsigned int, reset, 3, 0600) \
+	param(unsigned int, inject_probe_failure, 0, 0600) \
+	param(int, fastboot, -1, 0600) \
+	param(int, enable_dpcd_backlight, -1, 0600) \
+	param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
+	param(unsigned long, fake_lmem_start, 0, 0400) \
 	/* leave bools at the end to not create holes */ \
-	param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
-	param(bool, enable_hangcheck, true) \
-	param(bool, prefault_disable, false) \
-	param(bool, load_detect_test, false) \
-	param(bool, force_reset_modeset_test, false) \
-	param(bool, error_capture, true) \
-	param(bool, disable_display, false) \
-	param(bool, verbose_state_checks, true) \
-	param(bool, nuclear_pageflip, false) \
-	param(bool, enable_dp_mst, true) \
-	param(bool, enable_gvt, false)
+	param(bool, enable_hangcheck, true, 0600) \
+	param(bool, load_detect_test, false, 0600) \
+	param(bool, force_reset_modeset_test, false, 0600) \
+	param(bool, error_capture, true, 0600) \
+	param(bool, disable_display, false, 0400) \
+	param(bool, verbose_state_checks, true, 0) \
+	param(bool, nuclear_pageflip, false, 0400) \
+	param(bool, enable_dp_mst, true, 0600) \
+	param(bool, enable_gvt, false, 0400)
 
 #define MEMBER(T, member, ...) T member;
 struct i915_params {
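
The parameter table above is an X-macro: I915_PARAMS_FOR_EACH(param) expands param(...) once per row, and each call site defines param to emit whatever it needs. Because consumers such as MEMBER declare trailing variadic arguments, adding the new mode column only touches the call sites that care about it. A compact standalone illustration of the pattern (the parameter rows below are made up, not the i915 list):

  #include <stdio.h>

  /*
   * X-macro table: one row per parameter, columns are
   * (type, name, default value, mode). Call sites that ignore a
   * column absorb it via "...".
   */
  #define PARAMS_FOR_EACH(param) \
  	param(int, modeset, -1, 0400) \
  	param(int, fastboot, -1, 0600) \
  	param(_Bool, enable_hangcheck, 1, 0600)

  /* Struct definition: only type and name matter here. */
  #define MEMBER(T, member, ...) T member;
  struct params {
  	PARAMS_FOR_EACH(MEMBER)
  };
  #undef MEMBER

  /* Default initializer: consumes the value column, ignores the mode. */
  #define MEMBER(T, member, value, ...) .member = (value),
  static struct params params = {
  	PARAMS_FOR_EACH(MEMBER)
  };
  #undef MEMBER

  /* A consumer of the new mode column, e.g. for creating debugfs files. */
  #define PRINT_MODE(T, member, value, mode) \
  	printf("%-20s mode=%04o\n", #member, (unsigned)(mode));

  int main(void)
  {
  	printf("modeset default: %d\n", params.modeset);
  	PARAMS_FOR_EACH(PRINT_MODE)
  	return 0;
  }

In the driver the new column feeds debugfs file creation, with 0 meaning no file at all, per the comment in the hunk above.
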
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f631f6d..2c80a01 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
 #include <linux/vga_switcheroo.h>
 
 #include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
 
 #include "display/intel_fbdev.h"
 
@@ -615,7 +616,8 @@ static const struct intel_device_info chv_info = {
 	.has_gt_uc = 1, \
 	.display.has_hdcp = 1, \
 	.display.has_ipc = 1, \
-	.ddb_size = 896
+	.ddb_size = 896, \
+	.num_supported_dbuf_slices = 1
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
@@ -650,6 +652,7 @@ static const struct intel_device_info skl_gt4_info = {
 #define GEN9_LP_FEATURES \
 	GEN(9), \
 	.is_lp = 1, \
+	.num_supported_dbuf_slices = 1, \
 	.display.has_hotplug = 1, \
 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +777,7 @@ static const struct intel_device_info cnl_info = {
 	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
+	.num_supported_dbuf_slices = 2, \
 	.has_logical_ring_elsq = 1, \
 	.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
 
@@ -819,11 +823,9 @@ static const struct intel_device_info tgl_info = {
 	GEN12_FEATURES,
 	PLATFORM(INTEL_TIGERLAKE),
 	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
-	.require_force_probe = 1,
 	.display.has_modular_fia = 1,
 	.engine_mask =
 		BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
-	.has_rps = false, /* XXX disabled for debugging */
 };
 
 #define GEN12_DGFX_FEATURES \
@@ -928,13 +930,6 @@ static bool force_probe(u16 device_id, const char *devices)
 	char *s, *p, *tok;
 	bool ret;
 
-	/* FIXME: transitional */
-	if (i915_modparams.alpha_support) {
-		DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
-			 device_id);
-		return true;
-	}
-
 	if (!devices || !*devices)
 		return false;
 
@@ -968,7 +963,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	if (intel_info->require_force_probe &&
 	    !force_probe(pdev->device, i915_modparams.force_probe)) {
-		DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+		dev_info(&pdev->dev,
+			 "Your graphics device %04x is not properly supported by the driver in this\n"
 			 "kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
 			 "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
 			 "or (recommended) check for kernel updates.\n",
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b6b913..551be58 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
 				aging_tail = hw_tail;
 			stream->oa_buffer.aging_timestamp = now;
 		} else {
-			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
-				  hw_tail);
+			drm_err(&stream->perf->i915->drm,
+				"Ignoring spurious out of range OA buffer tail pointer = %x\n",
+				hw_tail);
 		}
 	}
 
@@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 	u32 taken;
 	int ret = 0;
 
-	if (WARN_ON(!stream->enabled))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
 		return -EIO;
 
 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 	 * only be incremented by multiples of the report size (notably also
 	 * all a power of two).
 	 */
-	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
-		      tail > OA_BUFFER_SIZE || tail % report_size,
-		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
-		      head, tail))
+	if (drm_WARN_ONCE(&uncore->i915->drm,
+			  head > OA_BUFFER_SIZE || head % report_size ||
+			  tail > OA_BUFFER_SIZE || tail % report_size,
+			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+			  head, tail))
 		return -EIO;
 
 
@@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 		 * here would imply a driver bug that would result
 		 * in an overrun.
 		 */
-		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
-			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+		if (drm_WARN_ON(&uncore->i915->drm,
+				(OA_BUFFER_SIZE - head) < report_size)) {
+			drm_err(&uncore->i915->drm,
+				"Spurious OA head ptr: non-integral report offset\n");
 			break;
 		}
 
@@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 	i915_reg_t oastatus_reg;
 	int ret;
 
-	if (WARN_ON(!stream->oa_buffer.vaddr))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
 		return -EIO;
 
 	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 	u32 taken;
 	int ret = 0;
 
-	if (WARN_ON(!stream->enabled))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
 		return -EIO;
 
 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 	 * only be incremented by multiples of the report size (notably also
 	 * all a power of two).
 	 */
-	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
-		      tail > OA_BUFFER_SIZE || tail % report_size,
-		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
-		      head, tail))
+	if (drm_WARN_ONCE(&uncore->i915->drm,
+			  head > OA_BUFFER_SIZE || head % report_size ||
+			  tail > OA_BUFFER_SIZE || tail % report_size,
+			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+			  head, tail))
 		return -EIO;
 
 
@@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 		 * here would imply a driver bug that would result
 		 * in an overrun.
 		 */
-		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
-			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+		if (drm_WARN_ON(&uncore->i915->drm,
+				(OA_BUFFER_SIZE - head) < report_size)) {
+			drm_err(&uncore->i915->drm,
+				"Spurious OA head ptr: non-integral report offset\n");
 			break;
 		}
 
@@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 	u32 oastatus1;
 	int ret;
 
-	if (WARN_ON(!stream->oa_buffer.vaddr))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
 		return -EIO;
 
 	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@ -1319,7 +1326,13 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	case 12: {
 		stream->specific_ctx_id_mask =
 			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
-		stream->specific_ctx_id = stream->specific_ctx_id_mask;
+		/*
+		 * Pick an unused context id:
+		 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts,
+		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by the idle context.
+		 */
+		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+		BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
 		break;
 	}
 
@@ -1327,11 +1340,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		MISSING_CASE(INTEL_GEN(ce->engine->i915));
 	}
 
-	ce->tag = stream->specific_ctx_id_mask;
+	ce->tag = stream->specific_ctx_id;
 
-	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
-			 stream->specific_ctx_id,
-			 stream->specific_ctx_id_mask);
+	drm_dbg(&stream->perf->i915->drm,
+		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+		stream->specific_ctx_id,
+		stream->specific_ctx_id_mask);
 
 	return 0;
 }
@@ -1391,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	/*
 	 * Unset exclusive_stream first, it will be checked while disabling
 	 * the metric set on gen8+.
+	 *
+	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
 	 */
-	perf->exclusive_stream = NULL;
+	WRITE_ONCE(perf->exclusive_stream, NULL);
 	perf->ops.disable_metric_set(stream);
 
 	free_oa_buffer(stream);
@@ -1575,11 +1591,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
 
 static int alloc_oa_buffer(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *i915 = stream->perf->i915;
 	struct drm_i915_gem_object *bo;
 	struct i915_vma *vma;
 	int ret;
 
-	if (WARN_ON(stream->oa_buffer.vma))
+	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
 		return -ENODEV;
 
 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
@@ -1587,7 +1604,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
 
 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
 	if (IS_ERR(bo)) {
-		DRM_ERROR("Failed to allocate OA buffer\n");
+		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
 		return PTR_ERR(bo);
 	}
 
@@ -1669,7 +1686,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 
 	bo = i915_gem_object_create_internal(i915, 4096);
 	if (IS_ERR(bo)) {
-		DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+		drm_err(&i915->drm,
+			"Failed to allocate NOA wait batchbuffer\n");
 		return PTR_ERR(bo);
 	}
 
@@ -2184,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
 	struct i915_request *rq;
 	int err;
 
+	intel_engine_pm_get(ce->engine);
 	rq = i915_request_create(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -2653,7 +2673,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
 	if (intel_wait_for_register(uncore,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
 }
 
 static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2664,7 +2685,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
 	if (intel_wait_for_register(uncore,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
 }
 
 static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2676,7 +2698,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
 				    GEN12_OAG_OACONTROL,
 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
+
+	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+	if (intel_wait_for_register(uncore,
+				    GEN12_OA_TLB_INV_CR,
+				    1, 0,
+				    50))
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA tlb invalidate timed out\n");
 }
 
 /**
@@ -2740,6 +2771,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 			       struct drm_i915_perf_open_param *param,
 			       struct perf_open_properties *props)
 {
+	struct drm_i915_private *i915 = stream->perf->i915;
 	struct i915_perf *perf = stream->perf;
 	int format_size;
 	int ret;
@@ -2796,7 +2828,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	stream->sample_size += format_size;
 
 	stream->oa_buffer.format_size = format_size;
-	if (WARN_ON(stream->oa_buffer.format_size == 0))
+	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
 		return -EINVAL;
 
 	stream->hold_preemption = props->hold_preemption;
@@ -2849,7 +2881,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		goto err_oa_buf_alloc;
 
 	stream->ops = &i915_oa_stream_ops;
-	perf->exclusive_stream = stream;
+	WRITE_ONCE(perf->exclusive_stream, stream);
 
 	ret = i915_perf_stream_enable_sync(stream);
 	if (ret) {
@@ -2869,7 +2901,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	return 0;
 
 err_enable:
-	perf->exclusive_stream = NULL;
+	WRITE_ONCE(perf->exclusive_stream, NULL);
 	perf->ops.disable_metric_set(stream);
 
 	free_oa_buffer(stream);
@@ -2895,12 +2927,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
 {
 	struct i915_perf_stream *stream;
 
-	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
 	if (engine->class != RENDER_CLASS)
 		return;
 
-	stream = engine->i915->perf.exclusive_stream;
+	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
 	/*
 	 * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
 	 * is already doing that, so nothing to be done for gen12 here.
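
The perf changes cluster around three things: exclusive_stream is now published and cleared with WRITE_ONCE()/READ_ONCE() so the lockless reader in i915_oa_init_reg_state() is well defined, gen12 streams pick a context id that cannot collide with the ids handed to other contexts, and OA disable on gen12 now also invalidates the OA TLB. The recurring head/tail sanity check in the gen7/gen8 append paths is the part worth a worked example: because OA_BUFFER_SIZE is a power of two, validating range and alignment up front lets the report walk treat wrap-around as a simple mask. A standalone sketch (buffer and report sizes invented for illustration):

  #include <stdint.h>
  #include <stdio.h>

  #define OA_BUFFER_SIZE	4096u	/* must be a power of two */
  #define REPORT_SIZE	64u	/* fixed-size records */

  /*
   * Walk the reports between head and tail in a power-of-two ring.
   * Returns the number of reports consumed, or -1 if the pointers are
   * inconsistent (out of range or misaligned), mirroring the -EIO
   * bail-out in the hunks above.
   */
  static int append_reports(const uint8_t *buf, uint32_t head, uint32_t tail)
  {
  	int n = 0;

  	/* Sanity: both pointers inside the buffer and report-aligned. */
  	if (head > OA_BUFFER_SIZE || head % REPORT_SIZE ||
  	    tail > OA_BUFFER_SIZE || tail % REPORT_SIZE)
  		return -1;

  	/* Reduce to offsets within the ring before walking. */
  	head &= OA_BUFFER_SIZE - 1;
  	tail &= OA_BUFFER_SIZE - 1;

  	while (head != tail) {
  		const uint8_t *report = buf + head;

  		(void)report;	/* a real consumer would copy it out */
  		n++;

  		/* The mask implements wrap-around for free. */
  		head = (head + REPORT_SIZE) & (OA_BUFFER_SIZE - 1);
  	}

  	return n;
  }

  int main(void)
  {
  	static uint8_t buf[OA_BUFFER_SIZE];

  	printf("%d reports\n", append_reports(buf, 4032, 128));	/* 3, wraps */
  	printf("%d on bogus tail\n", append_reports(buf, 0, 100));	/* -1 */
  	return 0;
  }

An inconsistent pair is warned about once and the read fails with -EIO rather than risking a walk outside the buffer.
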
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index aa729d0..2c06253 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -448,7 +448,7 @@ static void engine_event_destroy(struct perf_event *event)
 	engine = intel_engine_lookup_user(i915,
 					  engine_event_class(event),
 					  engine_event_instance(event));
-	if (WARN_ON_ONCE(!engine))
+	if (drm_WARN_ON_ONCE(&i915->drm, !engine))
 		return;
 
 	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
@@ -584,7 +584,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 						  engine_event_class(event),
 						  engine_event_instance(event));
 
-		if (WARN_ON_ONCE(!engine)) {
+		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
 			/* Do nothing */
 		} else if (sample == I915_SAMPLE_BUSY &&
 			   intel_engine_supports_stats(engine)) {
@@ -1188,7 +1188,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 	if (!pmu->base.event_init)
 		return;
 
-	WARN_ON(pmu->enable);
+	drm_WARN_ON(&i915->drm, pmu->enable);
 
 	hrtimer_cancel(&pmu->timer);
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index f1d6cad..941f0c1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
 #include <linux/hrtimer.h>
 #include <linux/perf_event.h>
 #include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
 
 struct drm_i915_private;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3575fd3..59e64ac 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OABUFFER_SIZE_8M    (6 << 3)
 #define OABUFFER_SIZE_16M   (7 << 3)
 
+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
 /* Gen12 OAR unit */
 #define GEN12_OAR_OACONTROL _MMIO(0x2960)
 #define  GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -2626,6 +2628,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define IPEIR_I965	_MMIO(0x2064)
 #define IPEHR_I965	_MMIO(0x2068)
 #define GEN7_SC_INSTDONE	_MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA		_MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2	_MMIO(0x7108)
 #define GEN7_SAMPLER_INSTDONE	_MMIO(0xe160)
 #define GEN7_ROW_INSTDONE	_MMIO(0xe164)
 #define GEN8_MCR_SELECTOR		_MMIO(0xfdc)
@@ -2639,6 +2643,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN11_MCR_SUBSLICE_MASK	GEN11_MCR_SUBSLICE(0x7)
 #define RING_IPEIR(base)	_MMIO((base) + 0x64)
 #define RING_IPEHR(base)	_MMIO((base) + 0x68)
+#define RING_EIR(base)		_MMIO((base) + 0xb0)
+#define RING_EMR(base)		_MMIO((base) + 0xb4)
+#define RING_ESR(base)		_MMIO((base) + 0xb8)
 /*
  * On GEN4, only the render ring INSTDONE exists and has a different
  * layout than the GEN7+ version.
@@ -2860,6 +2867,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define MI_ARB_STATE	_MMIO(0x20e4) /* 915+ only */
 
 #define MBUS_ABOX_CTL			_MMIO(0x45038)
+#define MBUS_ABOX1_CTL			_MMIO(0x45048)
+#define MBUS_ABOX2_CTL			_MMIO(0x4504C)
 #define MBUS_ABOX_BW_CREDIT_MASK	(3 << 20)
 #define MBUS_ABOX_BW_CREDIT(x)		((x) << 20)
 #define MBUS_ABOX_B_CREDIT_MASK		(0xF << 16)
@@ -3088,7 +3097,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GT_CONTEXT_SWITCH_INTERRUPT		(1 <<  8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
-#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
+#define GT_CS_MASTER_ERROR_INTERRUPT		REG_BIT(3)
 #define GT_RENDER_SYNC_STATUS_INTERRUPT		(1 <<  2)
 #define GT_RENDER_DEBUG_INTERRUPT		(1 <<  1)
 #define GT_RENDER_USER_INTERRUPT		(1 <<  0)
@@ -3160,6 +3169,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN7_FF_THREAD_MODE		_MMIO(0x20a0)
 #define   GEN7_FF_SCHED_MASK		0x0077070
 #define   GEN8_FF_DS_REF_CNT_FFME	(1 << 19)
+#define   GEN12_FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
 #define   GEN7_FF_TS_SCHED_HS1		(0x5 << 16)
 #define   GEN7_FF_TS_SCHED_HS0		(0x3 << 16)
 #define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1 << 16)
@@ -3277,6 +3287,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 /* Framebuffer compression for Ivybridge */
 #define IVB_FBC_RT_BASE			_MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER		_MMIO(0x7024)
 
 #define IPS_CTL		_MMIO(0x43408)
 #define   IPS_ENABLE	(1 << 31)
@@ -3743,8 +3754,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   MCH_SSKPD_WM0_MASK		0x3f
 #define   MCH_SSKPD_WM0_VAL		0xc
 
-#define MCH_SECP_NRG_STTS		_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
-
 /* Clocking configuration register */
 #define CLKCFG			_MMIO(MCHBAR_MIRROR_BASE + 0xc00)
 #define CLKCFG_FSB_400					(5 << 0)	/* hrawclk 100 */
@@ -4124,6 +4133,9 @@ enum {
 #define   PWM2_GATING_DIS		(1 << 14)
 #define   PWM1_GATING_DIS		(1 << 13)
 
+#define GEN9_CLKGATE_DIS_3		_MMIO(0x46538)
+#define   TGL_VRH_GATING_DIS		REG_BIT(31)
+
 #define GEN9_CLKGATE_DIS_4		_MMIO(0x4653C)
 #define   BXT_GMBUS_GATING_DIS		(1 << 14)
 
@@ -4851,16 +4863,6 @@ enum {
 #define _PP_STATUS			0x61200
 #define PP_STATUS(pps_idx)		_MMIO_PPS(pps_idx, _PP_STATUS)
 #define   PP_ON				REG_BIT(31)
-
-#define _PP_CONTROL_1			0xc7204
-#define _PP_CONTROL_2			0xc7304
-#define ICP_PP_CONTROL(x)		_MMIO(((x) == 1) ? _PP_CONTROL_1 : \
-					      _PP_CONTROL_2)
-#define  POWER_CYCLE_DELAY_MASK		REG_GENMASK(8, 4)
-#define  VDD_OVERRIDE_FORCE		REG_BIT(3)
-#define  BACKLIGHT_ENABLE		REG_BIT(2)
-#define  PWR_DOWN_ON_RESET		REG_BIT(1)
-#define  PWR_STATE_TARGET		REG_BIT(0)
 /*
  * Indicates that all dependencies of the panel are on:
  *
@@ -4921,6 +4923,7 @@ enum {
 #define   PFIT_ENABLE		(1 << 31)
 #define   PFIT_PIPE_MASK	(3 << 29)
 #define   PFIT_PIPE_SHIFT	29
+#define   PFIT_PIPE(pipe)	((pipe) << 29)
 #define   VERT_INTERP_DISABLE	(0 << 10)
 #define   VERT_INTERP_BILINEAR	(1 << 10)
 #define   VERT_INTERP_MASK	(3 << 10)
@@ -5870,7 +5873,6 @@ enum {
 
 #define  _PIPEAGCMAX           0x70010
 #define  _PIPEBGCMAX           0x71010
-#define PIPEGCMAX_RGB_MASK     REG_GENMASK(15, 0)
 #define PIPEGCMAX(pipe, i)     _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
 
 #define _PIPE_MISC_A			0x70030
@@ -5879,6 +5881,7 @@ enum {
 #define   PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
 #define   PIPEMISC_HDR_MODE_PRECISION	(1 << 23) /* icl+ */
 #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
+#define   PIPEMISC_PIXEL_ROUNDING_TRUNC	REG_BIT(8) /* tgl+ */
 #define   PIPEMISC_DITHER_BPC_MASK	(7 << 5)
 #define   PIPEMISC_DITHER_8_BPC		(0 << 5)
 #define   PIPEMISC_DITHER_10_BPC	(1 << 5)
@@ -7745,9 +7748,9 @@ enum {
 #define DISP_ARB_CTL2	_MMIO(0x45004)
 #define  DISP_DATA_PARTITION_5_6	(1 << 6)
 #define  DISP_IPC_ENABLE		(1 << 3)
-#define DBUF_CTL	_MMIO(0x45008)
-#define DBUF_CTL_S1	_MMIO(0x45008)
-#define DBUF_CTL_S2	_MMIO(0x44FE8)
+#define _DBUF_CTL_S1			0x45008
+#define _DBUF_CTL_S2			0x44FE8
+#define DBUF_CTL_S(slice)		_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
 #define  DBUF_POWER_REQUEST		(1 << 31)
 #define  DBUF_POWER_STATE		(1 << 30)
 #define GEN7_MSG_CTL	_MMIO(0x45010)
@@ -7767,6 +7770,7 @@ enum {
 
 #define GEN8_CHICKEN_DCPR_1		_MMIO(0x46430)
 #define   SKL_SELECT_ALTERNATE_DC_EXIT	(1 << 30)
+#define   CNL_DELAY_PMRSP		(1 << 22)
 #define   MASK_WAKEMEM			(1 << 13)
 #define   CNL_DDI_CLOCK_REG_ACCESS_ON	(1 << 7)
 
@@ -8988,6 +8992,8 @@ enum {
 #define     GEN6_PCODE_UNIMPLEMENTED_CMD	0xFF
 #define     GEN7_PCODE_TIMEOUT			0x2
 #define     GEN7_PCODE_ILLEGAL_DATA		0x3
+#define     GEN11_PCODE_ILLEGAL_SUBCOMMAND	0x4
+#define     GEN11_PCODE_LOCKED			0x6
 #define     GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
 #define   GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define   GEN6_PCODE_READ_RC6VIDS		0x5
@@ -9136,12 +9142,19 @@ enum {
 #define   THROTTLE_12_5				(7 << 2)
 #define   DISABLE_EARLY_EOT			(1 << 1)
 
-#define GEN7_ROW_CHICKEN2		_MMIO(0xe4f4)
+#define GEN7_ROW_CHICKEN2			_MMIO(0xe4f4)
+#define   GEN12_DISABLE_EARLY_READ		REG_BIT(14)
+#define   GEN12_PUSH_CONST_DEREF_HOLD_DIS	REG_BIT(8)
+
 #define GEN7_ROW_CHICKEN2_GT2		_MMIO(0xf4f4)
 #define   DOP_CLOCK_GATING_DISABLE	(1 << 0)
 #define   PUSH_CONSTANT_DEREF_DISABLE	(1 << 8)
 #define   GEN11_TDL_CLOCK_GATING_FIX_DISABLE	(1 << 1)
 
+#define GEN9_ROW_CHICKEN4		_MMIO(0xe48c)
+#define   GEN12_DISABLE_TDL_PUSH	REG_BIT(9)
+#define   GEN11_DIS_PICK_2ND_EU		REG_BIT(7)
+
 #define HSW_ROW_CHICKEN3		_MMIO(0xe49c)
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
@@ -9242,6 +9255,10 @@ enum {
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250	(7 << 16)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352	(8 << 16)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500	(9 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_296703	(10 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_297000	(11 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_593407	(12 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_594000	(13 << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
 /* HSW Audio */
@@ -10533,13 +10550,13 @@ enum skl_power_gate {
 #define  D_COMP_COMP_DISABLE		(1 << 0)
 
 /* Pipe WM_LINETIME - watermark line time */
-#define _PIPE_WM_LINETIME_A		0x45270
-#define _PIPE_WM_LINETIME_B		0x45274
-#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
-#define   PIPE_WM_LINETIME_MASK			(0x1ff)
-#define   PIPE_WM_LINETIME_TIME(x)		((x))
-#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff << 16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x) << 16)
+#define _WM_LINETIME_A		0x45270
+#define _WM_LINETIME_B		0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define  HSW_LINETIME_MASK	REG_GENMASK(8, 0)
+#define  HSW_LINETIME(x)	REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define  HSW_IPS_LINETIME_MASK	REG_GENMASK(24, 16)
+#define  HSW_IPS_LINETIME(x)	REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
 
 /* SFUSE_STRAP */
 #define SFUSE_STRAP			_MMIO(0xc2014)
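
Register-wise, the notable pattern in the i915_reg.h hunks is the migration from open-coded shift/mask pairs to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(), as in the WM_LINETIME block, plus DBUF_CTL growing a slice parameter now that gen11 reports two dbuf slices. Simplified userspace renditions of those field helpers (the kernel versions add compile-time validation that these omit, and __builtin_ctz is a GCC/Clang builtin):

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified REG_BIT/REG_GENMASK/REG_FIELD_PREP-style helpers. */
  #define REG_BIT(n)		(1u << (n))
  #define REG_GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))
  #define REG_FIELD_PREP(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))
  #define REG_FIELD_GET(mask, r)	(((r) & (mask)) >> __builtin_ctz(mask))

  /*
   * Field layout from the WM_LINETIME hunk: a 9-bit line time in bits
   * 8:0 and a 9-bit IPS line time in bits 24:16.
   */
  #define LINETIME_MASK		REG_GENMASK(8, 0)
  #define IPS_LINETIME_MASK	REG_GENMASK(24, 16)

  int main(void)
  {
  	uint32_t reg = 0;

  	reg |= REG_FIELD_PREP(LINETIME_MASK, 0x123);
  	reg |= REG_FIELD_PREP(IPS_LINETIME_MASK, 0x0aa);

  	printf("reg = 0x%08x\n", reg);
  	printf("linetime = 0x%x, ips = 0x%x\n",
  	       REG_FIELD_GET(LINETIME_MASK, reg),
  	       REG_FIELD_GET(IPS_LINETIME_MASK, reg));
  	return 0;
  }

REG_FIELD_PREP keeps the field width and position in one place, so encoding a value into the 8:0 line-time field and reading it back needs no bare shifts at the call sites.
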
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a18b2a2..c0df71d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
 static struct i915_global_request {
 	struct i915_global base;
 	struct kmem_cache *slab_requests;
-	struct kmem_cache *slab_dependencies;
 	struct kmem_cache *slab_execute_cbs;
 } global;
 
@@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+	void *vaddr = rq->ring->vaddr;
+	u32 head;
+
+	head = rq->infix;
+	if (rq->postfix < head) {
+		memset(vaddr + head, val, rq->ring->size - head);
+		head = 0;
+	}
+	memset(vaddr + head, val, rq->postfix - head);
+}
+
 static void remove_from_engine(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine, *locked;
@@ -247,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq)
 	 */
 	GEM_BUG_ON(!list_is_first(&rq->link,
 				  &i915_request_timeline(rq)->requests));
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		/* Poison before we release our space in the ring */
+		__i915_request_fill(rq, POISON_FREE);
 	rq->ring->head = rq->postfix;
 
 	/*
@@ -275,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
 	spin_unlock_irq(&rq->lock);
 
 	remove_from_client(rq);
-	list_del_rcu(&rq->link);
+	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
 
 	intel_context_exit(rq->context);
 	intel_context_unpin(rq->context);
@@ -348,6 +363,50 @@ __await_execution(struct i915_request *rq,
 	return 0;
 }
 
+static bool fatal_error(int error)
+{
+	switch (error) {
+	case 0: /* not an error! */
+	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+		return false;
+	default:
+		return true;
+	}
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+	GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+	if (rq->infix == rq->postfix)
+		return;
+
+	/*
+	 * As this request likely depends on state from the lost
+	 * context, clear out all the user operations leaving the
+	 * breadcrumb at the end (so we get the fence notifications).
+	 */
+	__i915_request_fill(rq, 0);
+	rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+	int old;
+
+	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+	if (i915_request_signaled(rq))
+		return;
+
+	old = READ_ONCE(rq->fence.error);
+	do {
+		if (fatal_error(old))
+			return;
+	} while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
+
 bool __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -377,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
 
-	if (intel_context_is_banned(request->context))
-		i915_request_skip(request, -EIO);
+	if (unlikely(intel_context_is_banned(request->context)))
+		i915_request_set_error_once(request, -EIO);
+	if (unlikely(fatal_error(request->fence.error)))
+		__i915_request_skip(request);
 
 	/*
 	 * Are we using semaphores when the gpu is already saturated?
@@ -504,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 		trace_i915_request_submit(request);
 
 		if (unlikely(fence->error))
-			i915_request_skip(request, fence->error);
+			i915_request_set_error_once(request, fence->error);
 
 		/*
 		 * We need to serialize use of the submit_request() callback
@@ -688,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
+	GEM_BUG_ON(i915_request_completed(rq));
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
@@ -857,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
 	 *
 	 * See the are-we-too-late? check in __i915_request_submit().
 	 */
-	return rq->sched.semaphores | rq->engine->saturated;
+	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
 }
 
 static int
@@ -915,8 +977,16 @@ emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
 {
+	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
+	if (!intel_context_use_semaphores(to->context))
+		goto await_fence;
+
+	if (!rcu_access_pointer(from->hwsp_cacheline))
+		goto await_fence;
+
 	/* Just emit the first semaphore we see as request space is limited. */
-	if (already_busywaiting(to) & from->engine->mask)
+	if (already_busywaiting(to) & mask)
 		goto await_fence;
 
 	if (i915_request_await_start(to, from) < 0)
@@ -929,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
 	if (__emit_semaphore_wait(to, from, from->fence.seqno))
 		goto await_fence;
 
-	to->sched.semaphores |= from->engine->mask;
+	to->sched.semaphores |= mask;
 	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 	return 0;
 
@@ -960,12 +1030,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-	else if (intel_context_use_semaphores(to->context))
-		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	else
-		ret = i915_sw_fence_await_dma_fence(&to->submit,
-						    &from->fence, 0,
-						    I915_FENCE_GFP);
+		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	if (ret < 0)
 		return ret;
 
@@ -1064,6 +1130,8 @@ __i915_request_await_execution(struct i915_request *to,
 {
 	int err;
 
+	GEM_BUG_ON(intel_context_is_barrier(from->context));
+
 	/* Submit both requests at the same time */
 	err = __await_execution(to, from, hook, I915_FENCE_GFP);
 	if (err)
@@ -1074,14 +1142,45 @@ __i915_request_await_execution(struct i915_request *to,
 					  &from->fence))
 		return 0;
 
-	/* Ensure both start together [after all semaphores in signal] */
-	if (intel_engine_has_semaphores(to->engine))
-		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
-	else
-		err = i915_request_await_start(to, from);
+	/*
+	 * Wait until the start of this request.
+	 *
+	 * The execution cb fires when we submit the request to HW. But in
+	 * many cases this may be long before the request itself is ready to
+	 * run (consider that we submit 2 requests for the same context, where
+	 * the request of interest is behind an indefinite spinner). So we hook
+	 * up to both to reduce our queues and keep the execution lag minimised
+	 * in the worst case, though we hope that the await_start is elided.
+	 */
+	err = i915_request_await_start(to, from);
 	if (err < 0)
 		return err;
 
+	/*
+	 * Ensure both start together [after all semaphores in signal]
+	 *
+	 * Now that we are queued to the HW at roughly the same time (thanks
+	 * to the execute cb) and are ready to run at roughly the same time
+	 * (thanks to the await start), our signaler may still be indefinitely
+	 * delayed by waiting on a semaphore from a remote engine. If our
+	 * signaler depends on a semaphore, so indirectly do we, and we do not
+	 * want to start our payload until our signaler also starts theirs.
+	 * So we wait.
+	 *
+	 * However, there is also a second condition for which we need to wait
+	 * for the precise start of the signaler. Consider that the signaler
+	 * was submitted in a chain of requests following another context
+	 * (with just an ordinary intra-engine fence dependency between the
+	 * two). In this case the signaler is queued to HW, but not for
+	 * immediate execution, and so we must wait until it reaches the
+	 * active slot.
+	 */
+	if (intel_engine_has_semaphores(to->engine)) {
+		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+		if (err < 0)
+			return err;
+	}
+
 	/* Couple the dependency tree for PI on this exposed to->fence */
 	if (to->engine->schedule) {
 		err = i915_sched_node_add_dependency(&to->sched, &from->sched);
@@ -1202,31 +1301,6 @@ i915_request_await_object(struct i915_request *to,
 	return ret;
 }
 
-void i915_request_skip(struct i915_request *rq, int error)
-{
-	void *vaddr = rq->ring->vaddr;
-	u32 head;
-
-	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
-	dma_fence_set_error(&rq->fence, error);
-
-	if (rq->infix == rq->postfix)
-		return;
-
-	/*
-	 * As this request likely depends on state from the lost
-	 * context, clear out all the user operations leaving the
-	 * breadcrumb at the end (so we get the fence notifications).
-	 */
-	head = rq->infix;
-	if (rq->postfix < head) {
-		memset(vaddr + head, 0, rq->ring->size - head);
-		head = 0;
-	}
-	memset(vaddr + head, 0, rq->postfix - head);
-	rq->infix = rq->postfix;
-}
-
 static struct i915_request *
 __i915_request_add_to_timeline(struct i915_request *rq)
 {
@@ -1256,7 +1330,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
 	if (prev && !i915_request_completed(prev)) {
-		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+		/*
+		 * The requests are supposed to be kept in order. However,
+		 * we need to be wary in case the timeline->last_request
+		 * is used as a barrier for external modification to this
+		 * context.
+		 */
+		GEM_BUG_ON(prev->context == rq->context &&
+			   i915_seqno_passed(prev->fence.seqno,
+					     rq->fence.seqno));
+
+		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
 			i915_sw_fence_await_sw_fence(&rq->submit,
 						     &prev->submit,
 						     &rq->submitq);
@@ -1340,39 +1424,23 @@ void i915_request_add(struct i915_request *rq)
 {
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_sched_attr attr = {};
-	struct i915_request *prev;
+	struct i915_gem_context *ctx;
 
 	lockdep_assert_held(&tl->mutex);
 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
 
 	trace_i915_request_add(rq);
+	__i915_request_commit(rq);
 
-	prev = __i915_request_commit(rq);
+	/* XXX placeholder for selftests */
+	rcu_read_lock();
+	ctx = rcu_dereference(rq->context->gem_context);
+	if (ctx)
+		attr = ctx->sched;
+	rcu_read_unlock();
 
-	if (rcu_access_pointer(rq->context->gem_context))
-		attr = i915_request_gem_context(rq)->sched;
-
-	/*
-	 * Boost actual workloads past semaphores!
-	 *
-	 * With semaphores we spin on one engine waiting for another,
-	 * simply to reduce the latency of starting our work when
-	 * the signaler completes. However, if there is any other
-	 * work that we could be doing on this engine instead, that
-	 * is better utilisation and will reduce the overall duration
-	 * of the current work. To avoid PI boosting a semaphore
-	 * far in the distance past over useful work, we keep a history
-	 * of any semaphore use along our dependency chain.
-	 */
 	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
 		attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
-	/*
-	 * Boost priorities to new clients (new request flows).
-	 *
-	 * Allow interactive/synchronous clients to jump ahead of
-	 * the bulk clients. (FQ_CODEL)
-	 */
 	if (list_empty(&rq->sched.signalers_list))
 		attr.priority |= I915_PRIORITY_WAIT;
 
@@ -1380,32 +1448,10 @@ void i915_request_add(struct i915_request *rq)
 	__i915_request_queue(rq, &attr);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 
-	/*
-	 * In typical scenarios, we do not expect the previous request on
-	 * the timeline to be still tracked by timeline->last_request if it
-	 * has been completed. If the completed request is still here, that
-	 * implies that request retirement is a long way behind submission,
-	 * suggesting that we haven't been retiring frequently enough from
-	 * the combination of retire-before-alloc, waiters and the background
-	 * retirement worker. So if the last request on this timeline was
-	 * already completed, do a catch up pass, flushing the retirement queue
-	 * up to this client. Since we have now moved the heaviest operations
-	 * during retirement onto secondary workers, such as freeing objects
-	 * or contexts, retiring a bunch of requests is mostly list management
-	 * (and cache misses), and so we should not be overly penalizing this
-	 * client by performing excess work, though we may still performing
-	 * work on behalf of others -- but instead we should benefit from
-	 * improved resource management. (Well, that's the theory at least.)
-	 */
-	if (prev &&
-	    i915_request_completed(prev) &&
-	    rcu_access_pointer(prev->timeline) == tl)
-		i915_request_retire_upto(prev);
-
 	mutex_unlock(&tl->mutex);
 }
 
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
 {
 	unsigned long t;
 
@@ -1422,7 +1468,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
 	 * stop busywaiting, see busywait_stop().
 	 */
 	*cpu = get_cpu();
-	t = local_clock() >> 10;
+	t = local_clock();
 	put_cpu();
 
 	return t;
@@ -1432,15 +1478,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
 {
 	unsigned int this_cpu;
 
-	if (time_after(local_clock_us(&this_cpu), timeout))
+	if (time_after(local_clock_ns(&this_cpu), timeout))
 		return true;
 
 	return this_cpu != cpu;
 }
 
-static bool __i915_spin_request(const struct i915_request * const rq,
-				int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
 {
+	unsigned long timeout_ns;
 	unsigned int cpu;
 
 	/*
@@ -1468,7 +1514,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	timeout_us += local_clock_us(&cpu);
+	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+	timeout_ns += local_clock_ns(&cpu);
 	do {
 		if (i915_request_completed(rq))
 			return true;
@@ -1476,7 +1523,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
 		if (signal_pending_state(state, current))
 			break;
 
-		if (busywait_stop(timeout_us, cpu))
+		if (busywait_stop(timeout_ns, cpu))
 			break;
 
 		cpu_relax();
@@ -1562,8 +1609,8 @@ long i915_request_wait(struct i915_request *rq,
 	 * completion. That requires having a good predictor for the request
 	 * duration, which we currently lack.
 	 */
-	if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
-	    __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+	    __i915_spin_request(rq, state)) {
 		dma_fence_signal(&rq->fence);
 		goto out;
 	}
@@ -1598,6 +1645,8 @@ long i915_request_wait(struct i915_request *rq,
 			break;
 		}
 
+		intel_engine_flush_submission(rq->engine);
+
 		if (signal_pending_state(state, current)) {
 			timeout = -ERESTARTSYS;
 			break;
@@ -1608,7 +1657,6 @@ long i915_request_wait(struct i915_request *rq,
 			break;
 		}
 
-		intel_engine_flush_submission(rq->engine);
 		timeout = io_schedule_timeout(timeout);
 	}
 	__set_current_state(TASK_RUNNING);
@@ -1628,14 +1676,12 @@ long i915_request_wait(struct i915_request *rq,
 
 static void i915_global_request_shrink(void)
 {
-	kmem_cache_shrink(global.slab_dependencies);
 	kmem_cache_shrink(global.slab_execute_cbs);
 	kmem_cache_shrink(global.slab_requests);
 }
 
 static void i915_global_request_exit(void)
 {
-	kmem_cache_destroy(global.slab_dependencies);
 	kmem_cache_destroy(global.slab_execute_cbs);
 	kmem_cache_destroy(global.slab_requests);
 }
@@ -1665,17 +1711,9 @@ int __init i915_global_request_init(void)
 	if (!global.slab_execute_cbs)
 		goto err_requests;
 
-	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN |
-					      SLAB_RECLAIM_ACCOUNT);
-	if (!global.slab_dependencies)
-		goto err_execute_cbs;
-
 	i915_global_register(&global.base);
 	return 0;
 
-err_execute_cbs:
-	kmem_cache_destroy(global.slab_execute_cbs);
 err_requests:
 	kmem_cache_destroy(global.slab_requests);
 	return -ENOMEM;
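
i915_request_set_error_once() above records only the first fatal error on
a request by looping on try_cmpxchg(). A reduced sketch of the pattern
(hypothetical names; the real helper also tolerates non-fatal errors):

	static void set_error_once(int *slot, int error)
	{
		int old = READ_ONCE(*slot);

		do {
			if (old)	/* an earlier error already won */
				return;
		} while (!try_cmpxchg(slot, &old, error));
	}

try_cmpxchg() refreshes 'old' on failure, so each retry tests the value
that beat us rather than re-reading it by hand.
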
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index fccc339..3c552bf 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -305,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
 struct i915_request * __must_check
 i915_request_create(struct intel_context *ce);
 
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
 struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,
 			  const struct i915_sched_attr *attr);
@@ -354,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
 bool __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
-void i915_request_skip(struct i915_request *request, int error);
-
 void __i915_request_unsubmit(struct i915_request *request);
 void i915_request_unsubmit(struct i915_request *request);
 
@@ -397,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 
 static inline u32 __hwsp_seqno(const struct i915_request *rq)
 {
-	return READ_ONCE(*rq->hwsp_seqno);
+	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+	return READ_ONCE(*hwsp);
 }
 
 /**
@@ -481,7 +484,7 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
 }
 
 /**
- * i915_request_is_running - check if the request is ready for execution
+ * i915_request_is_ready - check if the request is ready for execution
  * @rq: the request
  *
  * Upon construction, the request is instructed to wait upon various
@@ -511,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
 {
-	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+		   (u32 *)&rq->fence.seqno);
 }
 
 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
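
Note the pairing in this header: __hwsp_seqno() now loads the hwsp_seqno
pointer with READ_ONCE() before dereferencing it, matching the WRITE_ONCE()
in i915_request_mark_complete() that repoints it at the fence seqno. In a
reduced form (hypothetical names):

	static u32 load_seqno(const u32 **slot)
	{
		const u32 *hwsp = READ_ONCE(*slot);	/* pointer may be republished */

		return READ_ONCE(*hwsp);		/* value is written by the GPU */
	}
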
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 34b654b..68b06a7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (!inflight)
 		goto unlock;
 
+	engine->execlists.queue_priority_hint = prio;
+
 	/*
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (inflight->context == rq->context)
 		goto unlock;
 
-	engine->execlists.queue_priority_hint = prio;
 	if (need_preempt(prio, rq_prio(inflight)))
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 
@@ -227,10 +228,10 @@ static void kick_submission(struct intel_engine_cs *engine,
 static void __i915_schedule(struct i915_sched_node *node,
 			    const struct i915_sched_attr *attr)
 {
+	const int prio = max(attr->priority, node->attr.priority);
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
-	const int prio = attr->priority;
 	struct sched_cache cache;
 	LIST_HEAD(dfs);
 
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
 	lockdep_assert_held(&schedule_lock);
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-	if (prio <= READ_ONCE(node->attr.priority))
-		return;
-
 	if (node_signaled(node))
 		return;
 
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		GEM_BUG_ON(node_to_request(node)->engine != engine);
 
-		node->attr.priority = prio;
+		WRITE_ONCE(node->attr.priority, prio);
 
 		/*
 		 * Once the request is ready, it will be placed into the
@@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
 {
 	struct i915_sched_attr attr = node->attr;
 
+	if (attr.priority & bump)
+		return;
+
 	attr.priority |= bump;
 	__i915_schedule(node, &attr);
 }
@@ -433,7 +434,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 
 		/* All set, now publish. Beware the lockless walkers. */
-		list_add(&dep->signal_link, &node->signalers_list);
+		list_add_rcu(&dep->signal_link, &node->signalers_list);
 		list_add_rcu(&dep->wait_link, &signal->waiters_list);
 
 		/*
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->wait_link);
+		list_del_rcu(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		GEM_BUG_ON(dep->signaler != node);
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
-		list_del(&dep->signal_link);
+		list_del_rcu(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
 int __init i915_global_scheduler_init(void)
 {
 	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN);
+					      SLAB_HWCACHE_ALIGN |
+					      SLAB_TYPESAFE_BY_RCU);
 	if (!global.slab_dependencies)
 		return -ENOMEM;
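
With dependencies now published via list_add_rcu() and the slab marked
SLAB_TYPESAFE_BY_RCU, lockless readers may walk the signalers list under
rcu_read_lock(). A sketch of such a walker (hypothetical types):

	struct dep {
		struct list_head signal_link;
		int priority;
	};

	static int max_signaler_prio(struct list_head *signalers)
	{
		struct dep *d;
		int prio = INT_MIN;

		rcu_read_lock();	/* entries may be unlinked concurrently */
		list_for_each_entry_rcu(d, signalers, signal_link)
			prio = max(prio, READ_ONCE(d->priority));
		rcu_read_unlock();

		return prio;
	}

SLAB_TYPESAFE_BY_RCU only guarantees the memory remains a 'struct dep'
until the grace period ends, not that it is still the same dep, so real
walkers revalidate whatever they find.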
 
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd..ed2be34 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/i915_drm.h>
-
 #include "display/intel_fbc.h"
 #include "display/intel_gmbus.h"
 #include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 51ba97d..a3d38e0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence)
 	__i915_sw_fence_complete(fence, NULL);
 }
 
-void i915_sw_fence_await(struct i915_sw_fence *fence)
+bool i915_sw_fence_await(struct i915_sw_fence *fence)
 {
-	debug_fence_assert(fence);
-	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+	int pending;
+
+	/*
+	 * It is only safe to add a new await to the fence while it has
+	 * not yet been signaled (i.e. there are still existing signalers).
+	 */
+	pending = atomic_read(&fence->pending);
+	do {
+		if (pending < 1)
+			return false;
+	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));
+
+	return true;
 }
 
 void __i915_sw_fence_init(struct i915_sw_fence *fence,
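
Since i915_sw_fence_await() can now fail once the fence has already
signaled, callers are expected to check the result, along the lines of
(hypothetical caller and error code):

	if (!i915_sw_fence_await(fence))
		return -EBUSY;	/* too late: fence already signaled */
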
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 19e806c..30a8633 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -91,7 +91,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    unsigned long timeout,
 				    gfp_t gfp);
 
-void i915_sw_fence_await(struct i915_sw_fence *fence);
+bool i915_sw_fence_await(struct i915_sw_fence *fence);
 void i915_sw_fence_complete(struct i915_sw_fence *fence);
 
 static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 39c79e1..ed69b5d 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
 	 */
-	return i915 && i915->drm.open_count == 0;
+	return i915 && atomic_read(&i915->drm.open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 0cef313..45d32ef 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
 
 #include "gt/intel_rc6.h"
 #include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
 
 #include "i915_drv.h"
 #include "i915_sysfs.h"
@@ -525,7 +526,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 	struct device *kdev = kobj_to_dev(kobj);
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 
-	DRM_DEBUG_DRIVER("Resetting error state\n");
+	drm_dbg(&dev_priv->drm, "Resetting error state\n");
 	i915_reset_error_state(dev_priv);
 
 	return count;
@@ -564,31 +565,36 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
-			DRM_ERROR("RC6 residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"RC6 residency sysfs setup failed\n");
 	}
 	if (HAS_RC6p(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
 					&rc6p_attr_group);
 		if (ret)
-			DRM_ERROR("RC6p residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"RC6p residency sysfs setup failed\n");
 	}
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
 					&media_rc6_attr_group);
 		if (ret)
-			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"Media RC6 residency sysfs setup failed\n");
 	}
 #endif
 	if (HAS_L3_DPF(dev_priv)) {
 		ret = device_create_bin_file(kdev, &dpf_attrs);
 		if (ret)
-			DRM_ERROR("l3 parity sysfs setup failed\n");
+			drm_err(&dev_priv->drm,
+				"l3 parity sysfs setup failed\n");
 
 		if (NUM_L3_SLICES(dev_priv) > 1) {
 			ret = device_create_bin_file(kdev,
 						     &dpf_attrs_1);
 			if (ret)
-				DRM_ERROR("l3 parity slice 1 setup failed\n");
+				drm_err(&dev_priv->drm,
+					"l3 parity slice 1 setup failed\n");
 		}
 	}
 
@@ -598,9 +604,11 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 	else if (INTEL_GEN(dev_priv) >= 6)
 		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
 	if (ret)
-		DRM_ERROR("RPS sysfs setup failed\n");
+		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
 
 	i915_setup_error_capture(kdev);
+
+	intel_engines_add_sysfs(dev_priv);
 }
 
 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 233a97a..bc854ad 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -339,6 +339,68 @@ TRACE_EVENT(intel_disable_plane,
 		      __entry->frame, __entry->scanline)
 );
 
+/* fbc */
+
+TRACE_EVENT(intel_fbc_activate,
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+	    TP_PROTO(struct intel_crtc *crtc),
+	    TP_ARGS(crtc),
+
+	    TP_STRUCT__entry(
+			     __field(enum pipe, pipe)
+			     __field(u32, frame)
+			     __field(u32, scanline)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pipe = crtc->pipe;
+			   __entry->frame = intel_crtc_get_vblank_counter(crtc);
+			   __entry->scanline = intel_get_crtc_scanline(crtc);
+			   ),
+
+	    TP_printk("pipe %c, frame=%u, scanline=%u",
+		      pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
 /* pipe updates */
 
 TRACE_EVENT(intel_pipe_update_start,
@@ -738,7 +800,7 @@ TRACE_EVENT(i915_request_in,
 			     __field(u16, instance)
 			     __field(u32, seqno)
 			     __field(u32, port)
-			     __field(u32, prio)
+			     __field(s32, prio)
 			    ),
 
 	    TP_fast_assign(
@@ -751,7 +813,7 @@ TRACE_EVENT(i915_request_in,
 			   __entry->port = port;
 			   ),
 
-	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+	    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
 		      __entry->dev, __entry->class, __entry->instance,
 		      __entry->ctx, __entry->seqno,
 		      __entry->prio, __entry->port)
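
The prio field flips from u32 to s32 because request priorities are signed
(boosts above, penalties below the default); under the old %u format a
priority of -1 rendered as 4294967295. For example:

	pr_info("prio=%u\n", (u32)-1);	/* old: prio=4294967295 */
	pr_info("prio=%d\n", -1);	/* new: prio=-1 */
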
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 632d695..029854a 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,7 +8,6 @@
 #include "i915_drv.h"
 #include "i915_utils.h"
 
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
 #define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
 
 void
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index d34141f..03a73d2 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
 struct drm_i915_private;
 struct timer_list;
 
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
 #undef WARN_ON
 /* Many gcc versions seem not to see through this and fall over :( */
 #if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
 	typeof(max) max__ = (max); \
 	(void)(&start__ == &size__); \
 	(void)(&start__ == &max__); \
-	start__ > max__ || size__ > max__ - start__; \
+	start__ >= max__ || size__ > max__ - start__; \
 })
 
 #define range_overflows_t(type, start, size, max) \
 	range_overflows((type)(start), (type)(size), (type)(max))
 
+#define range_overflows_end(start, size, max) ({ \
+	typeof(start) start__ = (start); \
+	typeof(size) size__ = (size); \
+	typeof(max) max__ = (max); \
+	(void)(&start__ == &size__); \
+	(void)(&start__ == &max__); \
+	start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+	range_overflows_end((type)(start), (type)(size), (type)(max))
+
 /* Note we don't consider signbits :| */
 #define overflows_type(x, T) \
 	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -246,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
 	WRITE_ONCE(head->next, first);
 }
 
+static inline int list_is_last_rcu(const struct list_head *list,
+				   const struct list_head *head)
+{
+	return READ_ONCE(list->next) == head;
+}
+
 /*
  * Wait until the work is finally complete, even if it tries to postpone
  * by requeueing itself. Note, that if the worker never cancels itself,
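
The tightened range_overflows() now also rejects a range starting exactly
at max, while the new range_overflows_end() keeps the old semantics for
ranges allowed to end at max. The distinguishing case is start == max with
size == 0:

	range_overflows(4, 0, 4)	/* true:  start >= max */
	range_overflows_end(4, 0, 4)	/* false: 4 > 4 and 0 > 4 - 4 both fail */
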
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 968be26..70fca72 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
  * SOFTWARE.
  */
 
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
 #include "i915_vgpu.h"
 
 /**
@@ -51,13 +53,13 @@
  */
 
 /**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
  * @dev_priv: i915 device private
  *
  * This function is called at the initialization stage, to detect whether
  * running on a vGPU.
  */
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u64 magic;
@@ -77,7 +79,8 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
 
 	shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
 	if (!shared_area) {
-		DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+		drm_err(&dev_priv->drm,
+			"failed to map MMIO bar to check for VGT\n");
 		return;
 	}
 
@@ -87,7 +90,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
 
 	version_major = readw(shared_area + vgtif_offset(version_major));
 	if (version_major < VGT_VERSION_MAJOR) {
-		DRM_INFO("VGT interface version mismatch!\n");
+		drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
 		goto out;
 	}
 
@@ -95,17 +98,42 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
 
 	dev_priv->vgpu.active = true;
 	mutex_init(&dev_priv->vgpu.lock);
-	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+	drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");
 
 out:
 	pci_iounmap(pdev, shared_area);
 }
 
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+	/*
+	 * Notify a valid surface after modesetting, when running inside a VM.
+	 */
+	if (intel_vgpu_active(i915))
+		intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+				   VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.active;
+}
+
 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
 {
 	return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
 }
 
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
 struct _balloon_info_ {
 	/*
 	 * There are up to 2 regions per mappable/unmappable graphic
@@ -120,13 +148,15 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 				struct drm_mm_node *node)
 {
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	if (!drm_mm_node_allocated(node))
 		return;
 
-	DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
-			 node->start,
-			 node->start + node->size,
-			 node->size / 1024);
+	drm_dbg(&dev_priv->drm,
+		"deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
+		node->start,
+		node->start + node->size,
+		node->size / 1024);
 
 	ggtt->vm.reserved -= node->size;
 	drm_mm_remove_node(node);
@@ -141,12 +171,13 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
  */
 void intel_vgt_deballoon(struct i915_ggtt *ggtt)
 {
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	int i;
 
 	if (!intel_vgpu_active(ggtt->vm.i915))
 		return;
 
-	DRM_DEBUG("VGT deballoon.\n");
+	drm_dbg(&dev_priv->drm, "VGT deballoon.\n");
 
 	for (i = 0; i < 4; i++)
 		vgt_deballoon_space(ggtt, &bl_info.space[i]);
@@ -156,13 +187,15 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 			     struct drm_mm_node *node,
 			     unsigned long start, unsigned long end)
 {
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
 	unsigned long size = end - start;
 	int ret;
 
 	if (start >= end)
 		return -EINVAL;
 
-	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+	drm_info(&dev_priv->drm,
+		 "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
 		 start, end, size / 1024);
 	ret = i915_gem_gtt_reserve(&ggtt->vm, node,
 				   size, start, I915_COLOR_UNEVICTABLE,
@@ -219,7 +252,8 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
  */
 int intel_vgt_balloon(struct i915_ggtt *ggtt)
 {
-	struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+	struct drm_i915_private *dev_priv = ggtt->vm.i915;
+	struct intel_uncore *uncore = &dev_priv->uncore;
 	unsigned long ggtt_end = ggtt->vm.total;
 
 	unsigned long mappable_base, mappable_size, mappable_end;
@@ -241,16 +275,18 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
 	mappable_end = mappable_base + mappable_size;
 	unmappable_end = unmappable_base + unmappable_size;
 
-	DRM_INFO("VGT ballooning configuration:\n");
-	DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+	drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
+	drm_info(&dev_priv->drm,
+		 "Mappable graphic memory: base 0x%lx size %ldKiB\n",
 		 mappable_base, mappable_size / 1024);
-	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+	drm_info(&dev_priv->drm,
+		 "Unmappable graphic memory: base 0x%lx size %ldKiB\n",
 		 unmappable_base, unmappable_size / 1024);
 
 	if (mappable_end > ggtt->mappable_end ||
 	    unmappable_base < ggtt->mappable_end ||
 	    unmappable_end > ggtt_end) {
-		DRM_ERROR("Invalid ballooning configuration!\n");
+		drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
 		return -EINVAL;
 	}
 
@@ -287,7 +323,7 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
 			goto err_below_mappable;
 	}
 
-	DRM_INFO("VGT balloon successfully\n");
+	drm_info(&dev_priv->drm, "VGT balloon successfully\n");
 	return 0;
 
 err_below_mappable:
@@ -297,6 +333,6 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
 err_upon_mappable:
 	vgt_deballoon_space(ggtt, &bl_info.space[2]);
 err:
-	DRM_ERROR("VGT balloon fail\n");
+	drm_err(&dev_priv->drm, "VGT balloon fail\n");
 	return ret;
 }
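
The DRM_DEBUG/DRM_INFO conversions in this file switch to the device-aware
drm_dbg()/drm_info()/drm_err() helpers, so each log line names the exact
device. Roughly (output format illustrative, not exact):

	drm_info(&i915->drm, "VGT balloon successfully\n");
	/* -> i915 0000:00:02.0: [drm] VGT balloon successfully */
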
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663d..ffbb77d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
 #ifndef _I915_VGPU_H_
 #define _I915_VGPU_H_
 
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
 
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
 
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
 
 int intel_vgt_balloon(struct i915_ggtt *ggtt);
 void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4ff3807..08699fa 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -294,6 +294,7 @@ struct i915_vma_work {
 	struct dma_fence_work base;
 	struct i915_vma *vma;
 	struct drm_i915_gem_object *pinned;
+	struct i915_sw_dma_fence_cb cb;
 	enum i915_cache_level cache_level;
 	unsigned int flags;
 };
@@ -339,6 +340,25 @@ struct i915_vma_work *i915_vma_work(void)
 	return vw;
 }
 
+int i915_vma_wait_for_bind(struct i915_vma *vma)
+{
+	int err = 0;
+
+	if (rcu_access_pointer(vma->active.excl.fence)) {
+		struct dma_fence *fence;
+
+		rcu_read_lock();
+		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
+		rcu_read_unlock();
+		if (fence) {
+			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+			dma_fence_put(fence);
+		}
+	}
+
+	return err;
+}
+
 /**
  * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
  * @vma: VMA to map
@@ -386,6 +406,8 @@ int i915_vma_bind(struct i915_vma *vma,
 
 	trace_i915_vma_bind(vma, bind_flags);
 	if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+		struct dma_fence *prev;
+
 		work->vma = vma;
 		work->cache_level = cache_level;
 		work->flags = bind_flags | I915_VMA_ALLOC;
@@ -399,8 +421,14 @@ int i915_vma_bind(struct i915_vma *vma,
 		 * part of the obj->resv->excl_fence as it only affects
 		 * execution and not content or object's backing store lifetime.
 		 */
-		GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
-		i915_active_set_exclusive(&vma->active, &work->base.dma);
+		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+		if (prev) {
+			__i915_sw_fence_await_dma_fence(&work->base.chain,
+							prev,
+							&work->cb);
+			dma_fence_put(prev);
+		}
+
 		work->base.dma.error = 0; /* enable the queue_work() */
 
 		if (vma->obj) {
@@ -408,7 +436,6 @@ int i915_vma_bind(struct i915_vma *vma,
 			work->pinned = vma->obj;
 		}
 	} else {
-		GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
 		ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
 		if (ret)
 			return ret;
@@ -614,7 +641,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	u64 start, end;
 	int ret;
 
-	GEM_BUG_ON(i915_vma_is_closed(vma));
 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
@@ -892,6 +918,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (err)
 		goto err_fence;
 
+	if (unlikely(i915_vma_is_closed(vma))) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
 	bound = atomic_read(&vma->flags);
 	if (unlikely(bound & I915_VMA_ERROR)) {
 		err = -ENOMEM;
@@ -977,8 +1008,14 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
 
 	do {
 		err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
-		if (err != -ENOSPC)
+		if (err != -ENOSPC) {
+			if (!err) {
+				err = i915_vma_wait_for_bind(vma);
+				if (err)
+					i915_vma_unpin(vma);
+			}
 			return err;
+		}
 
 		/* Unlike i915_vma_pin, we don't take no for an answer! */
 		flush_idle_contexts(vm->gt);
@@ -1060,6 +1097,7 @@ void i915_vma_release(struct kref *ref)
 void i915_vma_parked(struct intel_gt *gt)
 {
 	struct i915_vma *vma, *next;
+	LIST_HEAD(closed);
 
 	spin_lock_irq(&gt->closed_lock);
 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
@@ -1071,28 +1109,26 @@ void i915_vma_parked(struct intel_gt *gt)
 		if (!kref_get_unless_zero(&obj->base.refcount))
 			continue;
 
-		if (i915_vm_tryopen(vm)) {
-			list_del_init(&vma->closed_link);
-		} else {
+		if (!i915_vm_tryopen(vm)) {
 			i915_gem_object_put(obj);
-			obj = NULL;
+			continue;
 		}
 
-		spin_unlock_irq(&gt->closed_lock);
-
-		if (obj) {
-			__i915_vma_put(vma);
-			i915_gem_object_put(obj);
-		}
-
-		i915_vm_close(vm);
-
-		/* Restart after dropping lock */
-		spin_lock_irq(&gt->closed_lock);
-		next = list_first_entry(&gt->closed_vma,
-					typeof(*next), closed_link);
+		list_move(&vma->closed_link, &closed);
 	}
 	spin_unlock_irq(&gt->closed_lock);
+
+	/* As the GT is held idle, no vma can be reopened as we destroy them */
+	list_for_each_entry_safe(vma, next, &closed, closed_link) {
+		struct drm_i915_gem_object *obj = vma->obj;
+		struct i915_address_space *vm = vma->vm;
+
+		INIT_LIST_HEAD(&vma->closed_link);
+		__i915_vma_put(vma);
+
+		i915_gem_object_put(obj);
+		i915_vm_close(vm);
+	}
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1136,7 +1172,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	/* Wait for the vma to be bound before we start! */
-	err = i915_request_await_active(rq, &vma->active);
+	err = i915_request_await_active(rq, &vma->active, 0);
 	if (err)
 		return err;
 
@@ -1228,9 +1264,15 @@ int __i915_vma_unbind(struct i915_vma *vma)
 		 * before the unbind, other due to non-strict nature of those
 		 * indirect writes they may end up referencing the GGTT PTE
 		 * after the unbind.
+		 *
+		 * Note that we may be concurrently poking at the GGTT_WRITE
+		 * bit from set-domain, as we mark all GGTT vma associated
+		 * with an object. We know this is for another vma, as we
+		 * are currently unbinding this one -- so if this vma will be
+		 * reused, it will be refaulted and have its dirty bit set
+		 * before the next write.
 		 */
 		i915_vma_flush_writes(vma);
-		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
 
 		/* release the fence reg _after_ flushing */
 		ret = i915_vma_revoke_fence(vma);
@@ -1250,7 +1292,8 @@ int __i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->ops->unbind_vma(vma);
 	}
-	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
+	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
+		   &vma->flags);
 
 	i915_vma_detach(vma);
 	vma_unbind_pages(vma);
@@ -1272,16 +1315,21 @@ int i915_vma_unbind(struct i915_vma *vma)
 		/* XXX not always required: nop_clear_range */
 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
 
+	/* Optimistic wait before taking the mutex */
+	err = i915_vma_sync(vma);
+	if (err)
+		goto out_rpm;
+
 	err = mutex_lock_interruptible(&vm->mutex);
 	if (err)
-		return err;
+		goto out_rpm;
 
 	err = __i915_vma_unbind(vma);
 	mutex_unlock(&vm->mutex);
 
+out_rpm:
 	if (wakeref)
 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
 	return err;
 }
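
i915_vma_parked() above now collects victims onto a private list while
holding the spinlock and tears them down only after dropping it, the usual
way to avoid the old drop-lock-and-restart dance. The shape of the pattern
(hypothetical names):

	LIST_HEAD(stale);

	spin_lock_irq(&gt->lock);
	list_for_each_entry_safe(item, next, &gt->closed, link)
		if (can_reap(item))
			list_move(&item->link, &stale);
	spin_unlock_irq(&gt->lock);

	/* teardown may sleep, so it happens outside the spinlock */
	list_for_each_entry_safe(item, next, &stale, link)
		release(item);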
 
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 02b31a6..e1ced1d 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -375,6 +375,8 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
 void i915_vma_make_shrinkable(struct i915_vma *vma);
 void i915_vma_make_purgeable(struct i915_vma *vma);
 
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
 static inline int i915_vma_sync(struct i915_vma *vma)
 {
 	/* Wait for the asynchronous bindings and pending GPU reads */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942ef..63831cd 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
 	struct rb_node obj_node;
 	struct hlist_node obj_hash;
 
-	/** This vma's place in the execbuf reservation list */
-	struct list_head exec_link;
-	struct list_head reloc_link;
-
 	/** This vma's place in the eviction list */
 	struct list_head evict_link;
 
 	struct list_head closed_link;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	unsigned int *exec_flags;
-	struct hlist_node exec_node;
-	u32 exec_handle;
 };
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6670a07..d7fe127 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,7 +23,9 @@
  */
 
 #include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
 
+#include "display/intel_cdclk.h"
 #include "intel_device_info.h"
 #include "i915_drv.h"
 
@@ -132,6 +134,7 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
 {
 	sseu_dump(&info->sseu, p);
 
+	drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
 	drm_printf(p, "CS timestamp frequency: %u kHz\n",
 		   info->cs_timestamp_frequency_khz);
 }
@@ -743,7 +746,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 		 *      hclks." (through the “Clocking Configuration”
 		 *      (“CLKCFG”) MCHBAR register)
 		 */
-		return dev_priv->rawclk_freq / 16;
+		return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
 	} else if (INTEL_GEN(dev_priv) <= 8) {
 		/* PRMs say:
 		 *
@@ -974,10 +977,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
 		    (HAS_PCH_CPT(dev_priv) &&
 		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
-			DRM_INFO("Display fused off, disabling\n");
+			drm_info(&dev_priv->drm,
+				 "Display fused off, disabling\n");
 			info->pipe_mask = 0;
 		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
-			DRM_INFO("PipeC fused off\n");
+			drm_info(&dev_priv->drm, "PipeC fused off\n");
 			info->pipe_mask &= ~BIT(PIPE_C);
 		}
 	} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
@@ -1000,8 +1004,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		 * in the mask.
 		 */
 		if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
-			DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
-				  enabled_mask);
+			drm_err(&dev_priv->drm,
+				"invalid pipe fuse configuration: enabled_mask=0x%x\n",
+				enabled_mask);
 		else
 			info->pipe_mask = enabled_mask;
 
@@ -1036,12 +1041,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
 		gen12_sseu_info_init(dev_priv);
 
 	if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
-		DRM_INFO("Disabling ppGTT for VT-d support\n");
+		drm_info(&dev_priv->drm,
+			 "Disabling ppGTT for VT-d support\n");
 		info->ppgtt_type = INTEL_PPGTT_NONE;
 	}
 
+	runtime->rawclk_freq = intel_read_rawclk(dev_priv);
+	drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
+
 	/* Initialize command stream timestamp frequency */
-	runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+	runtime->cs_timestamp_frequency_khz =
+		read_timestamp_frequency(dev_priv);
+	if (runtime->cs_timestamp_frequency_khz) {
+		runtime->cs_timestamp_period_ns =
+			div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+		drm_dbg(&dev_priv->drm,
+			"CS timestamp wraparound in %lldms\n",
+			div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
+					    S32_MAX),
+				USEC_PER_SEC));
+	}
 }
 
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -1084,7 +1103,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 
 		if (!(BIT(i) & vdbox_mask)) {
 			info->engine_mask &= ~BIT(_VCS(i));
-			DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+			drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
 			continue;
 		}
 
@@ -1096,8 +1115,8 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 		if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
 			RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
 	}
-	DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
-			 vdbox_mask, VDBOX_MASK(dev_priv));
+	drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
+		vdbox_mask, VDBOX_MASK(dev_priv));
 	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
 
 	for (i = 0; i < I915_MAX_VECS; i++) {
@@ -1108,10 +1127,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
 
 		if (!(BIT(i) & vebox_mask)) {
 			info->engine_mask &= ~BIT(_VECS(i));
-			DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+			drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
 		}
 	}
-	DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
-			 vebox_mask, VEBOX_MASK(dev_priv));
+	drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
+		vebox_mask, VEBOX_MASK(dev_priv));
 	GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
 }
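
The new wraparound message derives its value from the CS timestamp period;
plugging in a 12000 kHz clock (an illustrative figure, not a claim about
any particular platform):

	period_ns = div_u64(1000000, 12000);	/* = 83 ns */
	wrap_ms = div_u64(mul_u32_u32(83, S32_MAX), USEC_PER_SEC);
						/* = 178241 ms, ~3 minutes */
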
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2725cb7..1ecb9df 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -180,6 +180,7 @@ struct intel_device_info {
 	} display;
 
 	u16 ddb_size; /* in blocks */
+	u8 num_supported_dbuf_slices; /* number of DBuf slices */
 
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -215,7 +216,10 @@ struct intel_runtime_info {
 	/* Slice/subslice/EU info */
 	struct sseu_dev_info sseu;
 
+	u32 rawclk_freq;
+
 	u32 cs_timestamp_frequency_khz;
+	u32 cs_timestamp_period_ns;
 
 	/* Media engine access to SFC per instance */
 	u8 vdbox_sfc_access;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 0000000..6b922ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+	u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+	struct dram_dimm_info dimm_l, dimm_s;
+	u8 ranks;
+	bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+	static const char * const str[] = {
+		DRAM_TYPE_STR(UNKNOWN),
+		DRAM_TYPE_STR(DDR3),
+		DRAM_TYPE_STR(DDR4),
+		DRAM_TYPE_STR(LPDDR3),
+		DRAM_TYPE_STR(LPDDR4),
+	};
+
+	if (type >= ARRAY_SIZE(str))
+		type = INTEL_DRAM_UNKNOWN;
+
+	return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+	return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
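+/*
+ * Worked example (hypothetical parts): a dual-rank DIMM built from x8
+ * devices supplies 2 * 64 / 8 = 16 DRAM devices on its 64-bit channel.
+ */
+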
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+	return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+	if (skl_get_dimm_size(val) == 0)
+		return 0;
+
+	switch (val & SKL_DRAM_WIDTH_MASK) {
+	case SKL_DRAM_WIDTH_X8:
+	case SKL_DRAM_WIDTH_X16:
+	case SKL_DRAM_WIDTH_X32:
+		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+		return 8 << val;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+	if (skl_get_dimm_size(val) == 0)
+		return 0;
+
+	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+	return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+	return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+	if (cnl_get_dimm_size(val) == 0)
+		return 0;
+
+	switch (val & CNL_DRAM_WIDTH_MASK) {
+	case CNL_DRAM_WIDTH_X8:
+	case CNL_DRAM_WIDTH_X16:
+	case CNL_DRAM_WIDTH_X32:
+		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+		return 8 << val;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+	if (cnl_get_dimm_size(val) == 0)
+		return 0;
+
+	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+	return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+	/* Convert total GB to Gb per DRAM device */
+	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
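+/*
+ * Worked example (hypothetical parts): a 16 GB single-rank x8 DIMM has
+ * 64 / 8 = 8 devices, i.e. 8 * 16 / 8 = 16 Gb per device, so it counts
+ * as a 16Gb DIMM; the dual-rank variant (16 devices of 8 Gb) does not.
+ */
+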
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+		       struct dram_dimm_info *dimm,
+		       int channel, char dimm_name, u16 val)
+{
+	if (INTEL_GEN(i915) >= 10) {
+		dimm->size = cnl_get_dimm_size(val);
+		dimm->width = cnl_get_dimm_width(val);
+		dimm->ranks = cnl_get_dimm_ranks(val);
+	} else {
+		dimm->size = skl_get_dimm_size(val);
+		dimm->width = skl_get_dimm_width(val);
+		dimm->ranks = skl_get_dimm_ranks(val);
+	}
+
+	drm_dbg_kms(&i915->drm,
+		    "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+		    channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+		    yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+			  struct dram_channel_info *ch,
+			  int channel, u32 val)
+{
+	skl_dram_get_dimm_info(i915, &ch->dimm_l,
+			       channel, 'L', val & 0xffff);
+	skl_dram_get_dimm_info(i915, &ch->dimm_s,
+			       channel, 'S', val >> 16);
+
+	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+		drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+		return -EINVAL;
+	}
+
+	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+		ch->ranks = 2;
+	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+		ch->ranks = 2;
+	else
+		ch->ranks = 1;
+
+	ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+		skl_is_16gb_dimm(&ch->dimm_s);
+
+	drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+		    channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+	return 0;
+}
+
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+			const struct dram_channel_info *ch1)
+{
+	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+		(ch0->dimm_s.size == 0 ||
+		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+	struct dram_info *dram_info = &i915->dram_info;
+	struct dram_channel_info ch0 = {}, ch1 = {};
+	u32 val;
+	int ret;
+
+	val = intel_uncore_read(&i915->uncore,
+				SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+	if (ret == 0)
+		dram_info->num_channels++;
+
+	val = intel_uncore_read(&i915->uncore,
+				SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+	ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+	if (ret == 0)
+		dram_info->num_channels++;
+
+	if (dram_info->num_channels == 0) {
+		drm_info(&i915->drm, "Number of memory channels is zero\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If either channel is single-rank, the worst-case output is the
+	 * same as for single-rank memory, so treat the whole configuration
+	 * as single-rank.
+	 */
+	if (ch0.ranks == 1 || ch1.ranks == 1)
+		dram_info->ranks = 1;
+	else
+		dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+	if (dram_info->ranks == 0) {
+		drm_info(&i915->drm, "couldn't get memory rank information\n");
+		return -EINVAL;
+	}
+
+	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+	drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+		    yesno(dram_info->symmetric_memory));
+
+	return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+	u32 val;
+
+	val = intel_uncore_read(&i915->uncore,
+				SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+	case SKL_DRAM_DDR_TYPE_DDR3:
+		return INTEL_DRAM_DDR3;
+	case SKL_DRAM_DDR_TYPE_DDR4:
+		return INTEL_DRAM_DDR4;
+	case SKL_DRAM_DDR_TYPE_LPDDR3:
+		return INTEL_DRAM_LPDDR3;
+	case SKL_DRAM_DDR_TYPE_LPDDR4:
+		return INTEL_DRAM_LPDDR4;
+	default:
+		MISSING_CASE(val);
+		return INTEL_DRAM_UNKNOWN;
+	}
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+	struct dram_info *dram_info = &i915->dram_info;
+	u32 mem_freq_khz, val;
+	int ret;
+
+	dram_info->type = skl_get_dram_type(i915);
+	drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+		    intel_dram_type_str(dram_info->type));
+
+	ret = skl_dram_get_channels_info(i915);
+	if (ret)
+		return ret;
+
+	val = intel_uncore_read(&i915->uncore,
+				SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+	dram_info->bandwidth_kbps = dram_info->num_channels *
+		mem_freq_khz * 8;
+
+	if (dram_info->bandwidth_kbps == 0) {
+		drm_info(&i915->drm,
+			 "Couldn't get system memory bandwidth\n");
+		return -EINVAL;
+	}
+
+	dram_info->valid = true;
+	return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+	switch (val & BXT_DRAM_SIZE_MASK) {
+	case BXT_DRAM_SIZE_4GBIT:
+		return 4;
+	case BXT_DRAM_SIZE_6GBIT:
+		return 6;
+	case BXT_DRAM_SIZE_8GBIT:
+		return 8;
+	case BXT_DRAM_SIZE_12GBIT:
+		return 12;
+	case BXT_DRAM_SIZE_16GBIT:
+		return 16;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return 0;
+
+	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+	return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return 0;
+
+	switch (val & BXT_DRAM_RANK_MASK) {
+	case BXT_DRAM_RANK_SINGLE:
+		return 1;
+	case BXT_DRAM_RANK_DUAL:
+		return 2;
+	default:
+		MISSING_CASE(val);
+		return 0;
+	}
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+	if (!bxt_get_dimm_size(val))
+		return INTEL_DRAM_UNKNOWN;
+
+	switch (val & BXT_DRAM_TYPE_MASK) {
+	case BXT_DRAM_TYPE_DDR3:
+		return INTEL_DRAM_DDR3;
+	case BXT_DRAM_TYPE_LPDDR3:
+		return INTEL_DRAM_LPDDR3;
+	case BXT_DRAM_TYPE_DDR4:
+		return INTEL_DRAM_DDR4;
+	case BXT_DRAM_TYPE_LPDDR4:
+		return INTEL_DRAM_LPDDR4;
+	default:
+		MISSING_CASE(val);
+		return INTEL_DRAM_UNKNOWN;
+	}
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+	dimm->width = bxt_get_dimm_width(val);
+	dimm->ranks = bxt_get_dimm_ranks(val);
+
+	/*
+	 * Size in register is Gb per DRAM device. Convert to total
+	 * GB to match the way we report this for non-LP platforms.
+	 */
+	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
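The Gb-per-device to total-GB conversion packs three factors into one line;
a worked example below, where the num_devices() helper mirrors what the
driver's intel_dimm_num_devices() computes from ranks and device width (an
assumption here, since that helper sits outside this hunk):

#include <assert.h>

/* A 64-bit channel built from width-bit devices, one set per rank. */
static int num_devices(int ranks, int width)
{
	return ranks * 64 / width;
}

int main(void)
{
	/* 8 Gb per device, x16 devices, 2 ranks:
	 * 2 * 64/16 = 8 devices -> 8 Gb * 8 devices / 8 bits = 8 GB */
	int size_gbit = 8, width = 16, ranks = 2;

	assert(num_devices(ranks, width) == 8);
	assert(size_gbit * num_devices(ranks, width) / 8 == 8);
	return 0;
}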
+
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+	struct dram_info *dram_info = &i915->dram_info;
+	u32 dram_channels;
+	u32 mem_freq_khz, val;
+	u8 num_active_channels;
+	int i;
+
+	val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+	num_active_channels = hweight32(dram_channels);
+
+	/* Each active bit represents a 4-byte wide channel */
+	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+	if (dram_info->bandwidth_kbps == 0) {
+		drm_info(&i915->drm,
+			 "Couldn't get system memory bandwidth\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+	 */
+	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+		struct dram_dimm_info dimm;
+		enum intel_dram_type type;
+
+		val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+		if (val == 0xFFFFFFFF)
+			continue;
+
+		dram_info->num_channels++;
+
+		bxt_get_dimm_info(&dimm, val);
+		type = bxt_get_dimm_type(val);
+
+		drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+			    dram_info->type != INTEL_DRAM_UNKNOWN &&
+			    dram_info->type != type);
+
+		drm_dbg_kms(&i915->drm,
+			    "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+			    i - BXT_D_CR_DRP0_DUNIT_START,
+			    dimm.size, dimm.width, dimm.ranks,
+			    intel_dram_type_str(type));
+
+		/*
+		 * If any channel is a single-rank channel, the
+		 * worst-case output will be the same as for
+		 * single-rank memory, so treat it as single rank.
+		 */
+		if (dram_info->ranks == 0)
+			dram_info->ranks = dimm.ranks;
+		else if (dimm.ranks == 1)
+			dram_info->ranks = 1;
+
+		if (type != INTEL_DRAM_UNKNOWN)
+			dram_info->type = type;
+	}
+
+	if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+		drm_info(&i915->drm, "couldn't get memory information\n");
+		return -EINVAL;
+	}
+
+	dram_info->valid = true;
+
+	return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+	struct dram_info *dram_info = &i915->dram_info;
+	int ret;
+
+	/*
+	 * Assume 16Gb DIMMs are present until proven otherwise.
+	 * This is only used for the level 0 watermark latency
+	 * w/a which does not apply to bxt/glk.
+	 */
+	dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+	if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+		return;
+
+	if (IS_GEN9_LP(i915))
+		ret = bxt_get_dram_info(i915);
+	else
+		ret = skl_get_dram_info(i915);
+	if (ret)
+		return;
+
+	drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+		    dram_info->bandwidth_kbps, dram_info->num_channels);
+
+	drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+		    dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+	static const u8 sets[4] = { 1, 1, 2, 2 };
+
+	return EDRAM_NUM_BANKS(cap) *
+		ways[EDRAM_WAYS_IDX(cap)] *
+		sets[EDRAM_SETS_IDX(cap)];
+}
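A worked example of the eDRAM size decode, using the same lookup tables
(the EDRAM_NUM_BANKS/WAYS_IDX/SETS_IDX macros extract bitfields from
HSW_EDRAM_CAP; here the already-extracted fields are passed in directly):

#include <assert.h>

static unsigned int edram_size_mb(unsigned int banks,
				  unsigned int ways_idx,
				  unsigned int sets_idx)
{
	static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned char sets[4] = { 1, 1, 2, 2 };

	return banks * ways[ways_idx] * sets[sets_idx];
}

int main(void)
{
	/* e.g. 8 banks, 16 ways (idx 3), 2 sets (idx 2) -> 256 MB */
	assert(edram_size_mb(8, 3, 2) == 256);
	return 0;
}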
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+	u32 edram_cap = 0;
+
+	if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+		return;
+
+	edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+	/* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+	if (!(edram_cap & EDRAM_ENABLED))
+		return;
+
+	/*
+	 * The capability bits needed for the size calculation are not
+	 * present before gen9, so always return 128MB there.
+	 */
+	if (INTEL_GEN(i915) < 9)
+		i915->edram_size_mb = 128;
+	else
+		i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+	dev_info(i915->drm.dev,
+		 "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 0000000..4ba13c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 2b6c016..21b9131 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
  */
 
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "intel_gvt.h"
 
 /**
@@ -67,12 +68,13 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
 		return;
 
 	if (intel_vgpu_active(dev_priv)) {
-		DRM_INFO("GVT-g is disabled for guest\n");
+		drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
 		goto bail;
 	}
 
 	if (!is_supported_device(dev_priv)) {
-		DRM_INFO("Unsupported device. GVT-g is disabled\n");
+		drm_info(&dev_priv->drm,
+			 "Unsupported device. GVT-g is disabled\n");
 		goto bail;
 	}
 
@@ -99,18 +101,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 		return -ENODEV;
 
 	if (!i915_modparams.enable_gvt) {
-		DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+		drm_dbg(&dev_priv->drm,
+			"GVT-g is disabled by kernel params\n");
 		return 0;
 	}
 
-	if (USES_GUC_SUBMISSION(dev_priv)) {
-		DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
+	if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+		drm_err(&dev_priv->drm,
+			"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
 		return -EIO;
 	}
 
 	ret = intel_gvt_init_device(dev_priv);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Fail to init GVT device\n");
+		drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
 		goto bail;
 	}
 
@@ -121,6 +125,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gvt;
+}
+
 /**
  * intel_gvt_driver_remove - cleanup GVT components when i915 driver is
  *			     unbinding
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d0d038b..6b5e9d8 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -265,7 +265,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
 
 		if (IS_ERR(mem)) {
 			err = PTR_ERR(mem);
-			DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+			drm_err(&i915->drm,
+				"Failed to setup region(%d) type=%d\n",
+				err, type);
 			goto out_cleanup;
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4ed60e1..20ab9a5 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,91 +13,106 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
 	switch (id) {
 	case INTEL_PCH_IBX_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 5));
+		drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
 		return PCH_IBX;
 	case INTEL_PCH_CPT_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		return PCH_CPT;
 	case INTEL_PCH_PPT_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
-		WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
 		/* PantherPoint is CPT compatible */
 		return PCH_CPT;
 	case INTEL_PCH_LPT_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
 		return PCH_LPT;
 	case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
 		return PCH_LPT;
 	case INTEL_PCH_WPT_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
 		/* WildcatPoint is LPT compatible */
 		return PCH_LPT;
 	case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
-		WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
-		WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
 		/* WildcatPoint is LPT compatible */
 		return PCH_LPT;
 	case INTEL_PCH_SPT_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
 		return PCH_SPT;
 	case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
-			!IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+			    !IS_COFFEELAKE(dev_priv));
 		return PCH_SPT;
 	case INTEL_PCH_KBP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
-		WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
-			!IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm,
+			    !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+			    !IS_COFFEELAKE(dev_priv));
 		/* KBP is SPT compatible */
 		return PCH_SPT;
 	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+			    !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
 	case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm,
 			    "Found Cannon Lake LP PCH (CNP-LP)\n");
-		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+			    !IS_COFFEELAKE(dev_priv));
 		return PCH_CNP;
 	case INTEL_PCH_CMP_DEVICE_ID_TYPE:
 	case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
-		WARN_ON(!IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
 		/* CometPoint is CNP Compatible */
 		return PCH_CNP;
 	case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
-		WARN_ON(!IS_COFFEELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
 		/* Comet Lake V PCH is based on KBP, which is SPT compatible */
 		return PCH_SPT;
 	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
-		WARN_ON(!IS_ICELAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
 		return PCH_ICP;
 	case INTEL_PCH_MCC_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
-		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
 		return PCH_MCC;
 	case INTEL_PCH_TGP_DEVICE_ID_TYPE:
 	case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
-		WARN_ON(!IS_TIGERLAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
 		return PCH_TGP;
 	case INTEL_PCH_JSP_DEVICE_ID_TYPE:
 	case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
 		drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
-		WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+		drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
 		return PCH_JSP;
 	default:
 		return PCH_NONE;
@@ -188,7 +203,8 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
 			pch_type = intel_pch_type(dev_priv, id);
 
 			/* Sanity check virtual PCH id */
-			if (WARN_ON(id && pch_type == PCH_NONE))
+			if (drm_WARN_ON(&dev_priv->drm,
+					id && pch_type == PCH_NONE))
 				id = 0;
 
 			dev_priv->pch_type = pch_type;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index bd2d30e..8375054 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
 #include "gt/intel_llc.h"
 
 #include "i915_drv.h"
+#include "i915_fixed.h"
 #include "i915_irq.h"
 #include "i915_trace.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
 #include "../../../platform/x86/intel_ips.h"
 
+/* Stores plane-specific WM parameters */
+struct skl_wm_params {
+	bool x_tiled, y_tiled;
+	bool rc_surface;
+	bool is_planar;
+	u32 width;
+	u8 cpp;
+	u32 plane_pixel_rate;
+	u32 y_min_scanlines;
+	u32 plane_bytes_per_line;
+	uint_fixed_16_16_t plane_blocks_per_line;
+	uint_fixed_16_16_t y_tile_minimum;
+	u32 linetime_us;
+	u32 dbuf_block_size;
+};
+
+/* used in computing the new watermark state */
+struct intel_wm_config {
+	unsigned int num_pipes_active;
+	bool sprites_enabled;
+	bool sprites_scaled;
+};
+
 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 	 */
 	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
 		   PWM1_GATING_DIS | PWM2_GATING_DIS);
-
-	/* WaDDIIOTimeout:glk */
-	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
-		u32 val = I915_READ(CHICKEN_MISC_2);
-		val &= ~(GLK_CL0_PWR_DOWN |
-			 GLK_CL1_PWR_DOWN |
-			 GLK_CL2_PWR_DOWN);
-		I915_WRITE(CHICKEN_MISC_2, val);
-	}
-
 }
 
 static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
 	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
 	enum pipe pipe = crtc->pipe;
 	int sprite0_start, sprite1_start;
+	u32 dsparb, dsparb2, dsparb3;
 
 	switch (pipe) {
-		u32 dsparb, dsparb2, dsparb3;
 	case PIPE_A:
 		dsparb = I915_READ(DSPARB);
 		dsparb2 = I915_READ(DSPARB2);
@@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 	const struct vlv_fifo_state *fifo_state =
 		&crtc_state->wm.vlv.fifo_state;
 	int sprite0_start, sprite1_start, fifo_size;
+	u32 dsparb, dsparb2, dsparb3;
 
 	if (!crtc_state->fifo_changed)
 		return;
@@ -1977,8 +1992,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
 	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
 
-	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
-	WARN_ON(fifo_size != 511);
+	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
 
 	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
 
@@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 	spin_lock(&uncore->lock);
 
 	switch (crtc->pipe) {
-		u32 dsparb, dsparb2, dsparb3;
 	case PIPE_A:
 		dsparb = intel_uncore_read_fw(uncore, DSPARB);
 		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
 }
 
 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
-				 const struct intel_crtc *intel_crtc,
+				 const struct intel_crtc *crtc,
 				 int level,
 				 struct intel_crtc_state *crtc_state,
 				 const struct intel_plane_state *pristate,
@@ -2810,34 +2824,6 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 	result->enable = true;
 }
 
-static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
-	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(crtc_state->uapi.state);
-	const struct drm_display_mode *adjusted_mode =
-		&crtc_state->hw.adjusted_mode;
-	u32 linetime, ips_linetime;
-
-	if (!crtc_state->hw.active)
-		return 0;
-	if (WARN_ON(adjusted_mode->crtc_clock == 0))
-		return 0;
-	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
-		return 0;
-
-	/* The WM are computed with base on how long it takes to fill a single
-	 * row at the given clock rate, multiplied by 8.
-	 * */
-	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
-				     adjusted_mode->crtc_clock);
-	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
-					 intel_state->cdclk.logical.cdclk);
-
-	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
-	       PIPE_WM_LINETIME_TIME(linetime);
-}
-
 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 				  u16 wm[8])
 {
@@ -3135,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
 static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_pipe_wm *pipe_wm;
 	struct intel_plane *plane;
 	const struct intel_plane_state *plane_state;
@@ -3175,12 +3161,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 		usable_level = 0;
 
 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
-	ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
 			     pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
-
 	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
 		return -EINVAL;
 
@@ -3189,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	for (level = 1; level <= usable_level; level++) {
 		struct intel_wm_level *wm = &pipe_wm->wm[level];
 
-		ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
 				     pristate, sprstate, curstate, wm);
 
 		/*
@@ -3417,7 +3400,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 		 * level is disabled. Doing otherwise could cause underruns.
 		 */
 		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
-			WARN_ON(wm_lp != 1);
+			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
 		} else
 			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
@@ -3426,14 +3409,12 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 	/* LP0 register values */
 	for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
 		enum pipe pipe = intel_crtc->pipe;
-		const struct intel_wm_level *r =
-			&intel_crtc->wm.active.ilk.wm[0];
+		const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
+		const struct intel_wm_level *r = &pipe_wm->wm[0];
 
-		if (WARN_ON(!r->enable))
+		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
 			continue;
 
-		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
-
 		results->wm_pipe[pipe] =
 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
 			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
@@ -3472,7 +3453,6 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
 
 /* dirty bits used to track which watermarks need changes */
 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
 #define WM_DIRTY_FBC (1 << 24)
@@ -3487,12 +3467,6 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
 	int wm_lp;
 
 	for_each_pipe(dev_priv, pipe) {
-		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
-			dirty |= WM_DIRTY_LINETIME(pipe);
-			/* Must disable LP1+ watermarks too */
-			dirty |= WM_DIRTY_LP_ALL;
-		}
-
 		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
 			dirty |= WM_DIRTY_PIPE(pipe);
 			/* Must disable LP1+ watermarks too */
@@ -3584,13 +3558,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
-		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
-		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
-		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
-
 	if (dirty & WM_DIRTY_DDB) {
 		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 			val = I915_READ(WM_MISC);
@@ -3644,26 +3611,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
 {
-	u8 enabled_slices;
+	int i;
+	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+	u8 enabled_slices_mask = 0;
 
-	/* Slice 1 will always be enabled */
-	enabled_slices = 1;
+	for (i = 0; i < max_slices; i++) {
+		if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+			enabled_slices_mask |= BIT(i);
+	}
 
-	/* Gen prior to GEN11 have only one DBuf slice */
-	if (INTEL_GEN(dev_priv) < 11)
-		return enabled_slices;
-
-	/*
-	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
-	 * only that 1 slice enabled until we have a proper way for on-demand
-	 * toggling of the second slice.
-	 */
-	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
-		enabled_slices++;
-
-	return enabled_slices;
+	return enabled_slices_mask;
 }
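The mask-building loop generalizes the old "count the slices" logic into a
per-slice bitmap; a minimal sketch of the same shape, with reg_read()
standing in for the DBUF_CTL_S(i) reads (assumed bit position, for
illustration only):

#include <assert.h>

#define DBUF_POWER_STATE (1u << 30)	/* hypothetical status bit */

static unsigned int reg_read(int slice)
{
	return slice == 0 ? DBUF_POWER_STATE : 0;	/* only S1 powered */
}

int main(void)
{
	unsigned int mask = 0;
	int i, max_slices = 2;

	for (i = 0; i < max_slices; i++) {
		if (reg_read(i) & DBUF_POWER_STATE)
			mask |= 1u << i;
	}

	assert(mask == 0x1);
	return 0;
}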
 
 /*
@@ -3864,47 +3823,46 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	return true;
 }
 
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
-			      const struct intel_crtc_state *crtc_state,
-			      const u64 total_data_rate,
-			      const int num_active,
-			      struct skl_ddb_allocation *ddb)
+/*
+ * Calculate the initial DBuf slice offset, based on the slice size
+ * and mask (i.e. if the slice size is 1024 and only the second slice
+ * is enabled, the offset would be 1024).
+ */
+static unsigned int
+icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+				u32 slice_size,
+				u32 ddb_size)
 {
-	const struct drm_display_mode *adjusted_mode;
-	u64 total_data_bw;
+	unsigned int offset = 0;
+
+	if (!dbuf_slice_mask)
+		return 0;
+
+	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+	WARN_ON(offset >= ddb_size);
+	return offset;
+}
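The offset computation is just "skip all the slices below the first enabled
one"; the same arithmetic outside the kernel:

#include <assert.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int slice_size = 1024, ddb_size = 2048;
	unsigned int offset;

	/* only slice S2 (bit 1) enabled: skip one whole slice */
	offset = (ffs(1u << 1) - 1) * slice_size;
	assert(offset == 1024 && offset < ddb_size);

	/* slice S1 (bit 0) enabled: start at the beginning */
	assert((ffs(1u << 0) - 1) * slice_size == 0);
	return 0;
}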
+
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
+{
 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
 
-	WARN_ON(ddb_size == 0);
+	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
 
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	adjusted_mode = &crtc_state->hw.adjusted_mode;
-	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
-
-	/*
-	 * 12GB/s is maximum BW supported by single DBuf slice.
-	 *
-	 * FIXME dbuf slice code is broken:
-	 * - must wait for planes to stop using the slice before powering it off
-	 * - plane straddling both slices is illegal in multi-pipe scenarios
-	 * - should validate we stay within the hw bandwidth limits
-	 */
-	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
-		ddb->enabled_slices = 2;
-	} else {
-		ddb->enabled_slices = 1;
-		ddb_size /= 2;
-	}
-
 	return ddb_size;
 }
 
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+				  u8 active_pipes);
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *crtc_state,
 				   const u64 total_data_rate,
-				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
@@ -3912,12 +3870,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
 	const struct intel_crtc *crtc;
-	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
+	u32 ddb_range_size;
 	u32 i;
+	u32 dbuf_slice_mask;
+	u32 active_pipes;
+	u32 offset;
+	u32 slice_size;
+	u32 total_slice_mask;
+	u32 start, end;
 
-	if (WARN_ON(!state) || !crtc_state->hw.active) {
+	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
 		alloc->start = 0;
 		alloc->end = 0;
 		*num_active = hweight8(dev_priv->active_pipes);
@@ -3925,12 +3890,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	}
 
 	if (intel_state->active_pipe_changes)
-		*num_active = hweight8(intel_state->active_pipes);
+		active_pipes = intel_state->active_pipes;
 	else
-		*num_active = hweight8(dev_priv->active_pipes);
+		active_pipes = dev_priv->active_pipes;
 
-	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
-				      *num_active, ddb);
+	*num_active = hweight8(active_pipes);
+
+	ddb_size = intel_get_ddb_size(dev_priv);
+
+	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
 
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
@@ -3950,30 +3918,95 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	}
 
 	/*
+	 * Get the allowed DBuf slices for the corresponding pipe and platform.
+	 */
+	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
+
+	DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
+		      dbuf_slice_mask,
+		      pipe_name(for_pipe), active_pipes);
+
+	/*
+	 * Figure out at which DBuf slice we start, i.e. if we start at
+	 * DBuf S2 and the slice size is 1024, the offset would be 1024.
+	 */
+	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+						 slice_size, ddb_size);
+
+	/*
+	 * Figure out the total size of the allowed DBuf slices, which is
+	 * the number of slices allowed for this pipe multiplied by the
+	 * slice size. Within this range, DDB entries are still allocated
+	 * in proportion to display width.
+	 */
+	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
+	/*
 	 * Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, So instead of allocating DDB equally among pipes
 	 * distribute DDB based on resolution/width of the display.
 	 */
+	total_slice_mask = dbuf_slice_mask;
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
 		const struct drm_display_mode *adjusted_mode =
 			&crtc_state->hw.adjusted_mode;
 		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
+		u32 pipe_dbuf_slice_mask;
 
-		if (!crtc_state->hw.enable)
+		if (!crtc_state->hw.active)
+			continue;
+
+		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
+							       active_pipes);
+
+		/*
+		 * According to BSpec a pipe can share one DBuf slice with
+		 * other pipes, or it can use multiple slices; in both cases
+		 * we account for other pipes only if they have exactly the
+		 * same mask. However, we still need to track how many slices
+		 * should be enabled in total.
+		 */
+		total_slice_mask |= pipe_dbuf_slice_mask;
+
+		/*
+		 * Do not account for pipes using other slice sets. Luckily,
+		 * as of the current BSpec, slice sets do not partially
+		 * intersect (pipes share either the same single slice or the
+		 * same slice set, i.e. no partial intersection), so checking
+		 * for equality is enough for now.
+		 */
+		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
 			continue;
 
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
-		total_width += hdisplay;
+
+		total_width_in_range += hdisplay;
 
 		if (pipe < for_pipe)
-			width_before_pipe += hdisplay;
+			width_before_pipe_in_range += hdisplay;
 		else if (pipe == for_pipe)
 			pipe_width = hdisplay;
 	}
 
-	alloc->start = ddb_size * width_before_pipe / total_width;
-	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+	/*
+	 * FIXME: For now we always enable slice S1 as per
+	 * the Bspec display initialization sequence.
+	 */
+	intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+
+	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
+	end = ddb_range_size *
+		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
+
+	alloc->start = offset + start;
+	alloc->end = offset + end;
+
+	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+		      alloc->start, alloc->end);
+	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+		      intel_state->enabled_dbuf_slices_mask,
+		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
 }
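A userspace sketch of the resulting width-proportional split within the
pipe's slice range (numbers are made up; two pipes of 1920 and 1280 pixels
sharing a single 1024-block slice that starts at offset 1024):

#include <stdio.h>

int main(void)
{
	unsigned int offset = 1024;		/* first enabled slice is S2 */
	unsigned int ddb_range_size = 1024;	/* one slice for this pipe set */
	unsigned int width_before = 1920, pipe_width = 1280;
	unsigned int total_width = 1920 + 1280;

	unsigned int start = ddb_range_size * width_before / total_width;
	unsigned int end = ddb_range_size * (width_before + pipe_width) /
			   total_width;

	/* prints "ddb 1638-2048" for the second pipe */
	printf("ddb %u-%u\n", offset + start, offset + end);
	return 0;
}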
 
 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4002,7 +4035,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
 				    DRM_FORMAT_MOD_LINEAR,
 				    DRM_MODE_ROTATE_0,
 				    crtc_state->pixel_rate, &wp, 0);
-	WARN_ON(ret);
+	drm_WARN_ON(&dev_priv->drm, ret);
 
 	for (level = 0; level <= max_level; level++) {
 		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
@@ -4091,10 +4124,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 	intel_display_power_put(dev_priv, power_domain, wakeref);
 }
 
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-			  struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
 {
-	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	dev_priv->enabled_dbuf_slices_mask =
+				intel_enabled_dbuf_slices_mask(dev_priv);
 }
 
 /*
@@ -4144,6 +4177,254 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
 	return mul_fixed16(downscale_w, downscale_h);
 }
 
+struct dbuf_slice_conf_entry {
+	u8 active_pipes;
+	u8 dbuf_mask[I915_MAX_PIPES];
+};
+
+/*
+ * Table taken from BSpec 12716.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * some hardcoded requirements on how those should be distributed
+ * for multi-pipe scenarios. With more DBuf slices the algorithm
+ * would get even more messy and less readable, so we decided to
+ * use a table almost as-is from the BSpec itself - that way it is
+ * at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{}
+};
+
+/*
+ * Table taken from BSpec 49255.
+ * Pipes have some preferred DBuf slice affinity, plus there are
+ * some hardcoded requirements on how those should be distributed
+ * for multi-pipe scenarios. With more DBuf slices the algorithm
+ * would get even more messy and less readable, so we decided to
+ * use a table almost as-is from the BSpec itself - that way it is
+ * at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S2),
+			[PIPE_B] = BIT(DBUF_S1),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2),
+		},
+	},
+	{}
+};
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+			      const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+	int i;
+
+	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+		if (dbuf_slices[i].active_pipes == active_pipes)
+			return dbuf_slices[i].dbuf_mask[pipe];
+	}
+	return 0;
+}
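The table walk relies on the empty {} entry as a terminator; a compact
sketch of the same sentinel-terminated lookup with a toy table:

#include <assert.h>

struct conf {
	unsigned char active_pipes;
	unsigned char dbuf_mask[3];
};

static unsigned char lookup(int pipe, unsigned char active,
			    const struct conf *table)
{
	int i;

	for (i = 0; table[i].active_pipes != 0; i++) {
		if (table[i].active_pipes == active)
			return table[i].dbuf_mask[pipe];
	}
	return 0;	/* no match: no slices for this pipe */
}

int main(void)
{
	/* toy table: pipe A alone -> slice 1; A+B -> A gets 1, B gets 2 */
	static const struct conf table[] = {
		{ 0x1, { 0x1 } },
		{ 0x3, { 0x1, 0x2 } },
		{}
	};

	assert(lookup(0, 0x1, table) == 0x1);
	assert(lookup(1, 0x3, table) == 0x2);
	assert(lookup(0, 0x4, table) == 0);
	return 0;
}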
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as specified in the BSpec
+ * for the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+	/*
+	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
+	 * revision required calculating a "pipe ratio" to determine whether
+	 * one or two slices can be used for single-pipe configurations, as
+	 * an additional constraint on top of the existing table. However,
+	 * based on more recent info it should not be a "pipe ratio" but
+	 * rather a ratio between pixel_rate and cdclk with additional
+	 * constants, so for now we use only the table until this is
+	 * clarified. This is also why skl_compute_dbuf_slices() still takes
+	 * a crtc_state parameter - we will need it once those additional
+	 * constraints pop up.
+	 */
+	return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+				  u8 active_pipes)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	if (IS_GEN(dev_priv, 12))
+		return tgl_compute_dbuf_slices(pipe, active_pipes);
+	else if (IS_GEN(dev_priv, 11))
+		return icl_compute_dbuf_slices(pipe, active_pipes);
+	/*
+	 * For anything else just return one slice for now.
+	 * Should be extended for other platforms.
+	 */
+	return BIT(DBUF_S1);
+}
+
 static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state,
@@ -4195,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 				 u64 *plane_data_rate,
 				 u64 *uv_plane_data_rate)
 {
-	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct intel_plane *plane;
 	const struct intel_plane_state *plane_state;
 	u64 total_data_rate = 0;
 
-	if (WARN_ON(!state))
-		return 0;
-
 	/* Calculate and cache data rate for each plane */
 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
 		enum plane_id plane_id = plane->id;
@@ -4230,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 	const struct intel_plane_state *plane_state;
 	u64 total_data_rate = 0;
 
-	if (WARN_ON(!crtc_state->uapi.state))
-		return 0;
-
 	/* Calculate and cache data rate for each plane */
 	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
 		enum plane_id plane_id = plane->id;
@@ -4271,13 +4545,10 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 }
 
 static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
-		      struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
 {
-	struct drm_atomic_state *state = crtc_state->uapi.state;
-	struct drm_crtc *crtc = crtc_state->uapi.crtc;
-	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
 	u16 alloc_size, start = 0;
 	u16 total[I915_MAX_PLANES] = {};
@@ -4294,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
 	memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
 
-	if (WARN_ON(!state))
-		return 0;
-
 	if (!crtc_state->hw.active) {
 		alloc->start = alloc->end = 0;
 		return 0;
@@ -4314,7 +4582,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 
 
 	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
-					   ddb, alloc, &num_active);
+					   alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -4335,13 +4603,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 */
 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
 		blocks = 0;
-		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+		for_each_plane_id_on_crtc(crtc, plane_id) {
 			const struct skl_plane_wm *wm =
 				&crtc_state->wm.skl.optimal.planes[plane_id];
 
 			if (plane_id == PLANE_CURSOR) {
 				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
-					WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
+					drm_WARN_ON(&dev_priv->drm,
+						    wm->wm[level].min_ddb_alloc != U16_MAX);
 					blocks = U32_MAX;
 					break;
 				}
@@ -4371,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * watermark level, plus an extra share of the leftover blocks
 	 * proportional to its relative data rate.
 	 */
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+	for_each_plane_id_on_crtc(crtc, plane_id) {
 		const struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 		u64 rate;
@@ -4406,11 +4675,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		alloc_size -= extra;
 		total_data_rate -= rate;
 	}
-	WARN_ON(alloc_size != 0 || total_data_rate != 0);
+	drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
 
 	/* Set the actual DDB start/end points for each plane */
 	start = alloc->start;
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+	for_each_plane_id_on_crtc(crtc, plane_id) {
 		struct skl_ddb_entry *plane_alloc =
 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
 		struct skl_ddb_entry *uv_plane_alloc =
@@ -4420,7 +4689,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 			continue;
 
 		/* Gen11+ uses a separate plane for UV watermarks */
-		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+		drm_WARN_ON(&dev_priv->drm,
+			    INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
 
 		/* Leave disabled planes at (0,0) */
 		if (total[plane_id]) {
@@ -4443,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * that aren't actually possible.
 	 */
 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
-		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+		for_each_plane_id_on_crtc(crtc, plane_id) {
 			struct skl_plane_wm *wm =
 				&crtc_state->wm.skl.optimal.planes[plane_id];
 
@@ -4480,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * Go back and disable the transition watermark if it turns out we
 	 * don't have enough DDB blocks for it.
 	 */
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+	for_each_plane_id_on_crtc(crtc, plane_id) {
 		struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
@@ -4844,45 +5114,36 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 	}
 }
 
-static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
-	struct drm_atomic_state *state = crtc_state->uapi.state;
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	uint_fixed_16_16_t linetime_us;
-	u32 linetime_wm;
-
-	linetime_us = intel_get_linetime_us(crtc_state);
-	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
-
-	/* Display WA #1135: BXT:ALL GLK:ALL */
-	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
-		linetime_wm /= 2;
-
-	return linetime_wm;
-}
-
 static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
 				      const struct skl_wm_params *wp,
 				      struct skl_plane_wm *wm)
 {
 	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
-	u16 trans_min, trans_y_tile_min;
-	const u16 trans_amount = 10; /* This is configurable amount */
+	u16 trans_min, trans_amount, trans_y_tile_min;
 	u16 wm0_sel_res_b, trans_offset_b, res_blocks;
 
-	/* Transition WM are not recommended by HW team for GEN9 */
-	if (INTEL_GEN(dev_priv) <= 9)
-		return;
-
 	/* Transition WM don't make any sense if ipc is disabled */
 	if (!dev_priv->ipc_enabled)
 		return;
 
-	trans_min = 14;
+	/*
+	 * WaDisableTWM:skl,kbl,cfl,bxt
+	 * Transition WMs are not recommended by the HW team for GEN9
+	 */
+	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+		return;
+
 	if (INTEL_GEN(dev_priv) >= 11)
 		trans_min = 4;
+	else
+		trans_min = 14;
+
+	/* Display WA #1140: glk,cnl */
+	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+		trans_amount = 0;
+	else
+		trans_amount = 10; /* This is configurable amount */
 
 	trans_offset_b = trans_min + trans_amount;
 
@@ -4909,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
 		/* WA BUG:1938466 add one block for non y-tile planes */
 		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
 			res_blocks += 1;
-
 	}
 
 	/*
@@ -5049,8 +5309,6 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
 			return ret;
 	}
 
-	pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
-
 	return 0;
 }
 
@@ -5059,9 +5317,10 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 				const struct skl_ddb_entry *entry)
 {
 	if (entry->end)
-		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
+		intel_de_write_fw(dev_priv, reg,
+				  (entry->end - 1) << 16 | entry->start);
 	else
-		I915_WRITE_FW(reg, 0);
+		intel_de_write_fw(dev_priv, reg, 0);
 }
 
 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5077,7 +5336,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 	val |= level->plane_res_b;
 	val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
 
-	I915_WRITE_FW(reg, val);
+	intel_de_write_fw(dev_priv, reg, val);
 }
 
 void skl_write_plane_wm(struct intel_plane *plane,
@@ -5153,31 +5412,18 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
 	int level, max_level = ilk_wm_max_level(dev_priv);
 
 	for (level = 0; level <= max_level; level++) {
-		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
-		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+		/*
+		 * We don't check uv_wm as the hardware doesn't actually
+		 * use it. It only gets used for calculating the required
+		 * ddb allocation.
+		 */
+		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
 			return false;
 	}
 
 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
-static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
-			       const struct skl_pipe_wm *wm1,
-			       const struct skl_pipe_wm *wm2)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum plane_id plane_id;
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (!skl_plane_wm_equals(dev_priv,
-					 &wm1->planes[plane_id],
-					 &wm2->planes[plane_id]))
-			return false;
-	}
-
-	return wm1->linetime == wm2->linetime;
-}
-
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
 					   const struct skl_ddb_entry *b)
 {
@@ -5231,18 +5477,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 static int
 skl_compute_ddb(struct intel_atomic_state *state)
 {
-	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
 	int ret, i;
 
-	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+	state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+		ret = skl_allocate_pipe_ddb(new_crtc_state);
 		if (ret)
 			return ret;
 
@@ -5439,8 +5684,6 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
 	 * to grab the lock on *all* CRTC's.
 	 */
 	if (state->active_pipe_changes || state->modeset) {
-		state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
-
 		ret = intel_add_all_pipes(state);
 		if (ret)
 			return ret;
@@ -5515,12 +5758,8 @@ skl_compute_wm(struct intel_atomic_state *state)
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc_state *old_crtc_state;
-	struct skl_ddb_values *results = &state->wm_results;
 	int ret, i;
 
-	/* Clear all dirty flags */
-	results->dirty_pipes = 0;
-
 	ret = skl_ddb_add_affected_pipes(state);
 	if (ret)
 		return ret;
@@ -5528,68 +5767,36 @@ skl_compute_wm(struct intel_atomic_state *state)
 	/*
 	 * Calculate WM's for all pipes that are part of this transaction.
 	 * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
-	 * weren't otherwise being modified (and set bits in dirty_pipes) if
-	 * pipe allocations had to change.
+	 * weren't otherwise being modified if pipe allocations had to change.
 	 */
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
 		ret = skl_build_pipe_wm(new_crtc_state);
 		if (ret)
 			return ret;
-
-		ret = skl_wm_add_affected_planes(state, crtc);
-		if (ret)
-			return ret;
-
-		if (!skl_pipe_wm_equals(crtc,
-					&old_crtc_state->wm.skl.optimal,
-					&new_crtc_state->wm.skl.optimal))
-			results->dirty_pipes |= BIT(crtc->pipe);
 	}
 
 	ret = skl_compute_ddb(state);
 	if (ret)
 		return ret;
 
+	/*
+	 * skl_compute_ddb() will have adjusted the final watermarks
+	 * based on how much ddb is available. Now we can actually
+	 * check if the final watermarks changed.
+	 */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_wm_add_affected_planes(state, crtc);
+		if (ret)
+			return ret;
+	}
+
 	skl_print_wm_changes(state);
 
 	return 0;
 }
 
-static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
-				      struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
-	enum pipe pipe = crtc->pipe;
-
-	if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
-		return;
-
-	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-}
-
-static void skl_initial_wm(struct intel_atomic_state *state,
-			   struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct intel_crtc_state *crtc_state =
-		intel_atomic_get_new_crtc_state(state, crtc);
-	struct skl_ddb_values *results = &state->wm_results;
-
-	if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
-		return;
-
-	mutex_lock(&dev_priv->wm.wm_mutex);
-
-	if (crtc_state->uapi.active_changed)
-		skl_atomic_update_crtc_wm(state, crtc);
-
-	mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
 				  struct intel_wm_config *config)
 {
@@ -5712,25 +5919,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 
 	if (!crtc->active)
 		return;
-
-	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
 }
 
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
-	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
-	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *crtc_state;
 
-	skl_ddb_get_hw_state(dev_priv, ddb);
+	skl_ddb_get_hw_state(dev_priv);
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		crtc_state = to_intel_crtc_state(crtc->base.state);
 
 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
-
-		if (crtc->active)
-			hw->dirty_pipes |= BIT(crtc->pipe);
 	}
 
 	if (dev_priv->active_pipes) {
@@ -5754,8 +5954,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 	};
 
 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
 	memset(active, 0, sizeof(*active));
 
@@ -5774,7 +5972,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
 		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
-		active->linetime = hw->wm_linetime[pipe];
 	} else {
 		int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -6629,20 +6826,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
 		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
 
-	/* WaEnable32PlaneMode:icl */
-	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-		   _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
-	/*
-	 * Wa_1408615072:icl,ehl  (vsunit)
-	 * Wa_1407596294:icl,ehl  (hsunit)
-	 */
-	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
-			 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
-	/* Wa_1407352427:icl,ehl */
-	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
-			 0, PSDUNIT_CLKGATE_DIS);
+	/* Wa_14010594013:icl,ehl */
+	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+			 0, CNL_DELAY_PMRSP);
 }
 
 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6650,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
 	u32 vd_pg_enable = 0;
 	unsigned int i;
 
-	/* Wa_1408615072:tgl */
-	intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
-			 0, VSUNIT_CLKGATE_DIS_TGL);
-
 	/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
 	for (i = 0; i < I915_MAX_VCS; i++) {
 		if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -6663,6 +6845,11 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
 
 	I915_WRITE(POWERGATE_ENABLE,
 		   I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+
+	/* Wa_1409825376:tgl (pre-prod) */
+	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+			   TGL_VRH_GATING_DIS);
 }
 
 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7248,8 +7435,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 	/* For FIFO watermark updates */
 	if (INTEL_GEN(dev_priv) >= 9) {
 		skl_setup_wm_latency(dev_priv);
-		dev_priv->display.initial_watermarks = skl_initial_wm;
-		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		ilk_setup_wm_latency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c06c6a8..d60a854 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -17,7 +17,6 @@ struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
 struct intel_plane;
-struct skl_ddb_allocation;
 struct skl_ddb_entry;
 struct skl_pipe_wm;
 struct skl_wm_level;
@@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-			  struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			      struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 0648eda..3f13baa 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -242,8 +242,9 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
 	 * FIXME: There might be some registers where all 1's is a valid value,
 	 * so ideally we should check the register offset instead...
 	 */
-	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
-	     pipe_name(pipe), reg, val);
+	drm_WARN(&i915->drm, val == 0xffffffff,
+		 "DPIO read pipe %c reg 0x%x == 0x%x\n",
+		 pipe_name(pipe), reg, val);
 
 	return val;
 }
@@ -366,6 +367,10 @@ static inline int gen7_check_mailbox_status(u32 mbox)
 		return -ETIMEDOUT;
 	case GEN7_PCODE_ILLEGAL_DATA:
 		return -EINVAL;
+	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+		return -ENXIO;
+	case GEN11_PCODE_LOCKED:
+		return -EBUSY;
 	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
 		return -EOVERFLOW;
 	default:
@@ -526,7 +531,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
 	 */
 	drm_dbg_kms(&i915->drm,
 		    "PCODE timeout, retrying with preemption disabled\n");
-	WARN_ON_ONCE(timeout_base_ms > 3);
+	drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
 	preempt_disable();
 	ret = wait_for_atomic(COND, 50);
 	preempt_enable();
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 5f2cf6f..abb18b9 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -324,8 +324,9 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
 	 * w/a for a sporadic read returning 0 by waiting for the GT
 	 * thread to wake up.
 	 */
-	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
-		  "GT thread status wait timed out\n");
+	drm_WARN_ONCE(&uncore->i915->drm,
+		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
+		      "GT thread status wait timed out\n");
 }
 
 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
@@ -441,7 +442,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
 		cond_resched();
 	}
 
-	WARN_ON(active_domains);
+	drm_WARN_ON(&uncore->i915->drm, active_domains);
 
 	fw = uncore->fw_domains_active;
 	if (fw)
@@ -757,9 +758,9 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
 	if (!uncore->funcs.force_wake_get)
 		return;
 
-	WARN(uncore->fw_domains_active,
-	     "Expected all fw_domains to be inactive, but %08x are still on\n",
-	     uncore->fw_domains_active);
+	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
+		 "Expected all fw_domains to be inactive, but %08x are still on\n",
+		 uncore->fw_domains_active);
 }
 
 void assert_forcewakes_active(struct intel_uncore *uncore,
@@ -779,9 +780,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 	assert_rpm_wakelock_held(uncore->rpm);
 
 	fw_domains &= uncore->fw_domains;
-	WARN(fw_domains & ~uncore->fw_domains_active,
-	     "Expected %08x fw_domains to be active, but %08x are off\n",
-	     fw_domains, fw_domains & ~uncore->fw_domains_active);
+	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
+		 "Expected %08x fw_domains to be active, but %08x are off\n",
+		 fw_domains, fw_domains & ~uncore->fw_domains_active);
 
 	/*
 	 * Check that the caller has an explicit wakeref and we don't mistake
@@ -794,9 +795,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
 		if (uncore->fw_domains_timer & domain->mask)
 			expect++; /* pending automatic release */
 
-		if (WARN(actual < expect,
-			 "Expected domain %d to be held awake by caller, count=%d\n",
-			 domain->id, actual))
+		if (drm_WARN(&uncore->i915->drm, actual < expect,
+			     "Expected domain %d to be held awake by caller, count=%d\n",
+			     domain->id, actual))
 			break;
 	}
 
@@ -866,9 +867,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
 	if (entry->domains == FORCEWAKE_ALL)
 		return uncore->fw_domains;
 
-	WARN(entry->domains & ~uncore->fw_domains,
-	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
-	     entry->domains & ~uncore->fw_domains, offset);
+	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
+		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+		 entry->domains & ~uncore->fw_domains, offset);
 
 	return entry->domains;
 }
@@ -1158,10 +1159,11 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
 		      const bool read,
 		      const bool before)
 {
-	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
-		 "Unclaimed %s register 0x%x\n",
-		 read ? "read from" : "write to",
-		 i915_mmio_reg_offset(reg)))
+	if (drm_WARN(&uncore->i915->drm,
+		     check_for_unclaimed_mmio(uncore) && !before,
+		     "Unclaimed %s register 0x%x\n",
+		     read ? "read from" : "write to",
+		     i915_mmio_reg_offset(reg)))
 		/* Only report the first N failures */
 		i915_modparams.mmio_debug--;
 }
@@ -1436,8 +1438,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
 	if (!d)
 		return -ENOMEM;
 
-	WARN_ON(!i915_mmio_reg_valid(reg_set));
-	WARN_ON(!i915_mmio_reg_valid(reg_ack));
+	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
+	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
 
 	d->uncore = uncore;
 	d->wake_count = 0;
@@ -1482,8 +1484,8 @@ static void fw_domain_fini(struct intel_uncore *uncore,
 		return;
 
 	uncore->fw_domains &= ~BIT(domain_id);
-	WARN_ON(d->wake_count);
-	WARN_ON(hrtimer_cancel(&d->timer));
+	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
+	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
 	kfree(d);
 }
 
@@ -1613,7 +1615,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 #undef fw_domain_init
 
 	/* All future platforms are expected to require complex power gating */
-	WARN_ON(!ret && uncore->fw_domains == 0);
+	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
 
 out:
 	if (ret)
@@ -2108,7 +2110,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 {
 	enum forcewake_domains fw_domains = 0;
 
-	WARN_ON(!op);
+	drm_WARN_ON(&uncore->i915->drm, !op);
 
 	if (!intel_uncore_has_forcewake(uncore))
 		return 0;
@@ -2119,7 +2121,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
 	if (op & FW_REG_WRITE)
 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
 
-	WARN_ON(fw_domains & ~uncore->fw_domains);
+	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
 
 	return fw_domains;
 }
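
intel_uncore_forcewake_for_reg(), updated in the last two hunks, computes which forcewake domains a register access needs so that callers can grab them once around a whole batch of raw accesses. A typical caller pattern, sketched from the driver's existing conventions (not part of this diff):

	enum forcewake_domains fw_domains;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);
	val = intel_uncore_read_fw(uncore, reg); /* no per-access fw handling */
	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);
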
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0..68bbb15 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
 	return err;
 }
 
+static int live_active_barrier(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	struct live_active *active;
+	int err = 0;
+
+	/* Check that we get a callback when requests retire upon waiting */
+
+	active = __live_alloc(i915);
+	if (!active)
+		return -ENOMEM;
+
+	err = i915_active_acquire(&active->base);
+	if (err)
+		goto out;
+
+	for_each_uabi_engine(engine, i915) {
+		err = i915_active_acquire_preallocate_barrier(&active->base,
+							      engine);
+		if (err)
+			break;
+
+		i915_active_acquire_barrier(&active->base);
+	}
+
+	i915_active_release(&active->base);
+
+	if (err == 0)
+		err = i915_active_wait(&active->base);
+
+	if (err == 0 && !READ_ONCE(active->retired)) {
+		pr_err("i915_active not retired after flushing barriers!\n");
+		err = -EINVAL;
+	}
+
+out:
+	__live_put(active);
+
+	if (igt_flush_test(i915))
+		err = -EIO;
+
+	return err;
+}
+
 int i915_active_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_active_wait),
 		SUBTEST(live_active_retire),
+		SUBTEST(live_active_barrier),
 	};
 
 	if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
 	spin_unlock_irq(lock);
 }
 
+static void active_flush(struct i915_active *ref,
+			 struct i915_active_fence *active)
+{
+	struct dma_fence *fence;
+
+	fence = xchg(__active_fence_slot(active), NULL);
+	if (!fence)
+		return;
+
+	spin_lock_irq(fence->lock);
+	__list_del_entry(&active->cb.node);
+	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+	atomic_dec(&ref->count);
+
+	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
 void i915_active_unlock_wait(struct i915_active *ref)
 {
 	if (i915_active_acquire_if_busy(ref)) {
 		struct active_node *it, *n;
 
+		/* Wait for all active callbacks */
 		rcu_read_lock();
-		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-			struct dma_fence *f;
-
-			/* Wait for all active callbacks */
-			f = rcu_dereference(it->base.fence);
-			if (f)
-				spin_unlock_wait(f->lock);
-		}
+		active_flush(ref, &ref->excl);
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+			active_flush(ref, &it->base);
 		rcu_read_unlock();
 
 		i915_active_release(ref);
 	}
 
 	/* And wait for the retire callback */
-	spin_lock_irq(&ref->tree_lock);
-	spin_unlock_irq(&ref->tree_lock);
+	spin_unlock_wait(&ref->tree_lock);
 
 	/* ... which may have been on a thread instead */
 	flush_work(&ref->work);
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856ba..939a6ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
 static int igt_buddy_alloc_smoke(void *arg)
 {
 	struct i915_buddy_mm mm;
-	int max_order;
+	IGT_TIMEOUT(end_time);
+	I915_RND_STATE(prng);
 	u64 chunk_size;
 	u64 mm_size;
-	int err;
+	int *order;
+	int err, i;
 
 	igt_mm_config(&mm_size, &chunk_size);
 
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
 		return err;
 	}
 
-	for (max_order = mm.max_order; max_order >= 0; max_order--) {
+	order = i915_random_order(mm.max_order + 1, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_fini;
+	}
+
+	for (i = 0; i <= mm.max_order; ++i) {
 		struct i915_buddy_block *block;
-		int order;
+		int max_order = order[i];
+		bool timeout = false;
 		LIST_HEAD(blocks);
+		int order;
 		u64 total;
 
 		err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ static int igt_buddy_alloc_smoke(void *arg)
 			}
 
 			total += i915_buddy_block_size(&mm, block);
+
+			if (__igt_timeout(end_time, NULL)) {
+				timeout = true;
+				break;
+			}
 		} while (total < mm.size);
 
 		if (!err)
@@ -373,7 +386,7 @@ static int igt_buddy_alloc_smoke(void *arg)
 				pr_err("post-mm check failed\n");
 		}
 
-		if (err)
+		if (err || timeout)
 			break;
 
 		cond_resched();
@@ -382,6 +395,8 @@ static int igt_buddy_alloc_smoke(void *arg)
 	if (err == -ENOMEM)
 		err = 0;
 
+	kfree(order);
+out_fini:
 	i915_buddy_fini(&mm);
 
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 78f36fa..623759b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915)
 	intel_wakeref_t wakeref;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		i915_gem_suspend_gtt_mappings(i915);
+		i915_ggtt_suspend(&i915->ggtt);
 		i915_gem_suspend_late(i915);
 	}
 }
@@ -108,7 +108,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
 	intel_wakeref_t wakeref;
 
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		i915_gem_suspend_gtt_mappings(i915);
+		i915_ggtt_suspend(&i915->ggtt);
 
 		i915_gem_freeze(i915);
 		i915_gem_freeze_late(i915);
@@ -124,7 +124,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	 * that runtime-pm just works.
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		i915_gem_restore_gtt_mappings(i915);
+		i915_ggtt_resume(&i915->ggtt);
 		i915_gem_restore_fences(&i915->ggtt);
 
 		i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7..0a953bf 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
 selftest(perf, i915_perf_live_selftests)
 /* Here be dragons: keep last to run last! */
 selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 5a577a1..3bf7f53 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -17,3 +17,4 @@
  */
 selftest(engine_cs, intel_engine_cs_perf_selftests)
 selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe..9ad4ab0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
 cancel_rq:
 	if (err) {
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
 		i915_request_add(rq);
 	}
 unpin_hws:
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3ef3620..2a1d4ba 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/prime_numbers.h>
+#include <linux/sort.h>
 
 #include "../i915_selftest.h"
 
@@ -19,6 +20,7 @@
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "i915_memcpy.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"
 
@@ -572,6 +574,195 @@ static int igt_lmem_write_cpu(void *arg)
 	return err;
 }
 
+static const char *repr_type(u32 type)
+{
+	switch (type) {
+	case I915_MAP_WB:
+		return "WB";
+	case I915_MAP_WC:
+		return "WC";
+	}
+
+	return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+			  void **out_addr)
+{
+	struct drm_i915_gem_object *obj;
+	void *addr;
+
+	obj = i915_gem_object_create_region(mr, size, 0);
+	if (IS_ERR(obj))
+		return obj;
+
+	addr = i915_gem_object_pin_map(obj, type);
+	if (IS_ERR(addr)) {
+		i915_gem_object_put(obj);
+		if (PTR_ERR(addr) == -ENXIO)
+			return ERR_PTR(-ENODEV);
+		return addr;
+	}
+
+	*out_addr = addr;
+	return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+	const ktime_t *a = A, *b = B;
+
+	return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+	unsigned long *tmp = dst;
+	const unsigned long *s = src;
+
+	size = size / sizeof(unsigned long);
+	while (size--)
+		*tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+	memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+	i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+			struct intel_memory_region *dst_mr,
+			u64 size, u32 src_type, u32 dst_type)
+{
+	struct drm_i915_private *i915 = src_mr->i915;
+	const struct {
+		const char *name;
+		void (*copy)(void *dst, const void *src, size_t size);
+		bool skip;
+	} tests[] = {
+		{
+			"memcpy",
+			igt_memcpy,
+		},
+		{
+			"memcpy_long",
+			igt_memcpy_long,
+		},
+		{
+			"memcpy_from_wc",
+			igt_memcpy_from_wc,
+			!i915_has_memcpy_from_wc(),
+		},
+	};
+	struct drm_i915_gem_object *src, *dst;
+	void *src_addr, *dst_addr;
+	int ret = 0;
+	int i;
+
+	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+	if (IS_ERR(src)) {
+		ret = PTR_ERR(src);
+		goto out;
+	}
+
+	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+	if (IS_ERR(dst)) {
+		ret = PTR_ERR(dst);
+		goto out_unpin_src;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+		ktime_t t[5];
+		int pass;
+
+		if (tests[i].skip)
+			continue;
+
+		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+			ktime_t t0, t1;
+
+			t0 = ktime_get();
+
+			tests[i].copy(dst_addr, src_addr, size);
+
+			t1 = ktime_get();
+			t[pass] = ktime_sub(t1, t0);
+		}
+
+		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+			__func__,
+			src_mr->name,
+			repr_type(src_type),
+			dst_mr->name,
+			repr_type(dst_type),
+			tests[i].name,
+			size >> 10,
+			div64_u64(mul_u32_u32(4 * size,
+					      1000 * 1000 * 1000),
+				  t[1] + 2 * t[2] + t[3]) >> 20);
+
+		cond_resched();
+	}
+
+	i915_gem_object_unpin_map(dst);
+	i915_gem_object_put(dst);
+out_unpin_src:
+	i915_gem_object_unpin_map(src);
+	i915_gem_object_put(src);
+
+	i915_gem_drain_freed_objects(i915);
+out:
+	if (ret == -ENODEV)
+		ret = 0;
+
+	return ret;
+}
+
+static int perf_memcpy(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static const u32 types[] = {
+		I915_MAP_WB,
+		I915_MAP_WC,
+	};
+	static const u32 sizes[] = {
+		SZ_4K,
+		SZ_64K,
+		SZ_4M,
+	};
+	struct intel_memory_region *src_mr, *dst_mr;
+	int src_id, dst_id;
+	int i, j, k;
+	int ret;
+
+	for_each_memory_region(src_mr, i915, src_id) {
+		for_each_memory_region(dst_mr, i915, dst_id) {
+			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+				for (j = 0; j < ARRAY_SIZE(types); ++j) {
+					for (k = 0; k < ARRAY_SIZE(types); ++k) {
+						ret = _perf_memcpy(src_mr,
+								   dst_mr,
+								   sizes[i],
+								   types[j],
+								   types[k]);
+						if (ret)
+							return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -619,3 +810,15 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 
 	return i915_live_subtests(tests, i915);
 }
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(perf_memcpy),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return i915_live_subtests(tests, i915);
+}
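
A note on the throughput figure printed by _perf_memcpy(): five passes are timed and sorted, the fastest and slowest are discarded, and the middle three are weighted 1:2:1, so the divisor covers four copies' worth of time, matching the 4 * size in the numerator. As a worked example with assumed numbers, copying 4 MiB with t[1] = t[2] = t[3] = 2 ms gives

	4 * 4 MiB / (2 + 2*2 + 2) ms = 16 MiB / 8 ms = 2000 MiB/s

a trimmed, median-weighted estimate rather than a best-case one.
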
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b89869..754d0eb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
 		goto put_device;
 	}
 	i915->drm.pdev = pdev;
-	i915->drm.dev_private = i915;
 
 	intel_runtime_pm_init_early(&i915->runtime_pm);
 
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
new file mode 100644
index 0000000..23adb64d
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_pm.h"
+#include "vlv_suspend.h"
+
+struct vlv_s0ix_state {
+	/* GAM */
+	u32 wr_watermark;
+	u32 gfx_prio_ctrl;
+	u32 arb_mode;
+	u32 gfx_pend_tlb0;
+	u32 gfx_pend_tlb1;
+	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+	u32 media_max_req_count;
+	u32 gfx_max_req_count;
+	u32 render_hwsp;
+	u32 ecochk;
+	u32 bsd_hwsp;
+	u32 blt_hwsp;
+	u32 tlb_rd_addr;
+
+	/* MBC */
+	u32 g3dctl;
+	u32 gsckgctl;
+	u32 mbctl;
+
+	/* GCP */
+	u32 ucgctl1;
+	u32 ucgctl3;
+	u32 rcgctl1;
+	u32 rcgctl2;
+	u32 rstctl;
+	u32 misccpctl;
+
+	/* GPM */
+	u32 gfxpause;
+	u32 rpdeuhwtc;
+	u32 rpdeuc;
+	u32 ecobus;
+	u32 pwrdwnupctl;
+	u32 rp_down_timeout;
+	u32 rp_deucsw;
+	u32 rcubmabdtmr;
+	u32 rcedata;
+	u32 spare2gh;
+
+	/* Display 1 CZ domain */
+	u32 gt_imr;
+	u32 gt_ier;
+	u32 pm_imr;
+	u32 pm_ier;
+	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+	/* GT SA CZ domain */
+	u32 tilectl;
+	u32 gt_fifoctl;
+	u32 gtlc_wake_ctrl;
+	u32 gtlc_survive;
+	u32 pmwgicz;
+
+	/* Display 2 CZ domain */
+	u32 gu_ctl0;
+	u32 gu_ctl1;
+	u32 pcbr;
+	u32 clock_gate_dis2;
+};
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ *                    used internally by the HW in a way that doesn't depend
+ *                    on keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ *   These have no effect on the driver's operation, so we don't save/restore
+ *   them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ *   the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+	struct intel_uncore *uncore = &i915->uncore;
+	int i;
+
+	if (!s)
+		return;
+
+	/* GAM 0x4000-0x4770 */
+	s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
+	s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
+	s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
+	s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
+	s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));
+
+	s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
+	s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);
+
+	s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
+	s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+	s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
+	s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);
+
+	s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
+	s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
+	s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
+	s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
+	s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
+	s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
+	s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
+	s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
+	s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
+	s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
+	s->ecobus = intel_uncore_read(uncore, ECOBUS);
+	s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
+	s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
+	s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
+	s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
+	s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
+	s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	s->gt_imr = intel_uncore_read(uncore, GTIMR);
+	s->gt_ier = intel_uncore_read(uncore, GTIER);
+	s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+	s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	s->tilectl = intel_uncore_read(uncore, TILECTL);
+	s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
+	s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
+	s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
+	s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
+	s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);
+
+	/*
+	 * Not saving any of:
+	 * DFT,		0x9800-0x9EC0
+	 * SARB,	0xB000-0xB1FC
+	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
+	 * PCI CFG
+	 */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 val;
+	int i;
+
+	if (!s)
+		return;
+
+	/* GAM 0x4000-0x4770 */
+	intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
+	intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
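+	/* ARB_MODE is masked: high 16 bits select which low bits are written */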
+	intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
+	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+	intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+	intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+	intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
+	intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
+	intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+	intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+	intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
+	intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
+	intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
+	intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
+	intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
+	intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
+	intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
+	intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
+	intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
+	intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
+	intel_uncore_write(uncore, ECOBUS, s->ecobus);
+	intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+	intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+	intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
+	intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+	intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
+	intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	intel_uncore_write(uncore, GTIMR, s->gt_imr);
+	intel_uncore_write(uncore, GTIER, s->gt_ier);
+	intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
+	intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	intel_uncore_write(uncore, TILECTL, s->tilectl);
+	intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
+	/*
+	 * Preserve the GT allow wake and GFX force clock bits; they are not
+	 * to be restored, as they are used to control the s0ix suspend/resume
+	 * sequence by the caller.
+	 */
+	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	val &= VLV_GTLC_ALLOWWAKEREQ;
+	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+
+	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	val &= VLV_GFX_CLK_FORCE_ON_BIT;
+	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+	intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
+	intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
+	intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
+	intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
+				  u32 mask, u32 val)
+{
+	i915_reg_t reg = VLV_GTLC_PW_STATUS;
+	u32 reg_value;
+	int ret;
+
+	/* The HW does not like us polling for PW_STATUS frequently, so
+	 * use the sleeping loop rather than risk the busy spin within
+	 * intel_wait_for_register().
+	 *
+	 * Transitioning between RC6 states should be at most 2ms (see
+	 * valleyview_enable_rps) so use a 3ms timeout.
+	 */
+	ret = wait_for(((reg_value =
+			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+		       == val, 3);
+
+	/* just trace the final value */
+	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+	return ret;
+}
+
+static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 val;
+	int err;
+
+	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+	if (force_on)
+		val |= VLV_GFX_CLK_FORCE_ON_BIT;
+	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+	if (!force_on)
+		return 0;
+
+	err = intel_wait_for_register(uncore,
+				      VLV_GTLC_SURVIVABILITY_REG,
+				      VLV_GFX_CLK_STATUS_BIT,
+				      VLV_GFX_CLK_STATUS_BIT,
+				      20);
+	if (err)
+		drm_err(&i915->drm,
+			"timeout waiting for GFX clock force-on (%08x)\n",
+			intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
+
+	return err;
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 mask;
+	u32 val;
+	int err;
+
+	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	val &= ~VLV_GTLC_ALLOWWAKEREQ;
+	if (allow)
+		val |= VLV_GTLC_ALLOWWAKEREQ;
+	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+	intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
+
+	mask = VLV_GTLC_ALLOWWAKEACK;
+	val = allow ? mask : 0;
+
+	err = vlv_wait_for_pw_status(i915, mask, val);
+	if (err)
+		drm_err(&i915->drm, "timeout disabling GT waking\n");
+
+	return err;
+}
+
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+				  bool wait_for_on)
+{
+	u32 mask;
+	u32 val;
+
+	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+	val = wait_for_on ? mask : 0;
+
+	/*
+	 * RC6 transitioning can be delayed up to 2 msec (see
+	 * valleyview_enable_rps), so use 3 msec for safety.
+	 *
+	 * This can fail to turn off RC6 if the GPU is stuck after a failed
+	 * reset and we are trying to force the machine to sleep.
+	 */
+	if (vlv_wait_for_pw_status(dev_priv, mask, val))
+		drm_dbg(&dev_priv->drm,
+			"timeout waiting for GT wells to go %s\n",
+			onoff(wait_for_on));
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+
+	if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+		return;
+
+	drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
+	intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	u32 mask;
+	int err;
+
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		return 0;
+
+	/*
+	 * Bspec defines the following GT well-on flags as debug only, so
+	 * don't treat them as hard failures.
+	 */
+	vlv_wait_for_gt_wells(dev_priv, false);
+
+	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+	drm_WARN_ON(&dev_priv->drm,
+		    (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+	vlv_check_no_gt_access(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, true);
+	if (err)
+		goto err1;
+
+	err = vlv_allow_gt_wake(dev_priv, false);
+	if (err)
+		goto err2;
+
+	vlv_save_gunit_s0ix_state(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (err)
+		goto err2;
+
+	return 0;
+
+err2:
+	/* For safety always re-enable waking and disable gfx clock forcing */
+	vlv_allow_gt_wake(dev_priv, true);
+err1:
+	vlv_force_gfx_clock(dev_priv, false);
+
+	return err;
+}
+
+int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
+{
+	int err;
+	int ret;
+
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		return 0;
+
+	/*
+	 * If any of the steps fail, just try to continue; that's the best we
+	 * can do at this point. Return the first error code (which will also
+	 * leave RPM permanently disabled).
+	 */
+	ret = vlv_force_gfx_clock(dev_priv, true);
+
+	vlv_restore_gunit_s0ix_state(dev_priv);
+
+	err = vlv_allow_gt_wake(dev_priv, true);
+	if (!ret)
+		ret = err;
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (!ret)
+		ret = err;
+
+	vlv_check_no_gt_access(dev_priv);
+
+	if (rpm_resume)
+		intel_init_clock_gating(dev_priv);
+
+	return ret;
+}
+
+int vlv_suspend_init(struct drm_i915_private *i915)
+{
+	if (!IS_VALLEYVIEW(i915))
+		return 0;
+
+	/* we write all the values in the struct, so no need to zero it out */
+	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+				       GFP_KERNEL);
+	if (!i915->vlv_s0ix_state)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void vlv_suspend_cleanup(struct drm_i915_private *i915)
+{
+	if (!i915->vlv_s0ix_state)
+		return;
+
+	kfree(i915->vlv_s0ix_state);
+	i915->vlv_s0ix_state = NULL;
+}
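
The four exported entry points pair up across the driver's lifetime. A minimal sketch of the intended call sequence (call sites assumed; in this series they are invoked from the suspend/resume paths in i915_drv.c):

	/* driver load */
	ret = vlv_suspend_init(i915);       /* allocates vlv_s0ix_state on VLV */

	/* suspend */
	ret = vlv_suspend_complete(i915);   /* saves Gunit state, gates GT wake */

	/* resume */
	ret = vlv_resume_prepare(i915, rpm_resume); /* restores Gunit state */

	/* driver unload */
	vlv_suspend_cleanup(i915);          /* frees vlv_s0ix_state */
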
diff --git a/drivers/gpu/drm/i915/vlv_suspend.h b/drivers/gpu/drm/i915/vlv_suspend.h
new file mode 100644
index 0000000..895091c
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __VLV_SUSPEND_H__
+#define __VLV_SUSPEND_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int vlv_suspend_init(struct drm_i915_private *i915);
+void vlv_suspend_cleanup(struct drm_i915_private *i915);
+int vlv_suspend_complete(struct drm_i915_private *i915);
+int vlv_resume_prepare(struct drm_i915_private *i915, bool rpm_resume);
+
+#endif /* __VLV_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8cb2665..4da22a9 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -446,7 +446,7 @@ static int imx_ldb_register(struct drm_device *drm,
 
 	if (imx_ldb_ch->bridge) {
 		ret = drm_bridge_attach(&imx_ldb_ch->encoder,
-					imx_ldb_ch->bridge, NULL);
+					imx_ldb_ch->bridge, NULL, 0);
 		if (ret) {
 			DRM_ERROR("Failed to initialize bridge with drm\n");
 			return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 28826c0..6776ebb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -359,7 +359,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 	if (!fb)
 		return 0;
 
-	if (!state->crtc)
+	if (WARN_ON(!state->crtc))
 		return -EINVAL;
 
 	crtc_state =
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 3dca424..08fafa4 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -24,6 +24,7 @@
 struct imx_parallel_display {
 	struct drm_connector connector;
 	struct drm_encoder encoder;
+	struct drm_bridge bridge;
 	struct device *dev;
 	void *edid;
 	int edid_len;
@@ -31,7 +32,7 @@ struct imx_parallel_display {
 	u32 bus_flags;
 	struct drm_display_mode mode;
 	struct drm_panel *panel;
-	struct drm_bridge *bridge;
+	struct drm_bridge *next_bridge;
 };
 
 static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
@@ -44,6 +45,11 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
 	return container_of(e, struct imx_parallel_display, encoder);
 }
 
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+	return container_of(b, struct imx_parallel_display, bridge);
+}
+
 static int imx_pd_connector_get_modes(struct drm_connector *connector)
 {
 	struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -89,37 +95,148 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
 	return &imxpd->encoder;
 }
 
-static void imx_pd_encoder_enable(struct drm_encoder *encoder)
+static void imx_pd_bridge_enable(struct drm_bridge *bridge)
 {
-	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
 
 	drm_panel_prepare(imxpd->panel);
 	drm_panel_enable(imxpd->panel);
 }
 
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
+static void imx_pd_bridge_disable(struct drm_bridge *bridge)
 {
-	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
 
 	drm_panel_disable(imxpd->panel);
 	drm_panel_unprepare(imxpd->panel);
 }
 
-static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
-				       struct drm_crtc_state *crtc_state,
-				       struct drm_connector_state *conn_state)
+static const u32 imx_pd_bus_fmts[] = {
+	MEDIA_BUS_FMT_RGB888_1X24,
+	MEDIA_BUS_FMT_BGR888_1X24,
+	MEDIA_BUS_FMT_GBR888_1X24,
+	MEDIA_BUS_FMT_RGB666_1X18,
+	MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+	MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+					 struct drm_bridge_state *bridge_state,
+					 struct drm_crtc_state *crtc_state,
+					 struct drm_connector_state *conn_state,
+					 unsigned int *num_output_fmts)
+{
+	struct drm_display_info *di = &conn_state->connector->display_info;
+	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+	u32 *output_fmts;
+
+	if (!imxpd->bus_format && !di->num_bus_formats) {
+		*num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+		return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+			       GFP_KERNEL);
+	}
+
+	*num_output_fmts = 1;
+	output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+	if (!output_fmts)
+		return NULL;
+
+	if (!imxpd->bus_format && di->num_bus_formats)
+		output_fmts[0] = di->bus_formats[0];
+	else
+		output_fmts[0] = imxpd->bus_format;
+
+	return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+		if (imx_pd_bus_fmts[i] == output_fmt)
+			return true;
+	}
+
+	return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts)
+{
+	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+	u32 *input_fmts;
+
+	/*
+	 * If the next bridge does not support bus format negotiation, let's
+	 * use the static bus format definition (imxpd->bus_format) if it's
+	 * specified, or RGB888 when it's not.
+	 */
+	if (output_fmt == MEDIA_BUS_FMT_FIXED)
+		output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+	/* Now make sure the requested output format is supported. */
+	if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+	    !imx_pd_format_supported(output_fmt)) {
+		*num_input_fmts = 0;
+		return NULL;
+	}
+
+	*num_input_fmts = 1;
+	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+	if (!input_fmts)
+		return NULL;
+
+	input_fmts[0] = output_fmt;
+	return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+				      struct drm_bridge_state *bridge_state,
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_connector_state *conn_state)
 {
 	struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
 	struct drm_display_info *di = &conn_state->connector->display_info;
-	struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+	struct drm_bridge_state *next_bridge_state = NULL;
+	struct drm_bridge *next_bridge;
+	u32 bus_flags, bus_fmt;
 
-	if (!imxpd->bus_format && di->num_bus_formats) {
-		imx_crtc_state->bus_flags = di->bus_flags;
-		imx_crtc_state->bus_format = di->bus_formats[0];
-	} else {
-		imx_crtc_state->bus_flags = imxpd->bus_flags;
-		imx_crtc_state->bus_format = imxpd->bus_format;
+	next_bridge = drm_bridge_get_next_bridge(bridge);
+	if (next_bridge)
+		next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+								    next_bridge);
+
+	if (next_bridge_state)
+		bus_flags = next_bridge_state->input_bus_cfg.flags;
+	else if (!imxpd->bus_format && di->num_bus_formats)
+		bus_flags = di->bus_flags;
+	else
+		bus_flags = imxpd->bus_flags;
+
+	bus_fmt = bridge_state->input_bus_cfg.format;
+	if (!imx_pd_format_supported(bus_fmt))
+		return -EINVAL;
+
+	if (bus_flags &
+	    ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
+	      DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+	      DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
+		dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
+		return -EINVAL;
 	}
+
+	bridge_state->output_bus_cfg.flags = bus_flags;
+	bridge_state->input_bus_cfg.flags = bus_flags;
+	imx_crtc_state->bus_flags = bus_flags;
+	imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
 	imx_crtc_state->di_hsync_pin = 2;
 	imx_crtc_state->di_vsync_pin = 3;
 
@@ -143,10 +260,15 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
 	.destroy = imx_drm_encoder_destroy,
 };
 
-static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
-	.enable = imx_pd_encoder_enable,
-	.disable = imx_pd_encoder_disable,
-	.atomic_check = imx_pd_encoder_atomic_check,
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+	.enable = imx_pd_bridge_enable,
+	.disable = imx_pd_bridge_disable,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_check = imx_pd_bridge_atomic_check,
+	.atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+	.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
 };
 
 static int imx_pd_register(struct drm_device *drm,
@@ -166,11 +288,13 @@ static int imx_pd_register(struct drm_device *drm,
 	 */
 	imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-	drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
 	drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
 			 DRM_MODE_ENCODER_NONE, NULL);
 
-	if (!imxpd->bridge) {
+	imxpd->bridge.funcs = &imx_pd_bridge_funcs;
+	drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+
+	if (!imxpd->next_bridge) {
 		drm_connector_helper_add(&imxpd->connector,
 				&imx_pd_connector_helper_funcs);
 		drm_connector_init(drm, &imxpd->connector,
@@ -181,8 +305,9 @@ static int imx_pd_register(struct drm_device *drm,
 	if (imxpd->panel)
 		drm_panel_attach(imxpd->panel, &imxpd->connector);
 
-	if (imxpd->bridge) {
-		ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
+	if (imxpd->next_bridge) {
+		ret = drm_bridge_attach(encoder, imxpd->next_bridge,
+					&imxpd->bridge, 0);
 		if (ret < 0) {
 			dev_err(imxpd->dev, "failed to attach bridge: %d\n",
 				ret);
@@ -227,7 +352,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	imxpd->bus_format = bus_format;
 
 	/* port@1 is the output port */
-	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
+					  &imxpd->next_bridge);
 	if (ret && ret != -ENODEV)
 		return ret;
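
The MEDIA_BUS_FMT_FIXED fallback above relies on the GNU "x ? : y" extension, which evaluates to x when x is non-zero and to y otherwise. Restated as a standalone helper (hypothetical name, same logic as the patch):

	#include <linux/media-bus-format.h>

	static u32 imx_pd_pick_output_fmt(u32 negotiated_fmt, u32 fixed_fmt)
	{
		/* the next bridge negotiated a real format: use it as-is */
		if (negotiated_fmt != MEDIA_BUS_FMT_FIXED)
			return negotiated_fmt;

		/* otherwise fall back to the per-board format, or RGB888 */
		return fixed_fmt ? : MEDIA_BUS_FMT_RGB888_1X24;
	}
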
 
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 6d47ef7..9dfe7cb 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -737,7 +737,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = drm_bridge_attach(&priv->encoder, bridge, NULL);
+	ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
 	if (ret) {
 		dev_err(dev, "Unable to attach bridge");
 		return ret;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 124efe4..2daac64 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -15,10 +15,14 @@
 #include "lima_vm.h"
 
 int lima_sched_timeout_ms;
+uint lima_heap_init_nr_pages = 8;
 
 MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
 module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
 
+MODULE_PARM_DESC(heap_init_nr_pages, "initial number of heap buffer pages");
+module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+
 static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct drm_lima_get_param *args = data;
@@ -68,7 +72,7 @@ static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_
 	if (args->pad)
 		return -EINVAL;
 
-	if (args->flags)
+	if (args->flags & ~(LIMA_BO_FLAG_HEAP))
 		return -EINVAL;
 
 	if (args->size == 0)
@@ -241,6 +245,12 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
 
 DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
 
+/*
+ * Changelog:
+ *
+ * - 1.1.0 - add heap buffer support
+ */
+
 static struct drm_driver lima_drm_driver = {
 	.driver_features    = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
 	.open               = lima_drm_driver_open,
@@ -250,9 +260,9 @@ static struct drm_driver lima_drm_driver = {
 	.fops               = &lima_drm_driver_fops,
 	.name               = "lima",
 	.desc               = "lima DRM",
-	.date               = "20190217",
+	.date               = "20191231",
 	.major              = 1,
-	.minor              = 0,
+	.minor              = 1,
 	.patchlevel         = 0,
 
 	.gem_create_object  = lima_gem_create_object,
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index 69c7344..f492ecc 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -9,6 +9,7 @@
 #include "lima_ctx.h"
 
 extern int lima_sched_timeout_ms;
+extern uint lima_heap_init_nr_pages;
 
 struct lima_vm;
 struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8..5404e0d 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
 #include <linux/mm.h>
 #include <linux/sync_file.h>
 #include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
 
 #include <drm/drm_file.h>
 #include <drm/drm_syncobj.h>
@@ -15,6 +17,83 @@
 #include "lima_gem.h"
 #include "lima_vm.h"
 
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+	struct page **pages;
+	struct address_space *mapping = bo->base.base.filp->f_mapping;
+	struct device *dev = bo->base.base.dev->dev;
+	size_t old_size = bo->heap_size;
+	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+		(lima_heap_init_nr_pages << PAGE_SHIFT);
+	struct sg_table sgt;
+	int i, ret;
+
+	if (bo->heap_size >= bo->base.base.size)
+		return -ENOSPC;
+
+	new_size = min(new_size, bo->base.base.size);
+
+	mutex_lock(&bo->base.pages_lock);
+
+	if (bo->base.pages) {
+		pages = bo->base.pages;
+	} else {
+		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+		if (!pages) {
+			mutex_unlock(&bo->base.pages_lock);
+			return -ENOMEM;
+		}
+
+		bo->base.pages = pages;
+		bo->base.pages_use_count = 1;
+
+		mapping_set_unevictable(mapping);
+	}
+
+	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+		struct page *page = shmem_read_mapping_page(mapping, i);
+
+		if (IS_ERR(page)) {
+			mutex_unlock(&bo->base.pages_lock);
+			return PTR_ERR(page);
+		}
+		pages[i] = page;
+	}
+
+	mutex_unlock(&bo->base.pages_lock);
+
+	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+					new_size, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	if (bo->base.sgt) {
+		dma_unmap_sg(dev, bo->base.sgt->sgl,
+			     bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+		sg_free_table(bo->base.sgt);
+	} else {
+		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+		if (!bo->base.sgt) {
+			sg_free_table(&sgt);
+			return -ENOMEM;
+		}
+	}
+
+	dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+
+	*bo->base.sgt = sgt;
+
+	if (vm) {
+		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+		if (ret)
+			return ret;
+	}
+
+	bo->heap_size = new_size;
+	return 0;
+}
+
 int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
 			   u32 size, u32 flags, u32 *handle)
 {
@@ -22,7 +101,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
 	gfp_t mask;
 	struct drm_gem_shmem_object *shmem;
 	struct drm_gem_object *obj;
-	struct sg_table *sgt;
+	struct lima_bo *bo;
+	bool is_heap = flags & LIMA_BO_FLAG_HEAP;
 
 	shmem = drm_gem_shmem_create(dev, size);
 	if (IS_ERR(shmem))
@@ -36,10 +116,18 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
 	mask |= __GFP_DMA32;
 	mapping_set_gfp_mask(obj->filp->f_mapping, mask);
 
-	sgt = drm_gem_shmem_get_pages_sgt(obj);
-	if (IS_ERR(sgt)) {
-		err = PTR_ERR(sgt);
-		goto out;
+	if (is_heap) {
+		bo = to_lima_bo(obj);
+		err = lima_heap_alloc(bo, NULL);
+		if (err)
+			goto out;
+	} else {
+		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+		if (IS_ERR(sgt)) {
+			err = PTR_ERR(sgt);
+			goto out;
+		}
 	}
 
 	err = drm_gem_handle_create(file, obj, handle);
@@ -79,17 +167,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
 	lima_vm_bo_del(vm, bo);
 }
 
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	if (bo->heap_size)
+		return -EINVAL;
+
+	return drm_gem_shmem_pin(obj);
+}
+
+static void *lima_gem_vmap(struct drm_gem_object *obj)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	if (bo->heap_size)
+		return ERR_PTR(-EINVAL);
+
+	return drm_gem_shmem_vmap(obj);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct lima_bo *bo = to_lima_bo(obj);
+
+	if (bo->heap_size)
+		return -EINVAL;
+
+	return drm_gem_shmem_mmap(obj, vma);
+}
+
 static const struct drm_gem_object_funcs lima_gem_funcs = {
 	.free = lima_gem_free_object,
 	.open = lima_gem_object_open,
 	.close = lima_gem_object_close,
 	.print_info = drm_gem_shmem_print_info,
-	.pin = drm_gem_shmem_pin,
+	.pin = lima_gem_pin,
 	.unpin = drm_gem_shmem_unpin,
 	.get_sg_table = drm_gem_shmem_get_sg_table,
-	.vmap = drm_gem_shmem_vmap,
+	.vmap = lima_gem_vmap,
 	.vunmap = drm_gem_shmem_vunmap,
-	.mmap = drm_gem_shmem_mmap,
+	.mmap = lima_gem_mmap,
 };
 
 struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
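
lima_heap_alloc() grows the heap geometrically: the first call maps lima_heap_init_nr_pages pages (8 by default, i.e. 32 KiB with 4 KiB pages) and every later call doubles the mapped size, clamped to the BO's full size, reading only the newly added tail pages from shmem. An illustrative growth sequence under those default assumptions, for a hypothetical 1 MiB heap BO:

	size_t size = 0, bo_size = SZ_1M;

	while (size < bo_size) {
		size = size ? size * 2 : 8ul << PAGE_SHIFT;
		size = min(size, bo_size);
		/* 32K -> 64K -> 128K -> 256K -> 512K -> 1M */
	}

so a heap that eventually needs N bytes is grown O(log N) times.
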
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 1800feb..ccea061 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -7,12 +7,15 @@
 #include <drm/drm_gem_shmem_helper.h>
 
 struct lima_submit;
+struct lima_vm;
 
 struct lima_bo {
 	struct drm_gem_shmem_object base;
 
 	struct mutex lock;
 	struct list_head va;
+
+	size_t heap_size;
 };
 
 static inline struct lima_bo *
@@ -31,6 +34,7 @@ static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
 	return bo->base.base.resv;
 }
 
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm);
 struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
 int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
 			   u32 size, u32 flags, u32 *handle);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index ccf49fa..d8841c8 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -11,6 +11,8 @@
 #include "lima_device.h"
 #include "lima_gp.h"
 #include "lima_regs.h"
+#include "lima_gem.h"
+#include "lima_vm.h"
 
 #define gp_write(reg, data) writel(data, ip->iomem + reg)
 #define gp_read(reg) readl(ip->iomem + reg)
@@ -20,6 +22,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
 	struct lima_ip *ip = data;
 	struct lima_device *dev = ip->dev;
 	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+	struct lima_sched_task *task = pipe->current_task;
 	u32 state = gp_read(LIMA_GP_INT_STAT);
 	u32 status = gp_read(LIMA_GP_STATUS);
 	bool done = false;
@@ -29,8 +32,16 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
 		return IRQ_NONE;
 
 	if (state & LIMA_GP_IRQ_MASK_ERROR) {
-		dev_err(dev->dev, "gp error irq state=%x status=%x\n",
-			state, status);
+		if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
+		    LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
+			dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
+				status);
+		} else {
+			dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+				state, status);
+			if (task)
+				task->recoverable = false;
+		}
 
 		/* mask all interrupts before hard reset */
 		gp_write(LIMA_GP_INT_MASK, 0);
@@ -43,6 +54,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
 		bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
 					LIMA_GP_STATUS_PLBU_ACTIVE);
 		done = valid && !active;
+		pipe->error = false;
 	}
 
 	gp_write(LIMA_GP_INT_CLEAR, state);
@@ -121,6 +133,22 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
 	u32 cmd = 0;
 	int i;
 
+	/* update real heap buffer size for GP */
+	for (i = 0; i < task->num_bos; i++) {
+		struct lima_bo *bo = task->bos[i];
+
+		if (bo->heap_size &&
+		    lima_vm_get_va(task->vm, bo) ==
+		    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
+			f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+				f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
+				bo->heap_size;
+			task->recoverable = true;
+			task->heap = bo;
+			break;
+		}
+	}
+
 	if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
 	    f[LIMA_GP_VSCL_END_ADDR >> 2])
 		cmd |= LIMA_GP_CMD_START_VS;
@@ -184,6 +212,36 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
 	lima_sched_pipe_task_done(pipe);
 }
 
+static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+{
+	struct lima_ip *ip = pipe->processor[0];
+	struct lima_sched_task *task = pipe->current_task;
+	struct drm_lima_gp_frame *frame = task->frame;
+	u32 *f = frame->frame;
+	size_t fail_size =
+		f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
+		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];
+
+	if (fail_size == task->heap->heap_size) {
+		int ret;
+
+		ret = lima_heap_alloc(task->heap, task->vm);
+		if (ret < 0)
+			return ret;
+	}
+
+	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+	/* Resume from where we stopped, i.e. new start is old end */
+	gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
+		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+	f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
+	gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
+		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+	return 0;
+}
+
 static void lima_gp_print_version(struct lima_ip *ip)
 {
 	u32 version, major, minor;
@@ -270,6 +328,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
 	pipe->task_fini = lima_gp_task_fini;
 	pipe->task_error = lima_gp_task_error;
 	pipe->task_mmu_error = lima_gp_task_mmu_error;
+	pipe->task_recover = lima_gp_task_recover;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 97ec09d..f79d2af 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -99,6 +99,11 @@ void lima_mmu_fini(struct lima_ip *ip)
 
 }
 
+void lima_mmu_flush_tlb(struct lima_ip *ip)
+{
+	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+}
+
 void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
 {
 	struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 8c78319..4f8ccbe 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -10,6 +10,7 @@ struct lima_vm;
 int lima_mmu_init(struct lima_ip *ip);
 void lima_mmu_fini(struct lima_ip *ip);
 
+void lima_mmu_flush_tlb(struct lima_ip *ip);
 void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
 void lima_mmu_page_fault_resume(struct lima_ip *ip);
 
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
index ace8ece..0124c90 100644
--- a/drivers/gpu/drm/lima/lima_regs.h
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -239,6 +239,7 @@
 #define   LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
 #define   LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
 #define   LIMA_MMU_STATUS_BUS_ID(x)           ((x >> 6) & 0x1F)
+#define   LIMA_MMU_STATUS_STALL_NOT_ACTIVE    BIT(31)
 #define LIMA_MMU_COMMAND                      0x0008
 #define   LIMA_MMU_COMMAND_ENABLE_PAGING      0x00
 #define   LIMA_MMU_COMMAND_DISABLE_PAGING     0x01
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b561dd0..3886999 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -313,6 +313,26 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
 	.free_job = lima_sched_free_job,
 };
 
+static void lima_sched_recover_work(struct work_struct *work)
+{
+	struct lima_sched_pipe *pipe =
+		container_of(work, struct lima_sched_pipe, recover_work);
+	int i;
+
+	for (i = 0; i < pipe->num_l2_cache; i++)
+		lima_l2_cache_flush(pipe->l2_cache[i]);
+
+	if (pipe->bcast_mmu) {
+		lima_mmu_flush_tlb(pipe->bcast_mmu);
+	} else {
+		for (i = 0; i < pipe->num_mmu; i++)
+			lima_mmu_flush_tlb(pipe->mmu[i]);
+	}
+
+	if (pipe->task_recover(pipe))
+		drm_sched_fault(&pipe->base);
+}
+
 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 {
 	unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -321,6 +341,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 	pipe->fence_context = dma_fence_context_alloc(1);
 	spin_lock_init(&pipe->fence_lock);
 
+	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
+
 	return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
 			      msecs_to_jiffies(timeout), name);
 }
@@ -332,11 +354,14 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
 
 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
 {
-	if (pipe->error)
-		drm_sched_fault(&pipe->base);
-	else {
-		struct lima_sched_task *task = pipe->current_task;
+	struct lima_sched_task *task = pipe->current_task;
 
+	if (pipe->error) {
+		if (task && task->recoverable)
+			schedule_work(&pipe->recover_work);
+		else
+			drm_sched_fault(&pipe->base);
+	} else {
 		pipe->task_fini(pipe);
 		dma_fence_signal(task->fence);
 	}
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 1d814fe..d64393f 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -20,6 +20,9 @@ struct lima_sched_task {
 	struct lima_bo **bos;
 	int num_bos;
 
+	bool recoverable;
+	struct lima_bo *heap;
+
 	/* pipe fence */
 	struct dma_fence *fence;
 };
@@ -68,6 +71,9 @@ struct lima_sched_pipe {
 	void (*task_fini)(struct lima_sched_pipe *pipe);
 	void (*task_error)(struct lima_sched_pipe *pipe);
 	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+	int (*task_recover)(struct lima_sched_pipe *pipe);
+
+	struct work_struct recover_work;
 };
 
 int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 840e235..5b92fb8 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -155,6 +155,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
 void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
 {
 	struct lima_bo_va *bo_va;
+	u32 size;
 
 	mutex_lock(&bo->lock);
 
@@ -166,8 +167,9 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
 
 	mutex_lock(&vm->lock);
 
+	size = bo->heap_size ? bo->heap_size : bo_va->node.size;
 	lima_vm_unmap_range(vm, bo_va->node.start,
-			    bo_va->node.start + bo_va->node.size - 1);
+			    bo_va->node.start + size - 1);
 
 	drm_mm_remove_node(&bo_va->node);
 
@@ -277,3 +279,45 @@ void lima_vm_print(struct lima_vm *vm)
 		}
 	}
 }
+
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+{
+	struct lima_bo_va *bo_va;
+	struct sg_dma_page_iter sg_iter;
+	int offset = 0, err;
+	u32 base;
+
+	mutex_lock(&bo->lock);
+
+	bo_va = lima_vm_bo_find(vm, bo);
+	if (!bo_va) {
+		err = -ENOENT;
+		goto err_out0;
+	}
+
+	mutex_lock(&vm->lock);
+
+	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+	for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+			     bo->base.sgt->nents, pageoff) {
+		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+				       base + offset);
+		if (err)
+			goto err_out1;
+
+		offset += PAGE_SIZE;
+	}
+
+	mutex_unlock(&vm->lock);
+
+	mutex_unlock(&bo->lock);
+	return 0;
+
+err_out1:
+	if (offset)
+		lima_vm_unmap_range(vm, base, base + offset - 1);
+	mutex_unlock(&vm->lock);
+err_out0:
+	mutex_unlock(&bo->lock);
+	return err;
+}
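
A hypothetical caller sketch for lima_vm_map_bo() (map_grown_heap() and its
arguments are invented for illustration; the real grow path is
lima_heap_alloc(), which lives outside this hunk): after a heap BO gains
pages, only the new tail needs mapping, starting at the old page count.

    static int map_grown_heap(struct lima_vm *vm, struct lima_bo *bo,
                              size_t old_size)
    {
            /* pages [0, old_size >> PAGE_SHIFT) are already mapped */
            return lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
    }
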
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index e0bdedc..22aeec7 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -58,5 +58,6 @@ static inline void lima_vm_put(struct lima_vm *vm)
 }
 
 void lima_vm_print(struct lima_vm *vm);
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff);
 
 #endif
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9008ddc..f28cb7a 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -20,11 +20,11 @@
  * input formats including most variants of RGB and YUV.
  *
  * The hardware has four display pipes, and the layout is a little
- * bit like this:
+ * bit like this::
  *
- * Memory     -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External      0..5       0..3       A,B,    3 x DSI         bridge
- * source 0..9                         C0,C1   2 x DPI
+ *   Memory     -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ *   External      0..5       0..3       A,B,    3 x DSI         bridge
+ *   source 0..9                         C0,C1   2 x DPI
  *
  * FIFOs A and B are for LCD and HDMI while FIFO CO/C1 are for
  * panels with embedded buffer.
@@ -43,6 +43,7 @@
  * to change as we exploit more of the hardware capabilities.
  *
  * TODO:
+ *
  * - Enable damage rectangles using drm_plane_enable_fb_damage_clips()
  *   so we can selectively just transmit the damaged area to a
  *   command-only display.
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index bb6528b..7af5ebb 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -986,7 +986,8 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
 	clk_disable_unprepare(d->lp_clk);
 }
 
-static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+				  enum drm_bridge_attach_flags flags)
 {
 	struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
 	struct drm_device *drm = bridge->dev;
@@ -998,7 +999,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
 	}
 
 	/* Attach the DSI bridge to the output (panel etc) bridge */
-	ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+	ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
 	if (ret) {
 		dev_err(d->dev, "failed to attach the DSI bridge\n");
 		return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 01fa8b8..4f0ce4c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -607,7 +607,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
 	/* Currently DPI0 is fixed to be driven by OVL1 */
 	dpi->encoder.possible_crtcs = BIT(1);
 
-	ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
+	ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
 	if (ret) {
 		dev_err(dev, "Failed to attach bridge: %d\n", ret);
 		goto err_cleanup;
@@ -664,6 +664,16 @@ static unsigned int mt2701_calculate_factor(int clock)
 		return 1;
 }
 
+static unsigned int mt8183_calculate_factor(int clock)
+{
+	if (clock <= 27000)
+		return 8;
+	else if (clock <= 167000)
+		return 4;
+	else
+		return 2;
+}
+
 static const struct mtk_dpi_conf mt8173_conf = {
 	.cal_factor = mt8173_calculate_factor,
 	.reg_h_fre_con = 0xe0,
@@ -675,6 +685,11 @@ static const struct mtk_dpi_conf mt2701_conf = {
 	.edge_sel_en = true,
 };
 
+static const struct mtk_dpi_conf mt8183_conf = {
+	.cal_factor = mt8183_calculate_factor,
+	.reg_h_fre_con = 0xe0,
+};
+
 static int mtk_dpi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -770,6 +785,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
 	{ .compatible = "mediatek,mt8173-dpi",
 	  .data = &mt8173_conf,
 	},
+	{ .compatible = "mediatek,mt8183-dpi",
+	  .data = &mt8183_conf,
+	},
 	{ },
 };
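
A quick worked example of the new mt8183 factor selection (pixel clocks in
kHz; the mode names are illustrative only):

    mt8183_calculate_factor(27000);   /* <= 27000  -> 8  (e.g. 480p)  */
    mt8183_calculate_factor(148500);  /* <= 167000 -> 4  (1080p60)    */
    mt8183_calculate_factor(297000);  /* otherwise -> 2  (e.g. 4K30)  */
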
 
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5fa1073..0ede698 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -904,7 +904,7 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
 
 	/* If there's a bridge, attach to it and let it create the connector */
 	if (dsi->bridge) {
-		ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+		ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
 		if (ret) {
 			DRM_ERROR("Failed to attach bridge to drm\n");
 			goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5e4a4db..a8b2055 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1297,11 +1297,17 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
  * Bridge callbacks
  */
 
-static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+				  enum drm_bridge_attach_flags flags)
 {
 	struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
 	int ret;
 
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!\n");
+		return -EINVAL;
+	}
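+	/*
+	 * DRM_BRIDGE_ATTACH_NO_CONNECTOR tells a bridge not to register a
+	 * drm_connector of its own and to leave connector creation to the
+	 * display driver; this driver still creates its connector during
+	 * attach, hence the rejection above until it is converted.
+	 */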
+
 	ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
 					  &mtk_hdmi_connector_funcs,
 					  DRM_MODE_CONNECTOR_HDMIA,
@@ -1326,7 +1332,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
 
 	if (hdmi->next_bridge) {
 		ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
-					bridge);
+					bridge, flags);
 		if (ret) {
 			dev_err(hdmi->dev,
 				"Failed to attach external bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5..e8c94915 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
 
 #include <drm/bridge/dw_hdmi.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
 #include <drm/drm_device.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_probe_helper.h>
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
 
 struct meson_dw_hdmi {
 	struct drm_encoder encoder;
+	struct drm_bridge bridge;
 	struct dw_hdmi_plat_data dw_plat_data;
 	struct meson_drm *priv;
 	struct device *dev;
@@ -148,9 +150,12 @@ struct meson_dw_hdmi {
 	struct regulator *hdmi_supply;
 	u32 irq_stat;
 	struct dw_hdmi *hdmi;
+	unsigned long output_bus_fmt;
 };
 #define encoder_to_meson_dw_hdmi(x) \
 	container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+	container_of(x, struct meson_dw_hdmi, bridge)
 
 static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
 					const char *compat)
@@ -297,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
 	struct meson_drm *priv = dw_hdmi->priv;
 	unsigned int pixel_clock = mode->clock;
 
+	/* For YUV 4:2:0, the pixel clock is half the VENC clock */
+	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+		pixel_clock /= 2;
+
 	if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
 	    dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
 		if (pixel_clock >= 371250) {
@@ -368,29 +377,40 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
 }
 
 static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
-			     struct drm_display_mode *mode)
+			     const struct drm_display_mode *mode)
 {
 	struct meson_drm *priv = dw_hdmi->priv;
 	int vic = drm_match_cea_mode(mode);
+	unsigned int phy_freq;
 	unsigned int vclk_freq;
 	unsigned int venc_freq;
 	unsigned int hdmi_freq;
 
 	vclk_freq = mode->clock;
 
+	/* For YUV 4:2:0, the pixel clock is half the VENC clock */
+	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+		vclk_freq /= 2;
+
+	/* TMDS clock is pixel_clock * 10 */
+	phy_freq = vclk_freq * 10;
+
 	if (!vic) {
-		meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
-				 vclk_freq, vclk_freq, false);
+		meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+				 vclk_freq, vclk_freq, vclk_freq, false);
 		return;
 	}
 
+	/* 480i/576i needs global pixel doubling */
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		vclk_freq *= 2;
 
 	venc_freq = vclk_freq;
 	hdmi_freq = vclk_freq;
 
-	if (meson_venc_hdmi_venc_repeat(vic))
+	/* VENC double pixels for 1080i, 720p and YUV420 modes */
+	if (meson_venc_hdmi_venc_repeat(vic) ||
+	    dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
 		venc_freq *= 2;
 
 	vclk_freq = max(venc_freq, hdmi_freq);
@@ -398,11 +418,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		venc_freq /= 2;
 
-	DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
-		vclk_freq, venc_freq, hdmi_freq,
+	DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+		phy_freq, vclk_freq, venc_freq, hdmi_freq,
 		priv->venc.hdmi_use_enci);
 
-	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
 			 venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
 }
 
@@ -437,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
 	/* Enable normal output to PHY */
 	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
 
-	/* TMDS pattern setup (TOFIX Handle the YUV420 case) */
-	if (mode->clock > 340000) {
+	/* TMDS pattern setup */
+	if (mode->clock > 340000 &&
+	    dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
 		dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
 				  0);
 		dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -613,6 +634,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 		   const struct drm_display_mode *mode)
 {
 	struct meson_drm *priv = connector->dev->dev_private;
+	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+	unsigned int phy_freq;
 	unsigned int vclk_freq;
 	unsigned int venc_freq;
 	unsigned int hdmi_freq;
@@ -621,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
-	/* If sink max TMDS clock, we reject the mode */
+	/* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
 	if (connector->display_info.max_tmds_clock &&
-	    mode->clock > connector->display_info.max_tmds_clock)
+	    mode->clock > connector->display_info.max_tmds_clock &&
+	    !drm_mode_is_420_only(&connector->display_info, mode) &&
+	    !drm_mode_is_420_also(&connector->display_info, mode))
 		return MODE_BAD;
 
 	/* Check against non-VIC supported modes */
@@ -639,6 +664,15 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
 	vclk_freq = mode->clock;
 
+	/* For YUV 4:2:0, the pixel clock is half the VENC clock */
+	if (drm_mode_is_420_only(&connector->display_info, mode) ||
+	    (!is_hdmi2_sink &&
+	     drm_mode_is_420_also(&connector->display_info, mode)))
+		vclk_freq /= 2;
+
+	/* TMDS clock is pixel_clock * 10 */
+	phy_freq = vclk_freq * 10;
+
 	/* 480i/576i needs global pixel doubling */
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		vclk_freq *= 2;
@@ -646,8 +680,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	venc_freq = vclk_freq;
 	hdmi_freq = vclk_freq;
 
-	/* VENC double pixels for 1080i and 720p modes */
-	if (meson_venc_hdmi_venc_repeat(vic))
+	/* VENC double pixels for 1080i, 720p and YUV420 modes */
+	if (meson_venc_hdmi_venc_repeat(vic) ||
+	    drm_mode_is_420_only(&connector->display_info, mode) ||
+	    (!is_hdmi2_sink &&
+	     drm_mode_is_420_also(&connector->display_info, mode)))
 		venc_freq *= 2;
 
 	vclk_freq = max(venc_freq, hdmi_freq);
@@ -655,14 +692,19 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		venc_freq /= 2;
 
-	dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
-		vclk_freq, venc_freq, hdmi_freq);
+	dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+		__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
 
-	return meson_vclk_vic_supported_freq(vclk_freq);
+	return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
 }
 
 /* Encoder */
 
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+	MEDIA_BUS_FMT_YUV8_1X24,
+	MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
 static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
@@ -672,16 +714,54 @@ static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
 	.destroy        = meson_venc_hdmi_encoder_destroy,
 };
 
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts)
+{
+	u32 *input_fmts = NULL;
+	int i;
+
+	*num_input_fmts = 0;
+
+	for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
+		if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
+			*num_input_fmts = 1;
+			input_fmts = kcalloc(*num_input_fmts,
+					     sizeof(*input_fmts),
+					     GFP_KERNEL);
+			if (!input_fmts)
+				return NULL;
+
+			input_fmts[0] = output_fmt;
+
+			break;
+		}
+	}
+
+	return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
 					struct drm_crtc_state *crtc_state,
 					struct drm_connector_state *conn_state)
 {
+	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
+
+	dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+	DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
+
 	return 0;
 }
 
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
 {
-	struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
 	struct meson_drm *priv = dw_hdmi->priv;
 
 	DRM_DEBUG_DRIVER("\n");
@@ -693,9 +773,9 @@ static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
 	writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
 }
 
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
 {
-	struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
 	struct meson_drm *priv = dw_hdmi->priv;
 
 	DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
@@ -706,32 +786,47 @@ static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
 		writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
 }
 
-static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
-				   struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
+static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
+				   const struct drm_display_mode *mode,
+				   const struct drm_display_mode *adjusted_mode)
 {
-	struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+	struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
 	struct meson_drm *priv = dw_hdmi->priv;
 	int vic = drm_match_cea_mode(mode);
+	unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+	bool yuv420_mode = false;
 
 	DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
 
+	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+		ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+		yuv420_mode = true;
+	}
+
 	/* VENC + VENC-DVI Mode setup */
-	meson_venc_hdmi_mode_set(priv, vic, mode);
+	meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
 
 	/* VCLK Set clock */
 	dw_hdmi_set_vclk(dw_hdmi, mode);
 
-	/* Setup YUV444 to HDMI-TX, no 10bit diphering */
-	writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+	if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+		/* Setup YUV420 to HDMI-TX, no 10bit dithering */
+		writel_relaxed(2 | (2 << 2),
+			       priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+	else
+		/* Setup YUV444 to HDMI-TX, no 10bit dithering */
+		writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
 }
 
-static const struct drm_encoder_helper_funcs
-				meson_venc_hdmi_encoder_helper_funcs = {
-	.atomic_check	= meson_venc_hdmi_encoder_atomic_check,
-	.disable	= meson_venc_hdmi_encoder_disable,
-	.enable		= meson_venc_hdmi_encoder_enable,
-	.mode_set	= meson_venc_hdmi_encoder_mode_set,
+static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_check = meson_venc_hdmi_encoder_atomic_check,
+	.enable	= meson_venc_hdmi_encoder_enable,
+	.disable = meson_venc_hdmi_encoder_disable,
+	.mode_set = meson_venc_hdmi_encoder_mode_set,
 };
 
 /* DW HDMI Regmap */
@@ -852,6 +947,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 	struct drm_device *drm = data;
 	struct meson_drm *priv = drm->dev_private;
 	struct dw_hdmi_plat_data *dw_plat_data;
+	struct drm_bridge *next_bridge;
 	struct drm_encoder *encoder;
 	struct resource *res;
 	int irq;
@@ -953,8 +1049,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 
 	/* Encoder */
 
-	drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
-
 	ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
 			       DRM_MODE_ENCODER_TMDS, "meson_hdmi");
 	if (ret) {
@@ -962,6 +1056,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
+	meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
+	drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
+
 	encoder->possible_crtcs = BIT(0);
 
 	DRM_DEBUG_DRIVER("encoder initialized\n");
@@ -974,8 +1071,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 	dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
 	dw_plat_data->phy_name = "meson_dw_hdmi_phy";
 	dw_plat_data->phy_data = meson_dw_hdmi;
-	dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
 	dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+	dw_plat_data->ycbcr_420_allowed = true;
 
 	if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
 	    dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -984,11 +1081,16 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 
 	platform_set_drvdata(pdev, meson_dw_hdmi);
 
-	meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
-					   &meson_dw_hdmi->dw_plat_data);
+	meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
+					    &meson_dw_hdmi->dw_plat_data);
 	if (IS_ERR(meson_dw_hdmi->hdmi))
 		return PTR_ERR(meson_dw_hdmi->hdmi);
 
+	next_bridge = of_drm_find_bridge(pdev->dev.of_node);
+	if (next_bridge)
+		drm_bridge_attach(encoder, next_bridge,
+				  &meson_dw_hdmi->bridge, 0);
+
 	DRM_DEBUG_DRIVER("HDMI controller initialized\n");
 
 	return 0;
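
Putting the clock relationships above together for the 4K60 YUV 4:2:0 case
(mode->clock = 594000 kHz, output bus format MEDIA_BUS_FMT_UYYVYY8_0_5X24):

    vclk_freq = 594000 / 2  = 297000    /* 4:2:0 halves the pixel clock  */
    phy_freq  = 297000 * 10 = 2970000   /* TMDS clock is pixel clock *10 */
    hdmi_freq = 297000
    venc_freq = 297000 * 2  = 594000    /* VENC doubles pixels for 4:2:0 */
    vclk      = max(594000, 297000) = 594000

which is exactly the MESON_VCLK_HDMI_594000_YUV420 entry added to params[]
in meson_vclk.c below.
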
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f690793..fdf26da 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
 /* 2970 /1 /1 /1 /5 /2  => /1 /1 */
 	MESON_VCLK_HDMI_297000,
 /* 5940 /1 /1 /2 /5 /1  => /1 /1 */
-	MESON_VCLK_HDMI_594000
+	MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1  => /1 /2 */
+	MESON_VCLK_HDMI_594000_YUV420,
 };
 
 struct meson_vclk_params {
+	unsigned int pll_freq;
+	unsigned int phy_freq;
+	unsigned int vclk_freq;
+	unsigned int venc_freq;
 	unsigned int pixel_freq;
-	unsigned int pll_base_freq;
 	unsigned int pll_od1;
 	unsigned int pll_od2;
 	unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
 	unsigned int vclk_div;
 } params[] = {
 	[MESON_VCLK_HDMI_ENCI_54000] = {
+		.pll_freq = 4320000,
+		.phy_freq = 270000,
+		.vclk_freq = 54000,
+		.venc_freq = 54000,
 		.pixel_freq = 54000,
-		.pll_base_freq = 4320000,
 		.pll_od1 = 4,
 		.pll_od2 = 4,
 		.pll_od3 = 1,
@@ -376,8 +384,11 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_DDR_54000] = {
-		.pixel_freq = 54000,
-		.pll_base_freq = 4320000,
+		.pll_freq = 4320000,
+		.phy_freq = 270000,
+		.vclk_freq = 54000,
+		.venc_freq = 54000,
+		.pixel_freq = 27000,
 		.pll_od1 = 4,
 		.pll_od2 = 4,
 		.pll_od3 = 1,
@@ -385,8 +396,11 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_DDR_148500] = {
-		.pixel_freq = 148500,
-		.pll_base_freq = 2970000,
+		.pll_freq = 2970000,
+		.phy_freq = 742500,
+		.vclk_freq = 148500,
+		.venc_freq = 148500,
+		.pixel_freq = 74250,
 		.pll_od1 = 4,
 		.pll_od2 = 1,
 		.pll_od3 = 1,
@@ -394,8 +408,11 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_74250] = {
+		.pll_freq = 2970000,
+		.phy_freq = 742500,
+		.vclk_freq = 74250,
+		.venc_freq = 74250,
 		.pixel_freq = 74250,
-		.pll_base_freq = 2970000,
 		.pll_od1 = 2,
 		.pll_od2 = 2,
 		.pll_od3 = 2,
@@ -403,8 +420,11 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_148500] = {
+		.pll_freq = 2970000,
+		.phy_freq = 1485000,
+		.vclk_freq = 148500,
+		.venc_freq = 148500,
 		.pixel_freq = 148500,
-		.pll_base_freq = 2970000,
 		.pll_od1 = 1,
 		.pll_od2 = 2,
 		.pll_od3 = 2,
@@ -412,8 +432,11 @@ struct meson_vclk_params {
 		.vclk_div = 1,
 	},
 	[MESON_VCLK_HDMI_297000] = {
+		.pll_freq = 5940000,
+		.phy_freq = 2970000,
+		.venc_freq = 297000,
+		.vclk_freq = 297000,
 		.pixel_freq = 297000,
-		.pll_base_freq = 5940000,
 		.pll_od1 = 2,
 		.pll_od2 = 1,
 		.pll_od3 = 1,
@@ -421,14 +444,29 @@ struct meson_vclk_params {
 		.vclk_div = 2,
 	},
 	[MESON_VCLK_HDMI_594000] = {
+		.pll_freq = 5940000,
+		.phy_freq = 5940000,
+		.venc_freq = 594000,
+		.vclk_freq = 594000,
 		.pixel_freq = 594000,
-		.pll_base_freq = 5940000,
 		.pll_od1 = 1,
 		.pll_od2 = 1,
 		.pll_od3 = 2,
 		.vid_pll_div = VID_PLL_DIV_5,
 		.vclk_div = 1,
 	},
+	[MESON_VCLK_HDMI_594000_YUV420] = {
+		.pll_freq = 5940000,
+		.phy_freq = 2970000,
+		.venc_freq = 594000,
+		.vclk_freq = 594000,
+		.pixel_freq = 297000,
+		.pll_od1 = 2,
+		.pll_od2 = 1,
+		.pll_od3 = 1,
+		.vid_pll_div = VID_PLL_DIV_5,
+		.vclk_div = 1,
+	},
 	{ /* sentinel */ },
 };
 
@@ -701,6 +739,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
 	unsigned int od, m, frac, od1, od2, od3;
 
 	if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+		/* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
 		od3 = 1;
 		if (od < 4) {
 			od1 = 2;
@@ -723,21 +762,28 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
 }
 
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq)
+meson_vclk_vic_supported_freq(unsigned int phy_freq,
+			      unsigned int vclk_freq)
 {
 	int i;
 
-	DRM_DEBUG_DRIVER("freq = %d\n", freq);
+	DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+			 phy_freq, vclk_freq);
 
 	for (i = 0 ; params[i].pixel_freq ; ++i) {
 		DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
 				 i, params[i].pixel_freq,
 				 FREQ_1000_1001(params[i].pixel_freq));
+		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+				 i, params[i].phy_freq,
+				 FREQ_1000_1001(params[i].phy_freq/10)*10);
 		/* Match strict frequency */
-		if (freq == params[i].pixel_freq)
+		if (phy_freq == params[i].phy_freq &&
+		    vclk_freq == params[i].vclk_freq)
 			return MODE_OK;
 		/* Match 1000/1001 variant */
-		if (freq == FREQ_1000_1001(params[i].pixel_freq))
+		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
 			return MODE_OK;
 	}
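
Note the PHY comparison scales through /10 and *10: phy_freq arrives as a
1000/1001-scaled pixel clock multiplied by 10, so its variant has to be
computed at pixel-clock scale and scaled back up, or integer rounding makes
the match impossible. Assuming FREQ_1000_1001() rounds to the nearest
integer (its definition sits outside this hunk):

    FREQ_1000_1001(148500)      = 148352    /* 59.94 Hz pixel clock */
    FREQ_1000_1001(148500) * 10 = 1483520   /* matches 148352 * 10  */
    FREQ_1000_1001(1485000)     = 1483516   /* would never match    */
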
 
@@ -965,8 +1011,9 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
 }
 
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
-		      unsigned int vclk_freq, unsigned int venc_freq,
-		      unsigned int dac_freq, bool hdmi_use_enci)
+		      unsigned int phy_freq, unsigned int vclk_freq,
+		      unsigned int venc_freq, unsigned int dac_freq,
+		      bool hdmi_use_enci)
 {
 	bool vic_alternate_clock = false;
 	unsigned int freq;
@@ -986,7 +1033,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		 * - venc_div = 1
 		 * - encp encoder
 		 */
-		meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+		meson_vclk_set(priv, phy_freq, 0, 0, 0,
 			       VID_PLL_DIV_5, 2, 1, 1, false, false);
 		return;
 	}
@@ -1008,9 +1055,11 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 	}
 
 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
-		if (vclk_freq == params[freq].pixel_freq ||
-		    vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
-			if (vclk_freq != params[freq].pixel_freq)
+		if ((phy_freq == params[freq].phy_freq ||
+		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+		    (vclk_freq == params[freq].vclk_freq ||
+		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+			if (vclk_freq != params[freq].vclk_freq)
 				vic_alternate_clock = true;
 			else
 				vic_alternate_clock = false;
@@ -1039,7 +1088,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 		return;
 	}
 
-	meson_vclk_set(priv, params[freq].pll_base_freq,
+	meson_vclk_set(priv, params[freq].pll_freq,
 		       params[freq].pll_od1, params[freq].pll_od2,
 		       params[freq].pll_od3, params[freq].vid_pll_div,
 		       params[freq].vclk_div, hdmi_tx_div, venc_div,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index b621255..aed0ab2 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,10 +25,11 @@ enum {
 enum drm_mode_status
 meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
 enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq);
+meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
 
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
-		      unsigned int vclk_freq, unsigned int venc_freq,
-		      unsigned int dac_freq, bool hdmi_use_enci);
+		      unsigned int phy_freq, unsigned int vclk_freq,
+		      unsigned int venc_freq, unsigned int dac_freq,
+		      bool hdmi_use_enci);
 
 #endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 4efd786..f93c725 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,9 @@ bool meson_venc_hdmi_venc_repeat(int vic)
 EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
 
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
-			      struct drm_display_mode *mode)
+			      unsigned int ycrcb_map,
+			      bool yuv420_mode,
+			      const struct drm_display_mode *mode)
 {
 	union meson_hdmi_venc_mode *vmode = NULL;
 	union meson_hdmi_venc_mode vmode_dmt;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		reg |= VPU_HDMI_INV_VSYNC;
 
-	/* Output data format: CbYCr */
-	reg |= VPU_HDMI_OUTPUT_CBYCR;
+	/* Output data format */
+	reg |= ycrcb_map;
 
 	/*
 	 * Write rate to the async FIFO between VENC and HDMI.
 	 * One write every 2 wr_clk.
 	 */
-	if (venc_repeat)
+	if (venc_repeat || yuv420_mode)
 		reg |= VPU_HDMI_WR_RATE(2);
 
 	/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 576768b..9138255f 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,9 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
 void meson_venci_cvbs_mode_set(struct meson_drm *priv,
 			       struct meson_cvbs_enci_mode *mode);
 void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
-			      struct drm_display_mode *mode);
+			      unsigned int ycrcb_map,
+			      bool yuv420_mode,
+			      const struct drm_display_mode *mode);
 unsigned int meson_venci_get_field(struct meson_drm *priv);
 
 void meson_venc_enable_vsync(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 1bd6b6d..541f9eb 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -213,8 +213,10 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
 		meson_venci_cvbs_mode_set(priv, meson_mode->enci);
 
 		/* Setup 27MHz vclk2 for ENCI and VDAC */
-		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
-				 MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+				 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+				 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+				 true);
 	}
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index aa32aad..9691252 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -95,7 +95,6 @@
 #define MATROX_DPMS_CLEARED (-1)
 
 #define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
-#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
 #define to_mga_connector(x) container_of(x, struct mga_connector, base)
 
 struct mga_crtc {
@@ -110,12 +109,6 @@ struct mga_mode_info {
 	struct mga_crtc *crtc;
 };
 
-struct mga_encoder {
-	struct drm_encoder base;
-	int last_dpms;
-};
-
-
 struct mga_i2c_chan {
 	struct i2c_adapter adapter;
 	struct drm_device *dev;
@@ -185,6 +178,8 @@ struct mga_device {
 
 	/* SE model number stored in reg 0x1e24 */
 	u32 unique_rev_id;
+
+	struct drm_encoder encoder;
 };
 
 static inline enum mga_type
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 62a8e9c..d90e839 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "mgag200_drv.h"
 
@@ -1449,76 +1450,6 @@ static void mga_crtc_init(struct mga_device *mdev)
 	drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
 }
 
-/*
- * The encoder comes after the CRTC in the output pipeline, but before
- * the connector. It's responsible for ensuring that the digital
- * stream is appropriately converted into the output format. Setup is
- * very simple in this case - all we have to do is inform qemu of the
- * colour depth in order to ensure that it displays appropriately
- */
-
-/*
- * These functions are analagous to those in the CRTC code, but are intended
- * to handle any encoder-specific limitations
- */
-static void mga_encoder_mode_set(struct drm_encoder *encoder,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
-{
-	return;
-}
-
-static void mga_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
-	drm_encoder_cleanup(encoder);
-	kfree(mga_encoder);
-}
-
-static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
-	.dpms = mga_encoder_dpms,
-	.mode_set = mga_encoder_mode_set,
-	.prepare = mga_encoder_prepare,
-	.commit = mga_encoder_commit,
-};
-
-static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
-	.destroy = mga_encoder_destroy,
-};
-
-static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-	struct mga_encoder *mga_encoder;
-
-	mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
-	if (!mga_encoder)
-		return NULL;
-
-	encoder = &mga_encoder->base;
-	encoder->possible_crtcs = 0x1;
-
-	drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
-			 DRM_MODE_ENCODER_DAC, NULL);
-	drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
-
-	return encoder;
-}
-
-
 static int mga_vga_get_modes(struct drm_connector *connector)
 {
 	struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1686,8 +1617,9 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
 
 int mgag200_modeset_init(struct mga_device *mdev)
 {
-	struct drm_encoder *encoder;
+	struct drm_encoder *encoder = &mdev->encoder;
 	struct drm_connector *connector;
+	int ret;
 
 	mdev->mode_info.mode_config_initialized = true;
 
@@ -1698,11 +1630,15 @@ int mgag200_modeset_init(struct mga_device *mdev)
 
 	mga_crtc_init(mdev);
 
-	encoder = mga_encoder_init(mdev->dev);
-	if (!encoder) {
-		DRM_ERROR("mga_encoder_init failed\n");
-		return -1;
+	ret = drm_simple_encoder_init(mdev->dev, encoder,
+				      DRM_MODE_ENCODER_DAC);
+	if (ret) {
+		drm_err(mdev->dev,
+			"drm_simple_encoder_init() failed, error %d\n",
+			ret);
+		return ret;
 	}
+	encoder->possible_crtcs = 0x1;
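+	/*
+	 * drm_simple_encoder_init() wires the encoder to a funcs table whose
+	 * only callback is drm_encoder_cleanup(), so the empty
+	 * dpms/prepare/commit/mode_set stubs and the kmalloc'd wrapper
+	 * struct removed above are no longer needed; the encoder is now
+	 * embedded in struct mga_device.
+	 */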
 
 	connector = mga_vga_init(mdev->dev);
 	if (!connector) {
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7d9e63e..724024a 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = {
 static void check_speed_bin(struct device *dev)
 {
 	struct nvmem_cell *cell;
-	u32 bin, val;
+	u32 val;
+
+	/*
+	 * If the OPP table specifies an opp-supported-hw property, then we
+	 * have to set something with dev_pm_opp_set_supported_hw() or the
+	 * table doesn't get populated, so pick an arbitrary value that
+	 * should ensure the default frequencies are selected without
+	 * conflicting with any actual bins.
+	 */
+	val = 0x80;
 
 	cell = nvmem_cell_get(dev, "speed_bin");
 
-	/* If a nvmem cell isn't defined, nothing to do */
-	if (IS_ERR(cell))
-		return;
+	if (!IS_ERR(cell)) {
+		void *buf = nvmem_cell_read(cell, NULL);
 
-	bin = *((u32 *) nvmem_cell_read(cell, NULL));
-	nvmem_cell_put(cell);
+		if (!IS_ERR(buf)) {
+			u8 bin = *((u8 *) buf);
 
-	val = (1 << bin);
+			val = (1 << bin);
+			kfree(buf);
+		}
+
+		nvmem_cell_put(cell);
+	}
 
 	dev_pm_opp_set_supported_hw(dev, &val, 1);
 }
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 748cd37..c4e71ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
-	int count, i;
-	u64 iova;
-
 	if (IS_ERR_OR_NULL(bo))
 		return;
 
-	count = bo->size >> PAGE_SHIFT;
-	iova = bo->iova;
-
-	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
-		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
-		__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
+	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
 	kfree(bo);
 }
 
@@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
-	int ret, count, i;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
@@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 
 	bo->size = PAGE_ALIGN(size);
 
-	count = bo->size >> PAGE_SHIFT;
+	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
 
-	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
-	if (!bo->pages) {
+	if (!bo->virt) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	for (i = 0; i < count; i++) {
-		bo->pages[i] = alloc_page(GFP_KERNEL);
-		if (!bo->pages[i])
-			goto err;
-	}
-
-	bo->iova = gmu->uncached_iova_base;
-
-	for (i = 0; i < count; i++) {
-		ret = iommu_map(gmu->domain,
-			bo->iova + (PAGE_SIZE * i),
-			page_to_phys(bo->pages[i]), PAGE_SIZE,
-			IOMMU_READ | IOMMU_WRITE);
-
-		if (ret) {
-			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
-			for (i = i - 1 ; i >= 0; i--)
-				iommu_unmap(gmu->domain,
-					bo->iova + (PAGE_SIZE * i),
-					PAGE_SIZE);
-
-			goto err;
-		}
-	}
-
-	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
-		pgprot_writecombine(PAGE_KERNEL));
-	if (!bo->virt)
-		goto err;
-
-	/* Align future IOVA addresses on 1MB boundaries */
-	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
 	return bo;
-
-err:
-	for (i = 0; i < count; i++) {
-		if (bo->pages[i])
-			__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
-	kfree(bo);
-
-	return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
-	int ret;
-
-	/*
-	 * The GMU address space is hardcoded to treat the range
-	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
-	 * between the GMU and the CPU will live in this space
-	 */
-	gmu->uncached_iova_base = 0x60000000;
-
-
-	gmu->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!gmu->domain)
-		return -ENODEV;
-
-	ret = iommu_attach_device(gmu->domain, gmu->dev);
-
-	if (ret) {
-		iommu_domain_free(gmu->domain);
-		gmu->domain = NULL;
-	}
-
-	return ret;
 }
 
 /* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	iommu_detach_device(gmu->domain, gmu->dev);
-
-	iommu_domain_free(gmu->domain);
-
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
 
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	of_dma_configure(gmu->dev, node, true);
+	/* Pass force_dma false to require the DT to set the dma region */
+	ret = of_dma_configure(gmu->dev, node, false);
+	if (ret)
+		return ret;
+
+	/* Set the mask after the of_dma_configure() */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+	if (ret)
+		return ret;
 
 	/* For now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	/* Set up the IOMMU context bank */
-	ret = a6xx_gmu_memory_probe(gmu);
-	if (ret)
-		goto err_put_device;
-
 	/* Allocate memory for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	if (gmu->domain) {
-		iommu_detach_device(gmu->domain, gmu->dev);
-
-		iommu_domain_free(gmu->domain);
-	}
 	ret = -ENODEV;
 
 err_put_device:
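
Two details carry the DMA API conversion above: of_dma_configure() is now
called with force_dma = false, so the DMA region must come from the device
tree rather than being forced, and dma_set_mask_and_coherent() with
DMA_BIT_MASK(31) keeps every returned address below 0x80000000, the upper
bound of the uncached window the removed a6xx_gmu_memory_probe() used to
manage by hand. The resulting allocation contract, sketched:

    /* write-combined CPU mapping; bo->iova is a dma_addr_t < BIT(31) */
    bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
    /* released later via dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova) */
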
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 2af91ed..4af65a3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,8 +12,7 @@
 struct a6xx_gmu_bo {
 	void *virt;
 	size_t size;
-	u64 iova;
-	struct page **pages;
+	dma_addr_t iova;
 };
 
 /*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
 	int hfi_irq;
 	int gmu_irq;
 
-	struct iommu_domain *domain;
-	u64 uncached_iova_base;
-
 	struct device *gxpd;
 
 	int idle_level;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index e67c20c..24c974c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers {
 };
 
 static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
-	"CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+	"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
 		REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
 };
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7fd2982..1d5c43c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
 		return NULL;
 
 	for (i = 0; i < l; i++)
-		buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+		buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
 				ascii85_encode(src[i], out));
 
 	return buf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index bf51341..1744850 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
 	.atomic_destroy_state = dpu_crtc_destroy_state,
 	.late_register = dpu_crtc_late_register,
 	.early_unregister = dpu_crtc_early_unregister,
+	.enable_vblank  = msm_crtc_enable_vblank,
+	.disable_vblank = msm_crtc_disable_vblank,
 };
 
 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f8ac3bf..a1b79ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
  *				clks and resources after IDLE_TIMEOUT time.
  * @vsync_event_work:		worker to handle vsync event for autorefresh
  * @topology:                   topology of the display
- * @mode_set_complete:          flag to indicate modeset completion
  * @idle_timeout:		idle timeout duration in milliseconds
  */
 struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
 	struct delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
-	bool mode_set_complete;
 
 	u32 idle_timeout;
 };
@@ -461,7 +459,7 @@ void dpu_encoder_helper_split_config(
 	struct msm_display_info *disp_info;
 
 	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
-		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
 		return;
 	}
 
@@ -512,7 +510,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
 		if (cur_mode->vdisplay == adj_mode->vdisplay &&
 		    cur_mode->hdisplay == adj_mode->hdisplay &&
 		    drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
-			adj_mode->private = cur_mode->private;
 			adj_mode->private_flags |= cur_mode->private_flags;
 		}
 	}
@@ -563,12 +560,13 @@ static int dpu_encoder_virt_atomic_check(
 	const struct drm_display_mode *mode;
 	struct drm_display_mode *adj_mode;
 	struct msm_display_topology topology;
+	struct dpu_global_state *global_state;
 	int i = 0;
 	int ret = 0;
 
 	if (!drm_enc || !crtc_state || !conn_state) {
 		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
-				drm_enc != 0, crtc_state != 0, conn_state != 0);
+				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
 		return -EINVAL;
 	}
 
@@ -579,6 +577,7 @@ static int dpu_encoder_virt_atomic_check(
 	dpu_kms = to_dpu_kms(priv->kms);
 	mode = &crtc_state->mode;
 	adj_mode = &crtc_state->adjusted_mode;
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 	trace_dpu_enc_atomic_check(DRMID(drm_enc));
 
 	/*
@@ -610,17 +609,15 @@ static int dpu_encoder_virt_atomic_check(
 
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 
-	/* Reserve dynamic resources now. Indicating AtomicTest phase */
+	/* Reserve dynamic resources now. */
 	if (!ret) {
 		/*
 		 * Avoid reserving resources when mode set is pending. Topology
 		 * info may not be available to complete reservation.
 		 */
-		if (drm_atomic_crtc_needs_modeset(crtc_state)
-				&& dpu_enc->mode_set_complete) {
-			ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
-					     topology, true);
-			dpu_enc->mode_set_complete = false;
+		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+					drm_enc, crtc_state, topology);
 		}
 	}
 
@@ -957,12 +954,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	struct drm_connector *conn = NULL, *conn_iter;
 	struct drm_crtc *drm_crtc;
 	struct dpu_crtc_state *cstate;
-	struct dpu_rm_hw_iter hw_iter;
+	struct dpu_global_state *global_state;
 	struct msm_display_topology topology;
-	struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
-	struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
-	int num_lm = 0, num_ctl = 0;
-	int i, j, ret;
+	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+	int num_lm, num_ctl, num_pp;
+	int i, j;
 
 	if (!drm_enc) {
 		DPU_ERROR("invalid encoder\n");
@@ -976,6 +974,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	dpu_kms = to_dpu_kms(priv->kms);
 	connector_list = &dpu_kms->dev->mode_config.connector_list;
 
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
+	if (IS_ERR_OR_NULL(global_state)) {
+		DPU_ERROR("Failed to get global state\n");
+		return;
+	}
+
 	trace_dpu_enc_mode_set(DRMID(drm_enc));
 
 	list_for_each_entry(conn_iter, connector_list, head)
@@ -996,77 +1000,57 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 
-	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
-	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
-			     topology, false);
-	if (ret) {
-		DPU_ERROR_ENC(dpu_enc,
-				"failed to reserve hw resources, %d\n", ret);
-		return;
-	}
+	/* Query resource that have been reserved in atomic check step. */
+	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+		ARRAY_SIZE(hw_pp));
+	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
 
-	dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		dpu_enc->hw_pp[i] = NULL;
-		if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
-			break;
-		dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
-	}
-
-	dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
-			break;
-		hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
-		num_ctl++;
-	}
-
-	dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
-	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
-		if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
-			break;
-		hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
-		num_lm++;
-	}
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+						: NULL;
 
 	cstate = to_dpu_crtc_state(drm_crtc->state);
 
 	for (i = 0; i < num_lm; i++) {
 		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
 
-		cstate->mixers[i].hw_lm = hw_lm[i];
-		cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
 	}
 
 	cstate->num_mixers = num_lm;
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		int num_blk;
+		struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
 		if (!dpu_enc->hw_pp[i]) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no pp block assigned at idx: %d\n", i);
-			goto error;
+			return;
 		}
 
 		if (!hw_ctl[i]) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no ctl block assigned at idx: %d\n", i);
-			goto error;
+			return;
 		}
 
 		phys->hw_pp = dpu_enc->hw_pp[i];
-		phys->hw_ctl = hw_ctl[i];
+		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
 
-		dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
-				    DPU_HW_BLK_INTF);
-		for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+		num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
+			global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
+			hw_blk, ARRAY_SIZE(hw_blk));
+		for (j = 0; j < num_blk; j++) {
 			struct dpu_hw_intf *hw_intf;
 
-			if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
-				break;
-
-			hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+			hw_intf = to_dpu_hw_intf(hw_blk[j]);
 			if (hw_intf->idx == phys->intf_idx)
 				phys->hw_intf = hw_intf;
 		}
@@ -1074,18 +1058,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		if (!phys->hw_intf) {
 			DPU_ERROR_ENC(dpu_enc,
 				      "no intf block assigned at idx: %d\n", i);
-				goto error;
+			return;
 		}
 
 		phys->connector = conn->state->connector;
 		if (phys->ops.mode_set)
 			phys->ops.mode_set(phys, mode, adj_mode);
 	}
-
-	dpu_enc->mode_set_complete = true;
-
-error:
-	dpu_rm_release(&dpu_kms->rm, drm_enc);
 }
 
 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1182,6 +1161,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
+	struct dpu_global_state *global_state;
 	int i = 0;
 
 	if (!drm_enc) {
@@ -1200,6 +1180,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 
 	trace_dpu_enc_disable(DRMID(drm_enc));
 
@@ -1229,7 +1210,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
-	dpu_rm_release(&dpu_kms->rm, drm_enc);
+	dpu_rm_release(global_state, drm_enc);
 
 	mutex_unlock(&dpu_enc->enc_lock);
 }
@@ -1965,7 +1946,7 @@ static int dpu_encoder_virt_add_phys_encs(
 		if (IS_ERR_OR_NULL(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
 				PTR_ERR(enc));
-			return enc == 0 ? -EINVAL : PTR_ERR(enc);
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
 		}
 
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -1978,7 +1959,7 @@ static int dpu_encoder_virt_add_phys_encs(
 		if (IS_ERR_OR_NULL(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
 				PTR_ERR(enc));
-			return enc == 0 ? -EINVAL : PTR_ERR(enc);
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
 		}
 
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
@@ -2009,7 +1990,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 	struct dpu_enc_phys_init_params phys_params;
 
 	if (!dpu_enc) {
-		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0);
+		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
 		return -EINVAL;
 	}
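
The reworked resource-manager flow across this file, sketched end to end
(function names as in this patch; locals and modeset checks elided):

    /* atomic check: reserve hardware blocks into the global state */
    global_state = dpu_kms_get_existing_global_state(dpu_kms);
    ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
                         crtc_state, topology);

    /* mode_set, after the atomic swap: query what was reserved */
    num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                    drm_enc->base.id, DPU_HW_BLK_LM, hw_lm,
                    ARRAY_SIZE(hw_lm));

    /* disable: hand the blocks back */
    dpu_rm_release(global_state, drm_enc);
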
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 39e1e28..8493d68 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config(
 		to_dpu_encoder_phys_cmd(phys_enc);
 
 	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
-		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
 		return;
 	}
 
@@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
 	u32 flush_mask = 0;
 
 	if (!phys_enc->hw_pp) {
-		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index c71c18d..b5a4905 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
 	struct dpu_hw_intf_cfg intf_cfg = { 0 };
 
 	if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
-		DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+		DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
 		return;
 	}
 
@@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 
 	if (!phys_enc->hw_intf) {
 		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
-				phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+				phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 8546898..0ead64d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -90,6 +90,16 @@ struct dpu_hw_intf {
 };
 
 /**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
  * dpu_hw_intf_init(): Initializes the intf driver for the passed
  * interface idx.
  * @idx:  interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3d6f46b..d73cb73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -97,6 +97,16 @@ struct dpu_hw_pingpong {
 };
 
 /**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
  * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
  *	pingpong idx.
  * @idx:  Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index cb08faf..ce19f1d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
 {
 	struct dpu_debugfs_regset32 *regset = s->private;
 	struct dpu_kms *dpu_kms = regset->dpu_kms;
-	struct drm_device *dev;
-	struct msm_drm_private *priv;
 	void __iomem *base;
 	uint32_t i, addr;
 
 	if (!dpu_kms->mmio)
 		return 0;
 
-	dev = dpu_kms->dev;
-	priv = dev->dev_private;
 	base = dpu_kms->mmio + regset->offset;
 
 	/* insert padding spaces, if needed */
@@ -228,6 +224,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 }
 #endif
 
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+	return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and returns
+ * a duplicated private object state.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+	struct msm_drm_private *priv = s->dev->dev_private;
+	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+	struct drm_private_state *priv_state;
+	int ret;
+
+	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	priv_state = drm_atomic_get_private_obj_state(s,
+						&dpu_kms->global_state);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_dpu_global_state(priv_state);
+}
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+	struct dpu_global_state *state;
+
+	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+				      struct drm_private_state *state)
+{
+	struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+	kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
+	.atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+	struct dpu_global_state *state;
+
+	drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+				    &state->base,
+				    &dpu_kms_global_state_funcs);
+	return 0;
+}
+
 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
 	return dpu_crtc_vblank(crtc, true);
@@ -267,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
 static void dpu_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
-	struct dpu_kms *dpu_kms;
-	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	struct drm_encoder *encoder;
@@ -276,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 
 	if (!kms)
 		return;
-	dpu_kms = to_dpu_kms(kms);
-	dev = dpu_kms->dev;
 
 	/* Call prepare_commit for all affected encoders */
 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
@@ -552,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
 
 static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
 {
-	struct drm_device *dev;
 	int i;
 
-	dev = dpu_kms->dev;
-
 	if (dpu_kms->hw_intr)
 		dpu_hw_intr_destroy(dpu_kms->hw_intr);
 	dpu_kms->hw_intr = NULL;
@@ -760,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 {
 	struct dpu_kms *dpu_kms;
 	struct drm_device *dev;
-	struct msm_drm_private *priv;
 	int i, rc = -EINVAL;
 
 	if (!kms) {
@@ -770,7 +837,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
 	dpu_kms = to_dpu_kms(kms);
 	dev = dpu_kms->dev;
-	priv = dev->dev_private;
+
+	rc = dpu_kms_global_obj_init(dpu_kms);
+	if (rc)
+		return rc;
 
 	atomic_set(&dpu_kms->bandwidth_ref, 0);
 
@@ -1018,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
 	int rc = -1;
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
-	struct drm_device *ddev;
 	struct dss_module_power *mp = &dpu_kms->mp;
 
-	ddev = dpu_kms->dev;
 	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
 	if (rc)
 		DPU_ERROR("clock disable failed rc:%d\n", rc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index c6169e7..211f5de9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -111,6 +111,13 @@ struct dpu_kms {
 
 	struct dpu_core_perf perf;
 
+	/*
+	 * Global private object state. Do not access directly; use
+	 * dpu_kms_get_global_state().
+	 */
+	struct drm_modeset_lock global_state_lock;
+	struct drm_private_obj global_state;
+
 	struct dpu_rm rm;
 	bool rm_init;
 
@@ -139,6 +146,25 @@ struct vsync_info {
 
 #define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
 
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+	struct drm_private_state base;
+
+	uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+	uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+	uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+	uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+};
+
+struct dpu_global_state
+	*dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+	*__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
 /**
  * Debugfs functions - extra helper functions for debugfs support
  *
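Each of the new arrays maps a block id, rebased to the first id of its
type, to the DRM id of the owning encoder, with 0 meaning free. An
illustration, assuming the LM_* enum values from the hw catalog:

	/*
	 * Illustration: after encoder 31 reserves LM_0 and LM_1, the
	 * mixer map in an otherwise zeroed state gs holds
	 *
	 *	gs.mixer_to_enc_id[LM_0 - LM_0] == 31;	index 0
	 *	gs.mixer_to_enc_id[LM_1 - LM_0] == 31;	index 1
	 *
	 * so, for index 0:
	 *	reserved_by_other(gs.mixer_to_enc_id, 0, 42) -> true
	 *	reserved_by_other(gs.mixer_to_enc_id, 0, 31) -> false
	 *
	 * reserved_by_other() is the static helper added to dpu_rm.c
	 * below.
	 */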
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 23f5b14..9b62451 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -12,8 +12,12 @@
 #include "dpu_encoder.h"
 #include "dpu_trace.h"
 
-#define RESERVED_BY_OTHER(h, r)  \
-		((h)->enc_id && (h)->enc_id != r)
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+				     uint32_t enc_id)
+{
+	return res_map[idx] && res_map[idx] != enc_id;
+}
 
 /**
  * struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -25,171 +29,43 @@ struct dpu_rm_requirements {
 	struct dpu_encoder_hw_resources hw_res;
 };
 
-
-/**
- * struct dpu_rm_hw_blk - hardware block tracking list member
- * @list:	List head for list of all hardware blocks tracking items
- * @id:		Hardware ID number, within it's own space, ie. LM_X
- * @enc_id:	Encoder id to which this blk is binded
- * @hw:		Pointer to the hardware register access object for this block
- */
-struct dpu_rm_hw_blk {
-	struct list_head list;
-	uint32_t id;
-	uint32_t enc_id;
-	struct dpu_hw_blk *hw;
-};
-
-void dpu_rm_init_hw_iter(
-		struct dpu_rm_hw_iter *iter,
-		uint32_t enc_id,
-		enum dpu_hw_blk_type type)
-{
-	memset(iter, 0, sizeof(*iter));
-	iter->enc_id = enc_id;
-	iter->type = type;
-}
-
-static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
-	struct list_head *blk_list;
-
-	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
-		DPU_ERROR("invalid rm\n");
-		return false;
-	}
-
-	i->hw = NULL;
-	blk_list = &rm->hw_blks[i->type];
-
-	if (i->blk && (&i->blk->list == blk_list)) {
-		DPU_DEBUG("attempt resume iteration past last\n");
-		return false;
-	}
-
-	i->blk = list_prepare_entry(i->blk, blk_list, list);
-
-	list_for_each_entry_continue(i->blk, blk_list, list) {
-		if (i->enc_id == i->blk->enc_id) {
-			i->hw = i->blk->hw;
-			DPU_DEBUG("found type %d id %d for enc %d\n",
-					i->type, i->blk->id, i->enc_id);
-			return true;
-		}
-	}
-
-	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
-
-	return false;
-}
-
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
-	bool ret;
-
-	mutex_lock(&rm->rm_lock);
-	ret = _dpu_rm_get_hw_locked(rm, i);
-	mutex_unlock(&rm->rm_lock);
-
-	return ret;
-}
-
-static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
-{
-	switch (type) {
-	case DPU_HW_BLK_LM:
-		dpu_hw_lm_destroy(hw);
-		break;
-	case DPU_HW_BLK_CTL:
-		dpu_hw_ctl_destroy(hw);
-		break;
-	case DPU_HW_BLK_PINGPONG:
-		dpu_hw_pingpong_destroy(hw);
-		break;
-	case DPU_HW_BLK_INTF:
-		dpu_hw_intf_destroy(hw);
-		break;
-	case DPU_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
-	case DPU_HW_BLK_TOP:
-		/* Top is a singleton, not managed in hw_blks list */
-	case DPU_HW_BLK_MAX:
-	default:
-		DPU_ERROR("unsupported block type %d\n", type);
-		break;
-	}
-}
-
 int dpu_rm_destroy(struct dpu_rm *rm)
 {
-	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
-	enum dpu_hw_blk_type type;
+	int i;
 
-	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
-		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
-				list) {
-			list_del(&hw_cur->list);
-			_dpu_rm_hw_destroy(type, hw_cur->hw);
-			kfree(hw_cur);
+	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+		struct dpu_hw_pingpong *hw;
+
+		if (rm->pingpong_blks[i]) {
+			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+			dpu_hw_pingpong_destroy(hw);
 		}
 	}
+	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+		struct dpu_hw_mixer *hw;
 
-	mutex_destroy(&rm->rm_lock);
-
-	return 0;
-}
-
-static int _dpu_rm_hw_blk_create(
-		struct dpu_rm *rm,
-		const struct dpu_mdss_cfg *cat,
-		void __iomem *mmio,
-		enum dpu_hw_blk_type type,
-		uint32_t id,
-		const void *hw_catalog_info)
-{
-	struct dpu_rm_hw_blk *blk;
-	void *hw;
-
-	switch (type) {
-	case DPU_HW_BLK_LM:
-		hw = dpu_hw_lm_init(id, mmio, cat);
-		break;
-	case DPU_HW_BLK_CTL:
-		hw = dpu_hw_ctl_init(id, mmio, cat);
-		break;
-	case DPU_HW_BLK_PINGPONG:
-		hw = dpu_hw_pingpong_init(id, mmio, cat);
-		break;
-	case DPU_HW_BLK_INTF:
-		hw = dpu_hw_intf_init(id, mmio, cat);
-		break;
-	case DPU_HW_BLK_SSPP:
-		/* SSPPs are not managed by the resource manager */
-	case DPU_HW_BLK_TOP:
-		/* Top is a singleton, not managed in hw_blks list */
-	case DPU_HW_BLK_MAX:
-	default:
-		DPU_ERROR("unsupported block type %d\n", type);
-		return -EINVAL;
+		if (rm->mixer_blks[i]) {
+			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+			dpu_hw_lm_destroy(hw);
+		}
 	}
+	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+		struct dpu_hw_ctl *hw;
 
-	if (IS_ERR_OR_NULL(hw)) {
-		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
-				type, PTR_ERR(hw));
-		return -EFAULT;
+		if (rm->ctl_blks[i]) {
+			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+			dpu_hw_ctl_destroy(hw);
+		}
 	}
+	for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
+		struct dpu_hw_intf *hw;
 
-	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
-	if (!blk) {
-		_dpu_rm_hw_destroy(type, hw);
-		return -ENOMEM;
+		if (rm->intf_blks[i]) {
+			hw = to_dpu_hw_intf(rm->intf_blks[i]);
+			dpu_hw_intf_destroy(hw);
+		}
 	}
 
-	blk->id = id;
-	blk->hw = hw;
-	blk->enc_id = 0;
-	list_add_tail(&blk->list, &rm->hw_blks[type]);
-
 	return 0;
 }
 
@@ -198,7 +74,6 @@ int dpu_rm_init(struct dpu_rm *rm,
 		void __iomem *mmio)
 {
 	int rc, i;
-	enum dpu_hw_blk_type type;
 
 	if (!rm || !cat || !mmio) {
 		DPU_ERROR("invalid kms\n");
@@ -208,13 +83,9 @@ int dpu_rm_init(struct dpu_rm *rm,
 	/* Clear, setup lists */
 	memset(rm, 0, sizeof(*rm));
 
-	mutex_init(&rm->rm_lock);
-
-	for (type = 0; type < DPU_HW_BLK_MAX; type++)
-		INIT_LIST_HEAD(&rm->hw_blks[type]);
-
 	/* Interrogate HW catalog and create tracking items for hw blocks */
 	for (i = 0; i < cat->mixer_count; i++) {
+		struct dpu_hw_mixer *hw;
 		const struct dpu_lm_cfg *lm = &cat->mixer[i];
 
 		if (lm->pingpong == PINGPONG_MAX) {
@@ -222,12 +93,17 @@ int dpu_rm_init(struct dpu_rm *rm,
 			continue;
 		}
 
-		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
-				cat->mixer[i].id, &cat->mixer[i]);
-		if (rc) {
-			DPU_ERROR("failed: lm hw not available\n");
+		if (lm->id < LM_0 || lm->id >= LM_MAX) {
+			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
+			continue;
+		}
+		hw = dpu_hw_lm_init(lm->id, mmio, cat);
+		if (IS_ERR_OR_NULL(hw)) {
+			rc = PTR_ERR(hw);
+			DPU_ERROR("failed lm object creation: err %d\n", rc);
 			goto fail;
 		}
+		rm->mixer_blks[lm->id - LM_0] = &hw->base;
 
 		if (!rm->lm_max_width) {
 			rm->lm_max_width = lm->sblk->maxwidth;
@@ -243,35 +119,59 @@ int dpu_rm_init(struct dpu_rm *rm,
 	}
 
 	for (i = 0; i < cat->pingpong_count; i++) {
-		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
-				cat->pingpong[i].id, &cat->pingpong[i]);
-		if (rc) {
-			DPU_ERROR("failed: pp hw not available\n");
+		struct dpu_hw_pingpong *hw;
+		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
+			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
+			continue;
+		}
+		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
+		if (IS_ERR_OR_NULL(hw)) {
+			rc = PTR_ERR(hw);
+			DPU_ERROR("failed pingpong object creation: err %d\n",
+				rc);
 			goto fail;
 		}
+		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
 	}
 
 	for (i = 0; i < cat->intf_count; i++) {
-		if (cat->intf[i].type == INTF_NONE) {
+		struct dpu_hw_intf *hw;
+		const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+		if (intf->type == INTF_NONE) {
 			DPU_DEBUG("skip intf %d with type none\n", i);
 			continue;
 		}
-
-		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
-				cat->intf[i].id, &cat->intf[i]);
-		if (rc) {
-			DPU_ERROR("failed: intf hw not available\n");
+		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
+			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
+			continue;
+		}
+		hw = dpu_hw_intf_init(intf->id, mmio, cat);
+		if (IS_ERR_OR_NULL(hw)) {
+			rc = PTR_ERR(hw);
+			DPU_ERROR("failed intf object creation: err %d\n", rc);
 			goto fail;
 		}
+		rm->intf_blks[intf->id - INTF_0] = &hw->base;
 	}
 
 	for (i = 0; i < cat->ctl_count; i++) {
-		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
-				cat->ctl[i].id, &cat->ctl[i]);
-		if (rc) {
-			DPU_ERROR("failed: ctl hw not available\n");
+		struct dpu_hw_ctl *hw;
+		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
+			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
+			continue;
+		}
+		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
+		if (IS_ERR_OR_NULL(hw)) {
+			rc = PTR_ERR(hw);
+			DPU_ERROR("failed ctl object creation: err %d\n", rc);
 			goto fail;
 		}
+		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
 	}
 
 	return 0;
@@ -279,7 +179,7 @@ int dpu_rm_init(struct dpu_rm *rm,
 fail:
 	dpu_rm_destroy(rm);
 
-	return rc;
+	return rc ? rc : -EFAULT;
 }
 
 static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
@@ -288,85 +188,81 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
 }
 
 /**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * @Return: true if rm->mixer_blks[peer_idx] is a peer of
+ *          rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+		int peer_idx)
+{
+	const struct dpu_lm_cfg *prim_lm_cfg;
+	const struct dpu_lm_cfg *peer_cfg;
+
+	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+				prim_lm_cfg->id);
+		return false;
+	}
+	return true;
+}
+
+/**
  * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
  *	proposed use case requirements, incl. hardwired dependent blocks like
  *	pingpong
  * @rm: dpu resource manager handle
  * @enc_id: encoder id requesting for allocation
- * @reqs: proposed use case requirements
- * @lm: proposed layer mixer, function checks if lm, and all other hardwired
- *      blocks connected to the lm (pp) is available and appropriate
- * @pp: output parameter, pingpong block attached to the layer mixer.
- *      NULL if pp was not available, or not matching requirements.
- * @primary_lm: if non-null, this function check if lm is compatible primary_lm
- *              as well as satisfying all other requirements
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ *      if lm, and all other hardwired blocks connected to the lm (pp) is
+ *      available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ *      mixer in rm->pingpong_blks[].
  * @Return: true if lm matches all requirements, false otherwise
  */
-static bool _dpu_rm_check_lm_and_get_connected_blks(
-		struct dpu_rm *rm,
-		uint32_t enc_id,
-		struct dpu_rm_requirements *reqs,
-		struct dpu_rm_hw_blk *lm,
-		struct dpu_rm_hw_blk **pp,
-		struct dpu_rm_hw_blk *primary_lm)
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
+		uint32_t enc_id, int lm_idx, int *pp_idx)
 {
-	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
-	struct dpu_rm_hw_iter iter;
-
-	*pp = NULL;
-
-	DPU_DEBUG("check lm %d pp %d\n",
-			   lm_cfg->id, lm_cfg->pingpong);
-
-	/* Check if this layer mixer is a peer of the proposed primary LM */
-	if (primary_lm) {
-		const struct dpu_lm_cfg *prim_lm_cfg =
-				to_dpu_hw_mixer(primary_lm->hw)->cap;
-
-		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
-			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
-					prim_lm_cfg->id);
-			return false;
-		}
-	}
+	const struct dpu_lm_cfg *lm_cfg;
+	int idx;
 
 	/* Already reserved? */
-	if (RESERVED_BY_OTHER(lm, enc_id)) {
-		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
 		return false;
 	}
 
-	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
-	while (_dpu_rm_get_hw_locked(rm, &iter)) {
-		if (iter.blk->id == lm_cfg->pingpong) {
-			*pp = iter.blk;
-			break;
-		}
-	}
-
-	if (!*pp) {
+	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+	idx = lm_cfg->pingpong - PINGPONG_0;
+	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
 		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
 		return false;
 	}
 
-	if (RESERVED_BY_OTHER(*pp, enc_id)) {
-		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
-				(*pp)->id);
+	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+				lm_cfg->pingpong);
 		return false;
 	}
-
+	*pp_idx = idx;
 	return true;
 }
 
-static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+			       struct dpu_global_state *global_state,
+			       uint32_t enc_id,
 			       struct dpu_rm_requirements *reqs)
 
 {
-	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
-	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
-	struct dpu_rm_hw_iter iter_i, iter_j;
-	int lm_count = 0;
-	int i, rc = 0;
+	int lm_idx[MAX_BLOCKS];
+	int pp_idx[MAX_BLOCKS];
+	int i, j, lm_count = 0;
 
 	if (!reqs->topology.num_lm) {
 		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
@@ -374,36 +270,40 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
 	}
 
 	/* Find a primary mixer */
-	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
-	while (lm_count != reqs->topology.num_lm &&
-			_dpu_rm_get_hw_locked(rm, &iter_i)) {
-		memset(&lm, 0, sizeof(lm));
-		memset(&pp, 0, sizeof(pp));
+	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+			lm_count < reqs->topology.num_lm; i++) {
+		if (!rm->mixer_blks[i])
+			continue;
 
 		lm_count = 0;
-		lm[lm_count] = iter_i.blk;
+		lm_idx[lm_count] = i;
 
-		if (!_dpu_rm_check_lm_and_get_connected_blks(
-				rm, enc_id, reqs, lm[lm_count],
-				&pp[lm_count], NULL))
+		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+				enc_id, i, &pp_idx[lm_count])) {
 			continue;
+		}
 
 		++lm_count;
 
 		/* Valid primary mixer found, find matching peers */
-		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
-
-		while (lm_count != reqs->topology.num_lm &&
-				_dpu_rm_get_hw_locked(rm, &iter_j)) {
-			if (iter_i.blk == iter_j.blk)
+		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
+				lm_count < reqs->topology.num_lm; j++) {
+			if (!rm->mixer_blks[j])
 				continue;
 
-			if (!_dpu_rm_check_lm_and_get_connected_blks(
-					rm, enc_id, reqs, iter_j.blk,
-					&pp[lm_count], iter_i.blk))
+			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
+				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
+						LM_0 + i);
 				continue;
+			}
 
-			lm[lm_count] = iter_j.blk;
+			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+					global_state, enc_id, j,
+					&pp_idx[lm_count])) {
+				continue;
+			}
+
+			lm_idx[lm_count] = j;
 			++lm_count;
 		}
 	}
@@ -413,65 +313,65 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
 		return -ENAVAIL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(lm); i++) {
-		if (!lm[i])
-			break;
+	for (i = 0; i < lm_count; i++) {
+		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
 
-		lm[i]->enc_id = enc_id;
-		pp[i]->enc_id = enc_id;
-
-		trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
+		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+					 pp_idx[i] + PINGPONG_0);
 	}
 
-	return rc;
+	return 0;
 }
 
 static int _dpu_rm_reserve_ctls(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
 		const struct msm_display_topology *top)
 {
-	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
-	struct dpu_rm_hw_iter iter;
-	int i = 0, num_ctls = 0;
-	bool needs_split_display = false;
-
-	memset(&ctls, 0, sizeof(ctls));
+	int ctl_idx[MAX_BLOCKS];
+	int i = 0, j, num_ctls;
+	bool needs_split_display;
 
 	/* each hw_intf needs its own hw_ctrl to program its control path */
 	num_ctls = top->num_intf;
 
 	needs_split_display = _dpu_rm_needs_split_display(top);
 
-	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
-	while (_dpu_rm_get_hw_locked(rm, &iter)) {
-		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
-		unsigned long features = ctl->caps->features;
+	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+		const struct dpu_hw_ctl *ctl;
+		unsigned long features;
 		bool has_split_display;
 
-		if (RESERVED_BY_OTHER(iter.blk, enc_id))
+		if (!rm->ctl_blks[j])
+			continue;
+		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
 			continue;
 
+		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+		features = ctl->caps->features;
 		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
 
-		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);
 
 		if (needs_split_display != has_split_display)
 			continue;
 
-		ctls[i] = iter.blk;
-		DPU_DEBUG("ctl %d match\n", iter.blk->id);
+		ctl_idx[i] = j;
+		DPU_DEBUG("ctl %d match\n", j + CTL_0);
 
 		if (++i == num_ctls)
 			break;
 	}
 
 	if (i != num_ctls)
 		return -ENAVAIL;
 
-	for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
-		ctls[i]->enc_id = enc_id;
-		trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
+	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
 	}
 
 	return 0;
@@ -479,40 +379,34 @@ static int _dpu_rm_reserve_ctls(
 
 static int _dpu_rm_reserve_intf(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
-		uint32_t id,
-		enum dpu_hw_blk_type type)
+		uint32_t id)
 {
-	struct dpu_rm_hw_iter iter;
-	int ret = 0;
+	int idx = id - INTF_0;
 
-	/* Find the block entry in the rm, and note the reservation */
-	dpu_rm_init_hw_iter(&iter, 0, type);
-	while (_dpu_rm_get_hw_locked(rm, &iter)) {
-		if (iter.blk->id != id)
-			continue;
-
-		if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
-			DPU_ERROR("type %d id %d already reserved\n", type, id);
-			return -ENAVAIL;
-		}
-
-		iter.blk->enc_id = enc_id;
-		trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
-		break;
-	}
-
-	/* Shouldn't happen since intfs are fixed at probe */
-	if (!iter.hw) {
-		DPU_ERROR("couldn't find type %d id %d\n", type, id);
+	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
+		DPU_ERROR("invalid intf id: %d", id);
 		return -EINVAL;
 	}
 
-	return ret;
+	if (!rm->intf_blks[idx]) {
+		DPU_ERROR("couldn't find intf id %d\n", id);
+		return -EINVAL;
+	}
+
+	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
+		DPU_ERROR("intf id %d already reserved\n", id);
+		return -ENAVAIL;
+	}
+
+	global_state->intf_to_enc_id[idx] = enc_id;
+	return 0;
 }
 
 static int _dpu_rm_reserve_intf_related_hw(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		uint32_t enc_id,
 		struct dpu_encoder_hw_resources *hw_res)
 {
@@ -523,8 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw(
 		if (hw_res->intfs[i] == INTF_MODE_NONE)
 			continue;
 		id = i + INTF_0;
-		ret = _dpu_rm_reserve_intf(rm, enc_id, id,
-				DPU_HW_BLK_INTF);
+		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
 		if (ret)
 			return ret;
 	}
@@ -534,25 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw(
 
 static int _dpu_rm_make_reservation(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
 		struct dpu_rm_requirements *reqs)
 {
 	int ret;
 
-	ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
 	if (ret) {
 		DPU_ERROR("unable to find appropriate mixers\n");
 		return ret;
 	}
 
-	ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+				&reqs->topology);
 	if (ret) {
 		DPU_ERROR("unable to find appropriate CTL\n");
 		return ret;
 	}
 
-	ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+	ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
+				&reqs->hw_res);
 	if (ret)
 		return ret;
 
@@ -560,9 +455,7 @@ static int _dpu_rm_make_reservation(
 }
 
 static int _dpu_rm_populate_requirements(
-		struct dpu_rm *rm,
 		struct drm_encoder *enc,
-		struct drm_crtc_state *crtc_state,
 		struct dpu_rm_requirements *reqs,
 		struct msm_display_topology req_topology)
 {
@@ -577,37 +470,36 @@ static int _dpu_rm_populate_requirements(
 	return 0;
 }
 
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+				  uint32_t enc_id)
 {
-	struct dpu_rm_hw_blk *blk;
-	enum dpu_hw_blk_type type;
+	int i;
 
-	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
-		list_for_each_entry(blk, &rm->hw_blks[type], list) {
-			if (blk->enc_id == enc_id) {
-				blk->enc_id = 0;
-				DPU_DEBUG("rel enc %d %d %d\n", enc_id,
-					  type, blk->id);
-			}
-		}
+	for (i = 0; i < cnt; i++) {
+		if (res_mapping[i] == enc_id)
+			res_mapping[i] = 0;
 	}
 }
 
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_global_state *global_state,
+		    struct drm_encoder *enc)
 {
-	mutex_lock(&rm->rm_lock);
-
-	_dpu_rm_release_reservation(rm, enc->base.id);
-
-	mutex_unlock(&rm->rm_lock);
+	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
+		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
 }
 
 int dpu_rm_reserve(
 		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *enc,
 		struct drm_crtc_state *crtc_state,
-		struct msm_display_topology topology,
-		bool test_only)
+		struct msm_display_topology topology)
 {
 	struct dpu_rm_requirements reqs;
 	int ret;
@@ -616,31 +508,75 @@ int dpu_rm_reserve(
 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
 		return 0;
 
-	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
-		      enc->base.id, crtc_state->crtc->base.id, test_only);
+	if (IS_ERR(global_state)) {
+		DPU_ERROR("failed to global state\n");
+		return PTR_ERR(global_state);
+	}
 
-	mutex_lock(&rm->rm_lock);
+	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+		      enc->base.id, crtc_state->crtc->base.id);
 
-	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
-					    topology);
+	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
 	if (ret) {
 		DPU_ERROR("failed to populate hw requirements\n");
-		goto end;
+		return ret;
 	}
 
-	ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
-	if (ret) {
+	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+	if (ret)
 		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
-		_dpu_rm_release_reservation(rm, enc->base.id);
-	} else if (test_only) {
-		 /* test_only: test the reservation and then undo */
-		DPU_DEBUG("test_only: discard test [enc: %d]\n",
-				enc->base.id);
-		_dpu_rm_release_reservation(rm, enc->base.id);
-	}
 
-end:
-	mutex_unlock(&rm->rm_lock);
 
 	return ret;
 }
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+	struct dpu_global_state *global_state, uint32_t enc_id,
+	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+	struct dpu_hw_blk **hw_blks;
+	uint32_t *hw_to_enc_id;
+	int i, num_blks, max_blks;
+
+	switch (type) {
+	case DPU_HW_BLK_PINGPONG:
+		hw_blks = rm->pingpong_blks;
+		hw_to_enc_id = global_state->pingpong_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->pingpong_blks);
+		break;
+	case DPU_HW_BLK_LM:
+		hw_blks = rm->mixer_blks;
+		hw_to_enc_id = global_state->mixer_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->mixer_blks);
+		break;
+	case DPU_HW_BLK_CTL:
+		hw_blks = rm->ctl_blks;
+		hw_to_enc_id = global_state->ctl_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->ctl_blks);
+		break;
+	case DPU_HW_BLK_INTF:
+		hw_blks = rm->intf_blks;
+		hw_to_enc_id = global_state->intf_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->intf_blks);
+		break;
+	default:
+		DPU_ERROR("blk type %d not managed by rm\n", type);
+		return 0;
+	}
+
+	num_blks = 0;
+	for (i = 0; i < max_blks; i++) {
+		if (hw_to_enc_id[i] != enc_id)
+			continue;
+
+		if (num_blks == blks_size) {
+			DPU_ERROR("More than %d resources assigned to enc %d\n",
+				  blks_size, enc_id);
+			break;
+		}
+		blks[num_blks++] = hw_blks[i];
+	}
+
+	return num_blks;
+}
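Putting the pieces together, the reserve path is expected to look roughly
like the sketch below; the wrapper is hypothetical, the helpers are from
this series, and the disable path in dpu_encoder.c above pairs it with
dpu_kms_get_existing_global_state() plus dpu_rm_release():

	/* sketch of a hypothetical caller of the new reserve API */
	static int my_encoder_check(struct drm_encoder *enc,
				    struct drm_crtc_state *crtc_state,
				    struct drm_atomic_state *state,
				    struct msm_display_topology topology)
	{
		struct msm_drm_private *priv = enc->dev->dev_private;
		struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
		struct dpu_global_state *global_state;

		/* duplicated copy, private to this atomic transaction */
		global_state = dpu_kms_get_global_state(state);
		if (IS_ERR(global_state))
			return PTR_ERR(global_state);

		/* fills the *_to_enc_id maps in the duplicated state */
		return dpu_rm_reserve(&dpu_kms->rm, global_state, enc,
				      crtc_state, topology);
	}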
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9c580a0..6d2b04f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -11,37 +11,24 @@
 #include "msm_kms.h"
 #include "dpu_hw_top.h"
 
+struct dpu_global_state;
+
 /**
  * struct dpu_rm - DPU dynamic hardware resource manager
- * @hw_blks: array of lists of hardware resources present in the system, one
- *	list per type of hardware block
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @intf_blks: array of intf hardware resources
  * @lm_max_width: cached layer mixer maximum width
  * @rm_lock: resource manager mutex
  */
 struct dpu_rm {
-	struct list_head hw_blks[DPU_HW_BLK_MAX];
+	struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+	struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+	struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+	struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
+
 	uint32_t lm_max_width;
-	struct mutex rm_lock;
-};
-
-/**
- *  struct dpu_rm_hw_blk - resource manager internal structure
- *	forward declaration for single iterator definition without void pointer
- */
-struct dpu_rm_hw_blk;
-
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @hw: dpu_hw object requested, or NULL on failure
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
-	void *hw;
-	struct dpu_rm_hw_blk *blk;
-	uint32_t enc_id;
-	enum dpu_hw_blk_type type;
 };
 
 /**
@@ -74,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm);
  * @drm_enc: DRM Encoder handle
  * @crtc_state: Proposed Atomic DRM CRTC State handle
  * @topology: Pointer to topology info for the display
- * @test_only: Atomic-Test phase, discard results (unless property overrides)
  * @Return: 0 on Success otherwise -ERROR
  */
 int dpu_rm_reserve(struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
 		struct drm_encoder *drm_enc,
 		struct drm_crtc_state *crtc_state,
-		struct msm_display_topology topology,
-		bool test_only);
+		struct msm_display_topology topology);
 
 /**
  * dpu_rm_reserve - Given the encoder for the display chain, release any
@@ -90,31 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm,
  * @enc: DRM Encoder handle
  * @Return: 0 on Success otherwise -ERROR
  */
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_global_state *global_state,
+		struct drm_encoder *enc);
 
 /**
- * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
- *	using dpu_rm_get_hw
- * @iter: iter object to initialize
- * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
- * @type: Hardware Block Type client wishes to search for.
+ * Get hw resources of the given type that are assigned to this encoder.
  */
-void dpu_rm_init_hw_iter(
-		struct dpu_rm_hw_iter *iter,
-		uint32_t enc_id,
-		enum dpu_hw_blk_type type);
-/**
- * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
- *	Meant to do a single pass through the hardware list to iteratively
- *	retrieve hardware blocks of a given type for a given encoder.
- *	Initialize an iterator object.
- *	Set hw block type of interest. Set encoder id of interest, 0 for any.
- *	Function returns first hw of type for that encoder.
- *	Subsequent calls will return the next reserved hw of that type in-order.
- *	Iterator HW pointer will be null on failure to find hw.
- * @rm: DPU Resource Manager handle
- * @iter: iterator object
- * @Return: true on match found, false on no match found
- */
-bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+	struct dpu_global_state *global_state, uint32_t enc_id,
+	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
 #endif /* __DPU_RM_H__ */
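For callers migrating off the deleted iterator, the shape of the
conversion is roughly:

	/*
	 * Before (removed API):
	 *
	 *	struct dpu_rm_hw_iter iter;
	 *
	 *	dpu_rm_init_hw_iter(&iter, enc_id, DPU_HW_BLK_PINGPONG);
	 *	while (dpu_rm_get_hw(rm, &iter))
	 *		use(iter.hw);
	 *
	 * After (array-based API, output buffer sized by the caller):
	 *
	 *	struct dpu_hw_blk *blks[MAX_BLOCKS];
	 *	int num = dpu_rm_get_assigned_resources(rm, global_state,
	 *			enc_id, DPU_HW_BLK_PINGPONG, blks,
	 *			ARRAY_SIZE(blks));
	 */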
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 93ab36b..5e8c3f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
 	int rc;
 
 	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
-		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
 		return -EINVAL;
 	}
 
@@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
 	u32 val;
 
 	if (!vbif || !vbif->cap) {
-		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
 		return -EINVAL;
 	}
 
@@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 
 	if (!vbif || !mdp) {
 		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
-				vbif != 0, mdp != 0);
+				vbif != NULL, mdp != NULL);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5..c9239b0 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank  = msm_crtc_enable_vblank,
+	.disable_vblank = msm_crtc_disable_vblank,
 };
 
 static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e1cc541..998bef1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	drm_for_each_encoder(encoder, dev)
+		if (encoder->crtc == crtc)
+			return encoder;
+
+	return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+					   bool in_vblank_irq,
+					   int *vpos, int *hpos,
+					   ktime_t *stime, ktime_t *etime,
+					   const struct drm_display_mode *mode)
+{
+	unsigned int pipe = crtc->index;
+	struct drm_encoder *encoder;
+	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+
+	encoder = get_encoder_from_crtc(crtc);
+	if (!encoder) {
+		DRM_ERROR("no encoder found for crtc %d\n", pipe);
+		return false;
+	}
+
+	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+	/*
+	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+	 * the end of VFP. Translate the porch values relative to the line
+	 * counter positions.
+	 */
+
+	vactive_start = vsw + vbp + 1;
+
+	vactive_end = vactive_start + mode->crtc_vdisplay;
+
+	/* last scan line before VSYNC */
+	vfp_end = mode->crtc_vtotal;
+
+	if (stime)
+		*stime = ktime_get();
+
+	line = mdp5_encoder_get_linecount(encoder);
+
+	if (line < vactive_start)
+		line -= vactive_start;
+	else if (line > vactive_end)
+		line = line - vfp_end - vactive_start;
+	else
+		line -= vactive_start;
+
+	*vpos = line;
+	*hpos = 0;
+
+	if (etime)
+		*etime = ktime_get();
+
+	return true;
+}
+
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	encoder = get_encoder_from_crtc(crtc);
+	if (!encoder)
+		return 0;
+
+	return mdp5_encoder_get_framecount(encoder);
+}
+
 static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 				     struct drm_crtc_state *old_state)
 {
@@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
 	.cursor_set = mdp5_crtc_cursor_set,
 	.cursor_move = mdp5_crtc_cursor_move,
 	.atomic_print_state = mdp5_crtc_atomic_print_state,
+	.get_vblank_counter = mdp5_crtc_get_vblank_counter,
+	.enable_vblank  = msm_crtc_enable_vblank,
+	.disable_vblank = msm_crtc_disable_vblank,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 	.atomic_flush = mdp5_crtc_atomic_flush,
 	.atomic_enable = mdp5_crtc_atomic_enable,
 	.atomic_disable = mdp5_crtc_atomic_disable,
+	.get_scanout_position = mdp5_crtc_get_scanout_position,
 };
 
 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
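To make the porch arithmetic in mdp5_crtc_get_scanout_position()
concrete, a worked example with illustrative CEA 1080p60 timings:

	/*
	 * mode: vdisplay 1080, vsync_start 1084, vsync_end 1089,
	 * vtotal 1125
	 *
	 *	vsw           = 1089 - 1084 = 5
	 *	vbp           = 1125 - 1089 = 36
	 *	vactive_start = 5 + 36 + 1  = 42
	 *	vactive_end   = 42 + 1080   = 1122
	 *	vfp_end       = 1125
	 *
	 * line 42   (first active line) -> vpos = 42 - 42 = 0
	 * line 2    (inside vsync)      -> vpos = 2 - 42 = -40
	 * line 1124 (front porch)       -> vpos = 1124 - 1125 - 42 = -43
	 *
	 * vpos therefore counts up through the active lines and goes
	 * negative inside the blanking interval, which is what the
	 * vblank timestamp helper expects.
	 */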
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e43ecd4..6650f47 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 	return 0;
 }
 
-static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_encoder *encoder;
-
-	drm_for_each_encoder(encoder, dev)
-		if (encoder->crtc == crtc)
-			return encoder;
-
-	return NULL;
-}
-
-static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
-				bool in_vblank_irq, int *vpos, int *hpos,
-				ktime_t *stime, ktime_t *etime,
-				const struct drm_display_mode *mode)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
-
-	crtc = priv->crtcs[pipe];
-	if (!crtc) {
-		DRM_ERROR("Invalid crtc %d\n", pipe);
-		return false;
-	}
-
-	encoder = get_encoder_from_crtc(crtc);
-	if (!encoder) {
-		DRM_ERROR("no encoder found for crtc %d\n", pipe);
-		return false;
-	}
-
-	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
-	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
-
-	/*
-	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
-	 * the end of VFP. Translate the porch values relative to the line
-	 * counter positions.
-	 */
-
-	vactive_start = vsw + vbp + 1;
-
-	vactive_end = vactive_start + mode->crtc_vdisplay;
-
-	/* last scan line before VSYNC */
-	vfp_end = mode->crtc_vtotal;
-
-	if (stime)
-		*stime = ktime_get();
-
-	line = mdp5_encoder_get_linecount(encoder);
-
-	if (line < vactive_start) {
-		line -= vactive_start;
-	} else if (line > vactive_end) {
-		line = line - vfp_end - vactive_start;
-	} else {
-		line -= vactive_start;
-	}
-
-	*vpos = line;
-	*hpos = 0;
-
-	if (etime)
-		*etime = ktime_get();
-
-	return true;
-}
-
-static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-
-	if (pipe >= priv->num_crtcs)
-		return 0;
-
-	crtc = priv->crtcs[pipe];
-	if (!crtc)
-		return 0;
-
-	encoder = get_encoder_from_crtc(crtc);
-	if (!encoder)
-		return 0;
-
-	return mdp5_encoder_get_framecount(encoder);
-}
-
 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	dev->mode_config.max_width = 0xffff;
 	dev->mode_config.max_height = 0xffff;
 
-	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
-	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
-	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
 	dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
 	dev->vblank_disable_immediate = true;
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 4864b95..4b363bd 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -689,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
 	bridge = &dsi_bridge->base;
 	bridge->funcs = &dsi_mgr_bridge_funcs;
 
-	ret = drm_bridge_attach(encoder, bridge, NULL);
+	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (ret)
 		goto fail;
 
@@ -718,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
 	encoder = msm_dsi->encoder;
 
 	/* link the internal dsi bridge to the external bridge */
-	drm_bridge_attach(encoder, ext_bridge, int_bridge);
+	drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
 
 	/*
 	 * we need the drm_connector created by the external bridge
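The msm call sites converted in this series all pass 0 for the new
drm_bridge_attach() flags argument, keeping the historical behaviour
where the bridge creates its own connector; a caller that builds the
connector itself would pass the opt-out flag instead:

	/* flags == 0: bridge creates the connector (old behaviour) */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);

	/* caller creates the connector itself */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);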
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index ad4e963..106a674 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
 		goto fail;
 	}
 
-	ret = drm_bridge_attach(encoder, edp->bridge, NULL);
-	if (ret)
-		goto fail;
-
 	priv->bridges[priv->num_bridges++]       = edp->bridge;
 	priv->connectors[priv->num_connectors++] = edp->connector;
 
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index b65b5cc..c69a37e 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
 	bridge = &edp_bridge->base;
 	bridge->funcs = &edp_bridge_funcs;
 
-	ret = drm_bridge_attach(edp->encoder, bridge, NULL);
+	ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1a9b628..737453b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
 		goto fail;
 	}
 
-	ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
-	if (ret)
-		goto fail;
-
 	priv->bridges[priv->num_bridges++]       = hdmi->bridge;
 	priv->connectors[priv->num_connectors++] = hdmi->connector;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ba81338..6e380db 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
 	bridge = &hdmi_bridge->base;
 	bridge->funcs = &msm_hdmi_bridge_funcs;
 
-	ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
+	ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e4b750b..29295de 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	if (!dev->dma_parms) {
 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
 					      GFP_KERNEL);
-		if (!dev->dma_parms)
-			return -ENOMEM;
+		if (!dev->dma_parms) {
+			ret = -ENOMEM;
+			goto err_msm_uninit;
+		}
 	}
 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
 
@@ -668,8 +670,10 @@ static void msm_irq_uninstall(struct drm_device *dev)
 	kms->funcs->irq_uninstall(kms);
 }
 
-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
@@ -678,8 +682,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	if (!kms)
@@ -1004,8 +1010,6 @@ static struct drm_driver msm_driver = {
 	.irq_preinstall     = msm_irq_preinstall,
 	.irq_postinstall    = msm_irq_postinstall,
 	.irq_uninstall      = msm_irq_uninstall,
-	.enable_vblank      = msm_enable_vblank,
-	.disable_vblank     = msm_disable_vblank,
 	.gem_free_object_unlocked = msm_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.dumb_create        = msm_gem_dumb_create,
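The interface change behind this hunk: vblank on/off moves from
device-wide hooks in struct drm_driver to per-CRTC hooks, so the pipe
index no longer travels separately:

	/*
	 * old, removed from struct drm_driver:
	 *	int  (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
	 *	void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
	 *
	 * new, in struct drm_crtc_funcs:
	 *	int  (*enable_vblank)(struct drm_crtc *crtc);
	 *	void (*disable_vblank)(struct drm_crtc *crtc);
	 *
	 * msm_crtc_enable_vblank() above recovers both values as
	 * crtc->dev and crtc->index.
	 */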
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 740bf7c..194d900 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int npages);
 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index db48867..47235f8 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+	ret = drm_fb_helper_init(dev, helper);
 	if (ret) {
 		DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
 		goto fail;
 	}
 
-	ret = drm_fb_helper_single_add_all_connectors(helper);
-	if (ret)
-		goto fini;
-
 	/* the fw fb could be anywhere in memory */
 	drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9e0953c..30584ea 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -157,7 +157,17 @@ struct msm_gem_submit {
 			uint32_t handle;
 		};
 		uint64_t iova;
-	} bos[0];
+	} bos[];
 };
 
+/* helper to determine if a buffer in submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+	extern bool rd_full;
+	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
 #endif /* __MSM_GEM_H__ */
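Two independent changes land in this header: bos[0] becomes a C99
flexible array member, and should_dump() moves here so msm_gpu.c below
can share it. Allocation for the flexible array is unchanged; a sketch
using the kernel's struct_size() helper (the msm code sizes the
allocation by hand):

	/* sketch: allocate a submit with nr_bos trailing bos[] entries */
	struct msm_gem_submit *submit =
		kzalloc(struct_size(submit, bos, nr_bos), GFP_KERNEL);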
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 18f3a5c..615c5cd 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 	state->cmd = kstrdup(cmd, GFP_KERNEL);
 
 	if (submit) {
-		int i;
+		int i, nr = 0;
 
-		state->bos = kcalloc(submit->nr_cmds,
+		/* count # of buffers to dump: */
+		for (i = 0; i < submit->nr_bos; i++)
+			if (should_dump(submit, i))
+				nr++;
+		/* always dump cmd bo's, but don't double count them: */
+		for (i = 0; i < submit->nr_cmds; i++)
+			if (!should_dump(submit, submit->cmd[i].idx))
+				nr++;
+
+		state->bos = kcalloc(nr,
 			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 
+		for (i = 0; i < submit->nr_bos; i++) {
+			if (should_dump(submit, i)) {
+				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+					submit->bos[i].iova, submit->bos[i].flags);
+			}
+		}
+
 		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
 			int idx = submit->cmd[i].idx;
 
-			msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
-				submit->bos[idx].iova, submit->bos[idx].flags);
+			if (!should_dump(submit, submit->cmd[i].idx)) {
+				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+					submit->bos[idx].iova, submit->bos[idx].flags);
+			}
 		}
 	}
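A worked example of the two-pass counting above, with illustrative
numbers:

	/*
	 * rd_full == false, 4 BOs where only BO 1 has
	 * MSM_SUBMIT_BO_DUMP set, two cmd buffers at idx 1 and idx 3:
	 *
	 *	pass 1: should_dump() matches BO 1 only -> nr = 1
	 *	pass 2: cmd idx 1 already counted,
	 *		cmd idx 3 was not               -> nr = 2
	 *
	 * state->bos thus gets one slot per dumped buffer: cmd buffers
	 * are always included but never double counted.
	 */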
 
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index af7ceb2..732f65d 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -43,7 +43,7 @@
 #include "msm_gpu.h"
 #include "msm_gem.h"
 
-static bool rd_full = false;
+bool rd_full = false;
 MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
 module_param_named(rd_full, rd_full, bool, 0600);
 
@@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
 	msm_gem_put_vaddr(&obj->base);
 }
 
-static bool
-should_dump(struct msm_gem_submit *submit, int idx)
-{
-	return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
-}
-
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea..1f08de4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1248,6 +1248,9 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
 	.set_config = drm_crtc_helper_set_config,
 	.page_flip = nv04_crtc_page_flip,
 	.destroy = nv_crtc_destroy,
+	.enable_vblank = nouveau_display_vblank_enable,
+	.disable_vblank = nouveau_display_vblank_disable,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
@@ -1258,6 +1261,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.disable = nv_crtc_disable,
+	.get_scanout_position = nouveau_display_scanoutpos,
 };
 
 static const uint32_t modeset_formats[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a3dc2ba..4d1c584 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1256,30 +1256,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
 	}
 }
 
-static void
-nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
-			    struct drm_connector *connector)
-{
-	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nv50_mstc *mstc = nv50_mstc(connector);
-
-	drm_connector_unregister(&mstc->connector);
-
-	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
-
-	drm_connector_put(&mstc->connector);
-}
-
-static void
-nv50_mstm_register_connector(struct drm_connector *connector)
-{
-	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-
-	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-
-	drm_connector_register(connector);
-}
-
 static struct drm_connector *
 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 			struct drm_dp_mst_port *port, const char *path)
@@ -1298,8 +1274,6 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 static const struct drm_dp_mst_topology_cbs
 nv50_mstm = {
 	.add_connector = nv50_mstm_add_connector,
-	.register_connector = nv50_mstm_register_connector,
-	.destroy_connector = nv50_mstm_destroy_connector,
 };
 
 void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d9d6460..8f64556 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -29,6 +29,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_vblank.h>
 #include "nouveau_connector.h"
 void
 nv50_head_flush_clr(struct nv50_head *head,
@@ -413,6 +414,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
 static const struct drm_crtc_helper_funcs
 nv50_head_help = {
 	.atomic_check = nv50_head_atomic_check,
+	.get_scanout_position = nouveau_display_scanoutpos,
 };
 
 static void
@@ -481,6 +483,9 @@ nv50_head_func = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
 	.atomic_destroy_state = nv50_head_atomic_destroy_state,
+	.enable_vblank = nouveau_display_vblank_enable,
+	.disable_vblank = nouveau_display_vblank_disable,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 struct nv50_head *
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1b62ccc..2b4b21b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -647,13 +647,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 }
 
 static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	/* We'll do this from user space. */
-	return 0;
-}
-
-static int
 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
@@ -1697,7 +1690,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
-	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 53f9bce..700817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -54,15 +54,10 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
 }
 
 int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
 
-	crtc = drm_crtc_from_index(dev, pipe);
-	if (!crtc)
-		return -EINVAL;
-
 	nv_crtc = nouveau_crtc(crtc);
 	nvif_notify_get(&nv_crtc->vblank);
 
@@ -70,15 +65,10 @@ nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
 }
 
 void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
 {
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
 
-	crtc = drm_crtc_from_index(dev, pipe);
-	if (!crtc)
-		return;
-
 	nv_crtc = nouveau_crtc(crtc);
 	nvif_notify_put(&nv_crtc->vblank);
 }
@@ -136,21 +126,13 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
 }
 
 bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
 			   bool in_vblank_irq, int *vpos, int *hpos,
 			   ktime_t *stime, ktime_t *etime,
 			   const struct drm_display_mode *mode)
 {
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (nouveau_crtc(crtc)->index == pipe) {
-			return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
-							       stime, etime);
-		}
-	}
-
-	return false;
+	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+					       stime, etime);
 }
 
 static void
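
Wiring .get_scanout_position into the CRTC helper funcs lets nouveau use the generic drm_crtc_vblank_helper_get_vblank_timestamp(), which converts a scanout position into a vblank timestamp. The contract the helper expects from the hook, matching the new prototype in nouveau_display.h below:

/*
 * Report the current scanline in *vpos/*hpos (negative while inside
 * the vblank region) and, if stime/etime are non-NULL, sample the
 * system time immediately before and after the hardware read so the
 * core can bound the measurement error. Return true on success.
 */
bool (*get_scanout_position)(struct drm_crtc *crtc, bool in_vblank_irq,
			     int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
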
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e668..de00401 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -61,11 +61,12 @@ int  nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
 void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
-void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
-bool  nouveau_display_scanoutpos(struct drm_device *, unsigned int,
-				 bool, int *, int *, ktime_t *,
-				 ktime_t *, const struct drm_display_mode *);
+int  nouveau_display_vblank_enable(struct drm_crtc *crtc);
+void nouveau_display_vblank_disable(struct drm_crtc *crtc);
+bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
+				bool in_vblank_irq, int *vpos, int *hpos,
+				ktime_t *stime, ktime_t *etime,
+				const struct drm_display_mode *mode);
 
 int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
 				 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index b65ae81..6b1629c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1120,11 +1120,6 @@ driver_stub = {
 	.debugfs_init = nouveau_drm_debugfs_init,
 #endif
 
-	.enable_vblank = nouveau_display_vblank_enable,
-	.disable_vblank = nouveau_display_vblank_disable,
-	.get_scanout_position = nouveau_display_scanoutpos,
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
 	.ioctls = nouveau_ioctls,
 	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
 	.fops = &nouveau_driver_fops,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda..24d543a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -558,14 +558,10 @@ nouveau_fbcon_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
+	ret = drm_fb_helper_init(dev, &fbcon->helper);
 	if (ret)
 		goto free;
 
-	ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
-	if (ret)
-		goto fini;
-
 	if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
 		if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
 			preferred_bpp = 8;
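
drm_fb_helper_init() drops its max_conn_count argument this cycle, and the fb helper now discovers connectors itself, which is why the drm_fb_helper_single_add_all_connectors() call (and its error path) disappear. A minimal sketch of the resulting bring-up sequence, using the names from this file:

drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);

ret = drm_fb_helper_init(dev, &fbcon->helper);
if (ret)
	goto free;

/* connector bookkeeping is implicit now; proceed to the initial config */
ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
if (ret)
	goto fini;
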
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index d865d8a..c85dd8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
 	 */
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops
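
dev->open_count is an atomic_t as of this cycle, so this deliberately lock-free check reads it with atomic_read() instead of a plain load. The generic shape of the pattern, purely as an illustration (not DRM core code):

#include <linux/atomic.h>

static atomic_t open_count = ATOMIC_INIT(0);

static void track_open(void)  { atomic_inc(&open_count); }
static void track_close(void) { atomic_dec(&open_count); }

/* racy-but-harmless reader, as the comment above this hunk explains */
static bool device_is_idle(void)
{
	return atomic_read(&open_count) == 0;
}
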
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index b562a8c..f2be594 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,28 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menu "OMAPDRM External Display Device Drivers"
 
-config DRM_OMAP_ENCODER_OPA362
-	tristate "OPA362 external analog amplifier"
-	help
-	  Driver for OPA362 external analog TV amplifier controlled
-	  through a GPIO.
-
-config DRM_OMAP_ENCODER_TPD12S015
-	tristate "TPD12S015 HDMI ESD protection and level shifter"
-	help
-	  Driver for TPD12S015, which offers HDMI ESD protection and level
-	  shifting.
-
-config DRM_OMAP_CONNECTOR_HDMI
-	tristate "HDMI Connector"
-	help
-	  Driver for a generic HDMI connector.
-
-config DRM_OMAP_CONNECTOR_ANALOG_TV
-	tristate "Analog TV Connector"
-	help
-	  Driver for a generic analog TV connector.
-
 config DRM_OMAP_PANEL_DSI_CM
 	tristate "Generic DSI Command Mode Panel"
 	depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index cb76859..488ddf1 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,6 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
 obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
deleted file mode 100644
index 0d20fab..0000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog TV Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
-	struct omap_dss_device dssdev;
-
-	struct device *dev;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tvc_connect(struct omap_dss_device *src,
-		       struct omap_dss_device *dst)
-{
-	return 0;
-}
-
-static void tvc_disconnect(struct omap_dss_device *src,
-			   struct omap_dss_device *dst)
-{
-}
-
-static const struct omap_dss_device_ops tvc_ops = {
-	.connect		= tvc_connect,
-	.disconnect		= tvc_disconnect,
-};
-
-static int tvc_probe(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata;
-	struct omap_dss_device *dssdev;
-
-	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
-	if (!ddata)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ddata);
-	ddata->dev = &pdev->dev;
-
-	dssdev = &ddata->dssdev;
-	dssdev->ops = &tvc_ops;
-	dssdev->dev = &pdev->dev;
-	dssdev->type = OMAP_DISPLAY_TYPE_VENC;
-	dssdev->display = true;
-	dssdev->owner = THIS_MODULE;
-	dssdev->of_ports = BIT(0);
-
-	omapdss_display_init(dssdev);
-	omapdss_device_register(dssdev);
-
-	return 0;
-}
-
-static int __exit tvc_remove(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
-	omapdss_device_unregister(&ddata->dssdev);
-
-	return 0;
-}
-
-static const struct of_device_id tvc_of_match[] = {
-	{ .compatible = "omapdss,svideo-connector", },
-	{ .compatible = "omapdss,composite-video-connector", },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, tvc_of_match);
-
-static struct platform_driver tvc_connector_driver = {
-	.probe	= tvc_probe,
-	.remove	= __exit_p(tvc_remove),
-	.driver	= {
-		.name	= "connector-analog-tv",
-		.of_match_table = tvc_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-module_platform_driver(tvc_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Analog TV Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
deleted file mode 100644
index f5d69d8..0000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HDMI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
-	struct omap_dss_device dssdev;
-	void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
-	void *hpd_cb_data;
-	struct mutex hpd_lock;
-
-	struct device *dev;
-
-	struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int hdmic_connect(struct omap_dss_device *src,
-			 struct omap_dss_device *dst)
-{
-	return 0;
-}
-
-static void hdmic_disconnect(struct omap_dss_device *src,
-			     struct omap_dss_device *dst)
-{
-}
-
-static bool hdmic_detect(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
-				  void (*cb)(void *cb_data,
-					    enum drm_connector_status status),
-				  void *cb_data)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	mutex_lock(&ddata->hpd_lock);
-	ddata->hpd_cb = cb;
-	ddata->hpd_cb_data = cb_data;
-	mutex_unlock(&ddata->hpd_lock);
-}
-
-static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	mutex_lock(&ddata->hpd_lock);
-	ddata->hpd_cb = NULL;
-	ddata->hpd_cb_data = NULL;
-	mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops hdmic_ops = {
-	.connect		= hdmic_connect,
-	.disconnect		= hdmic_disconnect,
-
-	.detect			= hdmic_detect,
-	.register_hpd_cb	= hdmic_register_hpd_cb,
-	.unregister_hpd_cb	= hdmic_unregister_hpd_cb,
-};
-
-static irqreturn_t hdmic_hpd_isr(int irq, void *data)
-{
-	struct panel_drv_data *ddata = data;
-
-	mutex_lock(&ddata->hpd_lock);
-	if (ddata->hpd_cb) {
-		enum drm_connector_status status;
-
-		if (hdmic_detect(&ddata->dssdev))
-			status = connector_status_connected;
-		else
-			status = connector_status_disconnected;
-
-		ddata->hpd_cb(ddata->hpd_cb_data, status);
-	}
-	mutex_unlock(&ddata->hpd_lock);
-
-	return IRQ_HANDLED;
-}
-
-static int hdmic_probe(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata;
-	struct omap_dss_device *dssdev;
-	struct gpio_desc *gpio;
-	int r;
-
-	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
-	if (!ddata)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ddata);
-	ddata->dev = &pdev->dev;
-
-	mutex_init(&ddata->hpd_lock);
-
-	/* HPD GPIO */
-	gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
-	if (IS_ERR(gpio)) {
-		dev_err(&pdev->dev, "failed to parse HPD gpio\n");
-		return PTR_ERR(gpio);
-	}
-
-	ddata->hpd_gpio = gpio;
-
-	if (ddata->hpd_gpio) {
-		r = devm_request_threaded_irq(&pdev->dev,
-				gpiod_to_irq(ddata->hpd_gpio),
-				NULL, hdmic_hpd_isr,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
-				IRQF_ONESHOT,
-				"hdmic hpd", ddata);
-		if (r)
-			return r;
-	}
-
-	dssdev = &ddata->dssdev;
-	dssdev->ops = &hdmic_ops;
-	dssdev->dev = &pdev->dev;
-	dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
-	dssdev->display = true;
-	dssdev->owner = THIS_MODULE;
-	dssdev->of_ports = BIT(0);
-	dssdev->ops_flags = ddata->hpd_gpio
-			  ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
-			  : 0;
-
-	omapdss_display_init(dssdev);
-	omapdss_device_register(dssdev);
-
-	return 0;
-}
-
-static int __exit hdmic_remove(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
-	omapdss_device_unregister(&ddata->dssdev);
-
-	return 0;
-}
-
-static const struct of_device_id hdmic_of_match[] = {
-	{ .compatible = "omapdss,hdmi-connector", },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, hdmic_of_match);
-
-static struct platform_driver hdmi_connector_driver = {
-	.probe	= hdmic_probe,
-	.remove	= __exit_p(hdmic_remove),
-	.driver	= {
-		.name	= "connector-hdmi",
-		.of_match_table = hdmic_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-module_platform_driver(hdmi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("HDMI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
deleted file mode 100644
index b992387e..0000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OPA362 analog video amplifier with output/power control
- *
- * Copyright (C) 2014 Golden Delicious Computers
- * Author: H. Nikolaus Schaller <hns@goldelico.com>
- *
- * based on encoder-tfp410
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
-	struct omap_dss_device dssdev;
-
-	struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int opa362_connect(struct omap_dss_device *src,
-			  struct omap_dss_device *dst)
-{
-	return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void opa362_disconnect(struct omap_dss_device *src,
-			      struct omap_dss_device *dst)
-{
-	omapdss_device_disconnect(dst, dst->next);
-}
-
-static void opa362_enable(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	if (ddata->enable_gpio)
-		gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void opa362_disable(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	if (ddata->enable_gpio)
-		gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static const struct omap_dss_device_ops opa362_ops = {
-	.connect	= opa362_connect,
-	.disconnect	= opa362_disconnect,
-	.enable		= opa362_enable,
-	.disable	= opa362_disable,
-};
-
-static int opa362_probe(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata;
-	struct omap_dss_device *dssdev;
-	struct gpio_desc *gpio;
-
-	dev_dbg(&pdev->dev, "probe\n");
-
-	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
-	if (!ddata)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ddata);
-
-	gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
-	if (IS_ERR(gpio))
-		return PTR_ERR(gpio);
-
-	ddata->enable_gpio = gpio;
-
-	dssdev = &ddata->dssdev;
-	dssdev->ops = &opa362_ops;
-	dssdev->dev = &pdev->dev;
-	dssdev->type = OMAP_DISPLAY_TYPE_VENC;
-	dssdev->owner = THIS_MODULE;
-	dssdev->of_ports = BIT(1) | BIT(0);
-
-	dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
-	if (IS_ERR(dssdev->next)) {
-		if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to find video sink\n");
-		return PTR_ERR(dssdev->next);
-	}
-
-	omapdss_device_register(dssdev);
-
-	return 0;
-}
-
-static int __exit opa362_remove(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct omap_dss_device *dssdev = &ddata->dssdev;
-
-	if (dssdev->next)
-		omapdss_device_put(dssdev->next);
-	omapdss_device_unregister(&ddata->dssdev);
-
-	opa362_disable(dssdev);
-
-	return 0;
-}
-
-static const struct of_device_id opa362_of_match[] = {
-	{ .compatible = "omapdss,ti,opa362", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, opa362_of_match);
-
-static struct platform_driver opa362_driver = {
-	.probe	= opa362_probe,
-	.remove	= __exit_p(opa362_remove),
-	.driver	= {
-		.name	= "amplifier-opa362",
-		.of_match_table = opa362_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-module_platform_driver(opa362_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
-MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
deleted file mode 100644
index 089105c..0000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TPD12S015 HDMI ESD protection & level shifter chip driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/mutex.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
-	struct omap_dss_device dssdev;
-	void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
-	void *hpd_cb_data;
-	struct mutex hpd_lock;
-
-	struct gpio_desc *ct_cp_hpd_gpio;
-	struct gpio_desc *ls_oe_gpio;
-	struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tpd_connect(struct omap_dss_device *src,
-		       struct omap_dss_device *dst)
-{
-	struct panel_drv_data *ddata = to_panel_data(dst);
-	int r;
-
-	r = omapdss_device_connect(dst->dss, dst, dst->next);
-	if (r)
-		return r;
-
-	gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
-	gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
-	/* DC-DC converter needs at max 300us to get to 90% of 5V */
-	udelay(300);
-
-	return 0;
-}
-
-static void tpd_disconnect(struct omap_dss_device *src,
-			   struct omap_dss_device *dst)
-{
-	struct panel_drv_data *ddata = to_panel_data(dst);
-
-	gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
-	gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
-	omapdss_device_disconnect(dst, dst->next);
-}
-
-static bool tpd_detect(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
-				void (*cb)(void *cb_data,
-					  enum drm_connector_status status),
-				void *cb_data)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	mutex_lock(&ddata->hpd_lock);
-	ddata->hpd_cb = cb;
-	ddata->hpd_cb_data = cb_data;
-	mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	mutex_lock(&ddata->hpd_lock);
-	ddata->hpd_cb = NULL;
-	ddata->hpd_cb_data = NULL;
-	mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops tpd_ops = {
-	.connect		= tpd_connect,
-	.disconnect		= tpd_disconnect,
-	.detect			= tpd_detect,
-	.register_hpd_cb	= tpd_register_hpd_cb,
-	.unregister_hpd_cb	= tpd_unregister_hpd_cb,
-};
-
-static irqreturn_t tpd_hpd_isr(int irq, void *data)
-{
-	struct panel_drv_data *ddata = data;
-
-	mutex_lock(&ddata->hpd_lock);
-	if (ddata->hpd_cb) {
-		enum drm_connector_status status;
-
-		if (tpd_detect(&ddata->dssdev))
-			status = connector_status_connected;
-		else
-			status = connector_status_disconnected;
-
-		ddata->hpd_cb(ddata->hpd_cb_data, status);
-	}
-	mutex_unlock(&ddata->hpd_lock);
-
-	return IRQ_HANDLED;
-}
-
-static int tpd_probe(struct platform_device *pdev)
-{
-	struct omap_dss_device *dssdev;
-	struct panel_drv_data *ddata;
-	int r;
-	struct gpio_desc *gpio;
-
-	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
-	if (!ddata)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ddata);
-
-	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
-		 GPIOD_OUT_LOW);
-	if (IS_ERR(gpio))
-		return PTR_ERR(gpio);
-
-	ddata->ct_cp_hpd_gpio = gpio;
-
-	gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
-		 GPIOD_OUT_LOW);
-	if (IS_ERR(gpio))
-		return PTR_ERR(gpio);
-
-	ddata->ls_oe_gpio = gpio;
-
-	gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
-		GPIOD_IN);
-	if (IS_ERR(gpio))
-		return PTR_ERR(gpio);
-
-	ddata->hpd_gpio = gpio;
-
-	mutex_init(&ddata->hpd_lock);
-
-	r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
-		NULL, tpd_hpd_isr,
-		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-		"tpd12s015 hpd", ddata);
-	if (r)
-		return r;
-
-	dssdev = &ddata->dssdev;
-	dssdev->ops = &tpd_ops;
-	dssdev->dev = &pdev->dev;
-	dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
-	dssdev->owner = THIS_MODULE;
-	dssdev->of_ports = BIT(1) | BIT(0);
-	dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
-			  | OMAP_DSS_DEVICE_OP_HPD;
-
-	dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
-	if (IS_ERR(dssdev->next)) {
-		if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
-			dev_err(&pdev->dev, "failed to find video sink\n");
-		return PTR_ERR(dssdev->next);
-	}
-
-	omapdss_device_register(dssdev);
-
-	return 0;
-}
-
-static int __exit tpd_remove(struct platform_device *pdev)
-{
-	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-	struct omap_dss_device *dssdev = &ddata->dssdev;
-
-	if (dssdev->next)
-		omapdss_device_put(dssdev->next);
-	omapdss_device_unregister(&ddata->dssdev);
-
-	return 0;
-}
-
-static const struct of_device_id tpd_of_match[] = {
-	{ .compatible = "omapdss,ti,tpd12s015", },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, tpd_of_match);
-
-static struct platform_driver tpd_driver = {
-	.probe	= tpd_probe,
-	.remove	= __exit_p(tpd_remove),
-	.driver	= {
-		.name	= "tpd12s015",
-		.of_match_table = tpd_of_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-module_platform_driver(tpd_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TPD12S015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 564e3e1..3484b5d 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -678,7 +678,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
 	if (r)
 		goto err;
 
-	ddata->enabled = 1;
+	ddata->enabled = true;
 
 	if (!ddata->intro_printed) {
 		dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
@@ -729,7 +729,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
 	if (ddata->vpnl)
 		regulator_disable(ddata->vpnl);
 
-	ddata->enabled = 0;
+	ddata->enabled = false;
 }
 
 static int dsicm_panel_reset(struct panel_drv_data *ddata)
@@ -1265,7 +1265,7 @@ static int dsicm_probe(struct platform_device *pdev)
 	dssdev->type = OMAP_DISPLAY_TYPE_DSI;
 	dssdev->display = true;
 	dssdev->owner = THIS_MODULE;
-	dssdev->of_ports = BIT(0);
+	dssdev->of_port = 0;
 	dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
 
 	dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 5950c3f..f967e69 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
 
 obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o dss-of.o output.o
+omapdss-base-y := base.o display.o output.o
 
 obj-$(CONFIG_OMAP2_DSS) += omapdss.o
 # Core DSS files
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index a1970b9..c7650a7c 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -149,8 +149,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
 			goto done;
 		}
 
-		if (dssdev->id &&
-		    (dssdev->next || dssdev->bridge || dssdev->panel))
+		if (dssdev->id && (dssdev->next || dssdev->bridge))
 			goto done;
 	}
 
@@ -185,11 +184,10 @@ int omapdss_device_connect(struct dss_device *dss,
 	if (!dst) {
 		/*
 		 * The destination is NULL when the source is connected to a
-		 * bridge or panel instead of a DSS device. Stop here, we will
-		 * attach the bridge or panel later when we will have a DRM
-		 * encoder.
+		 * bridge instead of a DSS device. Stop here; we will attach
+		 * the bridge later, once we have a DRM encoder.
 		 */
-		return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+		return src && src->bridge ? 0 : -EINVAL;
 	}
 
 	if (omapdss_device_is_connected(dst))
@@ -197,10 +195,12 @@ int omapdss_device_connect(struct dss_device *dss,
 
 	dst->dss = dss;
 
-	ret = dst->ops->connect(src, dst);
-	if (ret < 0) {
-		dst->dss = NULL;
-		return ret;
+	if (dst->ops && dst->ops->connect) {
+		ret = dst->ops->connect(src, dst);
+		if (ret < 0) {
+			dst->dss = NULL;
+			return ret;
+		}
 	}
 
 	return 0;
@@ -217,7 +217,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
 		dst ? dev_name(dst->dev) : "NULL");
 
 	if (!dst) {
-		WARN_ON(!src->bridge && !src->panel);
+		WARN_ON(!src->bridge);
 		return;
 	}
 
@@ -228,29 +228,18 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
 
 	WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
 
-	dst->ops->disconnect(src, dst);
+	if (dst->ops && dst->ops->disconnect)
+		dst->ops->disconnect(src, dst);
 	dst->dss = NULL;
 }
 EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
 
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
-{
-	if (!dssdev)
-		return;
-
-	omapdss_device_pre_enable(dssdev->next);
-
-	if (dssdev->ops->pre_enable)
-		dssdev->ops->pre_enable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
-
 void omapdss_device_enable(struct omap_dss_device *dssdev)
 {
 	if (!dssdev)
 		return;
 
-	if (dssdev->ops->enable)
+	if (dssdev->ops && dssdev->ops->enable)
 		dssdev->ops->enable(dssdev);
 
 	omapdss_device_enable(dssdev->next);
@@ -266,25 +255,11 @@ void omapdss_device_disable(struct omap_dss_device *dssdev)
 
 	omapdss_device_disable(dssdev->next);
 
-	if (dssdev->ops->disable)
+	if (dssdev->ops && dssdev->ops->disable)
 		dssdev->ops->disable(dssdev);
 }
 EXPORT_SYMBOL_GPL(omapdss_device_disable);
 
-void omapdss_device_post_disable(struct omap_dss_device *dssdev)
-{
-	if (!dssdev)
-		return;
-
-	if (dssdev->ops->post_disable)
-		dssdev->ops->post_disable(dssdev);
-
-	omapdss_device_post_disable(dssdev->next);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
-
 /* -----------------------------------------------------------------------------
  * Components Handling
  */
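
The new "dst->ops && dst->ops->connect" style guards exist because, with outputs progressively converted to drm_bridge, a device in the chain may now legitimately carry no omap_dss_device_ops at all. A tiny sketch of the case the guards cater for (hypothetical initialiser, not taken from a real driver):

/* a bridge-backed output: everything goes through drm_bridge_funcs */
static struct omap_dss_device out = {
	.bridge = &my_bridge,	/* assumed to exist elsewhere */
	.ops    = NULL,		/* omapdss_device_{connect,enable,...} must cope */
};
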
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8a3f61f..3b82158 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -40,15 +40,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
 }
 EXPORT_SYMBOL_GPL(omapdss_display_init);
 
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
-{
-	while (output->next)
-		output = output->next;
-
-	return omapdss_device_get(output);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get);
-
 int omapdss_display_get_modes(struct drm_connector *connector,
 			      const struct videomode *vm)
 {
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 9514743..5110acb0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -9,20 +9,22 @@
 
 #define DSS_SUBSYS_NAME "DPI"
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/export.h>
 #include <linux/err.h>
 #include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/string.h>
-#include <linux/of.h>
-#include <linux/clk.h>
 #include <linux/sys_soc.h>
 
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
 #include "dss.h"
+#include "omapdss.h"
 
 struct dpi_data {
 	struct platform_device *pdev;
@@ -34,19 +36,19 @@ struct dpi_data {
 	enum dss_clk_source clk_src;
 	struct dss_pll *pll;
 
-	struct mutex lock;
-
 	struct dss_lcd_mgr_config mgr_config;
 	unsigned long pixelclock;
 	int data_lines;
 
 	struct omap_dss_device output;
+	struct drm_bridge bridge;
 };
 
-static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
-{
-	return container_of(dssdev, struct dpi_data, output);
-}
+#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
+
+/* -----------------------------------------------------------------------------
+ * Clock Handling and PLL
+ */
 
 static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
 						  enum omap_channel channel)
@@ -283,9 +285,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
 
 
 
-static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
-		unsigned long pck_req, unsigned long *fck, int *lck_div,
-		int *pck_div)
+static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
 {
 	struct dpi_clk_calc_ctx ctx;
 	int r;
@@ -299,19 +299,15 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
 	if (r)
 		return r;
 
-	dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
+	dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+				  dpi->clk_src);
 
 	dpi->mgr_config.clock_info = ctx.dispc_cinfo;
 
-	*fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
-	*lck_div = ctx.dispc_cinfo.lck_div;
-	*pck_div = ctx.dispc_cinfo.pck_div;
-
 	return 0;
 }
 
-static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
-		unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
 {
 	struct dpi_clk_calc_ctx ctx;
 	int r;
@@ -327,29 +323,19 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
 
 	dpi->mgr_config.clock_info = ctx.dispc_cinfo;
 
-	*fck = ctx.fck;
-	*lck_div = ctx.dispc_cinfo.lck_div;
-	*pck_div = ctx.dispc_cinfo.pck_div;
-
 	return 0;
 }
 
 static int dpi_set_mode(struct dpi_data *dpi)
 {
-	int lck_div = 0, pck_div = 0;
-	unsigned long fck = 0;
-	int r = 0;
+	int r;
 
 	if (dpi->pll)
-		r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
-				    dpi->pixelclock, &fck, &lck_div, &pck_div);
+		r = dpi_set_pll_clk(dpi, dpi->pixelclock);
 	else
-		r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
-				&lck_div, &pck_div);
-	if (r)
-		return r;
+		r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
 
-	return 0;
+	return r;
 }
 
 static void dpi_config_lcd_manager(struct dpi_data *dpi)
@@ -366,125 +352,19 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
 	dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
 }
 
-static void dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
 {
-	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-	struct omap_dss_device *out = &dpi->output;
-	int r;
-
-	mutex_lock(&dpi->lock);
-
-	if (dpi->vdds_dsi_reg) {
-		r = regulator_enable(dpi->vdds_dsi_reg);
-		if (r)
-			goto err_reg_enable;
-	}
-
-	r = dispc_runtime_get(dpi->dss->dispc);
-	if (r)
-		goto err_get_dispc;
-
-	r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
-	if (r)
-		goto err_src_sel;
-
-	if (dpi->pll) {
-		r = dss_pll_enable(dpi->pll);
-		if (r)
-			goto err_pll_init;
-	}
-
-	r = dpi_set_mode(dpi);
-	if (r)
-		goto err_set_mode;
-
-	dpi_config_lcd_manager(dpi);
-
-	mdelay(2);
-
-	r = dss_mgr_enable(&dpi->output);
-	if (r)
-		goto err_mgr_enable;
-
-	mutex_unlock(&dpi->lock);
-
-	return;
-
-err_mgr_enable:
-err_set_mode:
-	if (dpi->pll)
-		dss_pll_disable(dpi->pll);
-err_pll_init:
-err_src_sel:
-	dispc_runtime_put(dpi->dss->dispc);
-err_get_dispc:
-	if (dpi->vdds_dsi_reg)
-		regulator_disable(dpi->vdds_dsi_reg);
-err_reg_enable:
-	mutex_unlock(&dpi->lock);
-}
-
-static void dpi_display_disable(struct omap_dss_device *dssdev)
-{
-	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
-	mutex_lock(&dpi->lock);
-
-	dss_mgr_disable(&dpi->output);
-
-	if (dpi->pll) {
-		dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
-					  DSS_CLK_SRC_FCK);
-		dss_pll_disable(dpi->pll);
-	}
-
-	dispc_runtime_put(dpi->dss->dispc);
-
-	if (dpi->vdds_dsi_reg)
-		regulator_disable(dpi->vdds_dsi_reg);
-
-	mutex_unlock(&dpi->lock);
-}
-
-static void dpi_set_timings(struct omap_dss_device *dssdev,
-			    const struct drm_display_mode *mode)
-{
-	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
-	DSSDBG("dpi_set_timings\n");
-
-	mutex_lock(&dpi->lock);
-
-	dpi->pixelclock = mode->clock * 1000;
-
-	mutex_unlock(&dpi->lock);
-}
-
-static int dpi_check_timings(struct omap_dss_device *dssdev,
-			     struct drm_display_mode *mode)
-{
-	struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
 	int lck_div, pck_div;
 	unsigned long fck;
-	unsigned long pck;
 	struct dpi_clk_calc_ctx ctx;
-	bool ok;
-
-	if (mode->hdisplay % 8 != 0)
-		return -EINVAL;
-
-	if (mode->clock == 0)
-		return -EINVAL;
 
 	if (dpi->pll) {
-		ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
-		if (!ok)
+		if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
 			return -EINVAL;
 
 		fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
 	} else {
-		ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
-		if (!ok)
+		if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
 			return -EINVAL;
 
 		fck = ctx.fck;
@@ -493,9 +373,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
 	lck_div = ctx.dispc_cinfo.lck_div;
 	pck_div = ctx.dispc_cinfo.pck_div;
 
-	pck = fck / lck_div / pck_div;
-
-	mode->clock = pck / 1000;
+	*clock = fck / lck_div / pck_div;
 
 	return 0;
 }
@@ -536,6 +414,167 @@ static void dpi_init_pll(struct dpi_data *dpi)
 	dpi->pll = pll;
 }
 
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dpi_bridge_attach(struct drm_bridge *bridge,
+			     enum drm_bridge_attach_flags flags)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	dpi_init_pll(dpi);
+
+	return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+				 bridge, flags);
+}
+
+static enum drm_mode_status
+dpi_bridge_mode_valid(struct drm_bridge *bridge,
+		       const struct drm_display_mode *mode)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+	unsigned long clock = mode->clock * 1000;
+	int ret;
+
+	if (mode->hdisplay % 8 != 0)
+		return MODE_BAD_WIDTH;
+
+	if (mode->clock == 0)
+		return MODE_NOCLOCK;
+
+	ret = dpi_clock_update(dpi, &clock);
+	if (ret < 0)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+	unsigned long clock = mode->clock * 1000;
+	int ret;
+
+	ret = dpi_clock_update(dpi, &clock);
+	if (ret < 0)
+		return false;
+
+	adjusted_mode->clock = clock / 1000;
+
+	return true;
+}
+
+static void dpi_bridge_mode_set(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 const struct drm_display_mode *adjusted_mode)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+	dpi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void dpi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+	int r;
+
+	if (dpi->vdds_dsi_reg) {
+		r = regulator_enable(dpi->vdds_dsi_reg);
+		if (r)
+			return;
+	}
+
+	r = dispc_runtime_get(dpi->dss->dispc);
+	if (r)
+		goto err_get_dispc;
+
+	r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
+	if (r)
+		goto err_src_sel;
+
+	if (dpi->pll) {
+		r = dss_pll_enable(dpi->pll);
+		if (r)
+			goto err_pll_init;
+	}
+
+	r = dpi_set_mode(dpi);
+	if (r)
+		goto err_set_mode;
+
+	dpi_config_lcd_manager(dpi);
+
+	mdelay(2);
+
+	r = dss_mgr_enable(&dpi->output);
+	if (r)
+		goto err_mgr_enable;
+
+	return;
+
+err_mgr_enable:
+err_set_mode:
+	if (dpi->pll)
+		dss_pll_disable(dpi->pll);
+err_pll_init:
+err_src_sel:
+	dispc_runtime_put(dpi->dss->dispc);
+err_get_dispc:
+	if (dpi->vdds_dsi_reg)
+		regulator_disable(dpi->vdds_dsi_reg);
+}
+
+static void dpi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+	dss_mgr_disable(&dpi->output);
+
+	if (dpi->pll) {
+		dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+					  DSS_CLK_SRC_FCK);
+		dss_pll_disable(dpi->pll);
+	}
+
+	dispc_runtime_put(dpi->dss->dispc);
+
+	if (dpi->vdds_dsi_reg)
+		regulator_disable(dpi->vdds_dsi_reg);
+}
+
+static const struct drm_bridge_funcs dpi_bridge_funcs = {
+	.attach = dpi_bridge_attach,
+	.mode_valid = dpi_bridge_mode_valid,
+	.mode_fixup = dpi_bridge_mode_fixup,
+	.mode_set = dpi_bridge_mode_set,
+	.enable = dpi_bridge_enable,
+	.disable = dpi_bridge_disable,
+};
+
+static void dpi_bridge_init(struct dpi_data *dpi)
+{
+	dpi->bridge.funcs = &dpi_bridge_funcs;
+	dpi->bridge.of_node = dpi->pdev->dev.of_node;
+	dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
+
+	drm_bridge_add(&dpi->bridge);
+}
+
+static void dpi_bridge_cleanup(struct dpi_data *dpi)
+{
+	drm_bridge_remove(&dpi->bridge);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
 /*
  * Return a hardcoded channel for the DPI output. This should work for
  * current use cases, but this can be later expanded to either resolve
@@ -572,39 +611,14 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
 	}
 }
 
-static int dpi_connect(struct omap_dss_device *src,
-		       struct omap_dss_device *dst)
-{
-	struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
-
-	dpi_init_pll(dpi);
-
-	return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void dpi_disconnect(struct omap_dss_device *src,
-			   struct omap_dss_device *dst)
-{
-	omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops dpi_ops = {
-	.connect = dpi_connect,
-	.disconnect = dpi_disconnect,
-
-	.enable = dpi_display_enable,
-	.disable = dpi_display_disable,
-
-	.check_timings = dpi_check_timings,
-	.set_timings = dpi_set_timings,
-};
-
 static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
 {
 	struct omap_dss_device *out = &dpi->output;
 	u32 port_num = 0;
 	int r;
 
+	dpi_bridge_init(dpi);
+
 	of_property_read_u32(port, "reg", &port_num);
 	dpi->id = port_num <= 2 ? port_num : 0;
 
@@ -625,13 +639,14 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
 	out->id = OMAP_DSS_OUTPUT_DPI;
 	out->type = OMAP_DISPLAY_TYPE_DPI;
 	out->dispc_channel = dpi_get_channel(dpi);
-	out->of_ports = BIT(port_num);
-	out->ops = &dpi_ops;
+	out->of_port = port_num;
 	out->owner = THIS_MODULE;
 
-	r = omapdss_device_init_output(out);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &dpi->bridge);
+	if (r < 0) {
+		dpi_bridge_cleanup(dpi);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -645,8 +660,14 @@ static void dpi_uninit_output_port(struct device_node *port)
 
 	omapdss_device_unregister(out);
 	omapdss_device_cleanup_output(out);
+
+	dpi_bridge_cleanup(dpi);
 }
 
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
 static const struct soc_device_attribute dpi_soc_devices[] = {
 	{ .machine = "OMAP3[456]*" },
 	{ .machine = "[AD]M37*" },
@@ -706,8 +727,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
 	dpi->dss = dss;
 	port->data = dpi;
 
-	mutex_init(&dpi->lock);
-
 	r = dpi_init_regulator(dpi);
 	if (r)
 		return r;
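
The DPI output is now a drm_bridge that refuses to attach without DRM_BRIDGE_ATTACH_NO_CONNECTOR, so connector creation becomes the display driver's job. A hedged sketch of the consumer side using the drm_bridge_connector helper introduced in the same cycle (a fragment: encoder, bridge and drm are assumed to be set up already):

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>

ret = drm_bridge_attach(encoder, bridge, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
	return ret;

/* the helper builds a connector on top of the whole bridge chain */
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector))
	return PTR_ERR(connector);

drm_connector_attach_encoder(connector, encoder);
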
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index da16ea0..79ddfbf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5116,12 +5116,12 @@ static int dsi_init_output(struct dsi_data *dsi)
 	out->dispc_channel = dsi_get_channel(dsi);
 	out->ops = &dsi_ops;
 	out->owner = THIS_MODULE;
-	out->of_ports = BIT(0);
+	out->of_port = 0;
 	out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
 		       | DRM_BUS_FLAG_DE_HIGH
 		       | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
 
-	r = omapdss_device_init_output(out);
+	r = omapdss_device_init_output(out, NULL);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
deleted file mode 100644
index b7981f3..0000000
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- */
-
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-
-#include "omapdss.h"
-
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
-{
-	struct device_node *remote_node;
-	struct omap_dss_device *dssdev;
-
-	remote_node = of_graph_get_remote_node(node, port, 0);
-	if (!remote_node)
-		return NULL;
-
-	dssdev = omapdss_find_device_by_node(remote_node);
-	of_node_put(remote_node);
-
-	return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 225ec80..b76fc2b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1151,6 +1151,31 @@ static const struct dss_features dra7xx_dss_feats = {
 	.has_lcd_clk_src	=	true,
 };
 
+static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
+{
+	struct platform_device *pdev = dss->pdev;
+	struct device_node *parent = pdev->dev.of_node;
+	struct device_node *port;
+	unsigned int i;
+
+	for (i = 0; i < num_ports; i++) {
+		port = of_graph_get_port_by_id(parent, i);
+		if (!port)
+			continue;
+
+		switch (dss->feat->ports[i]) {
+		case OMAP_DISPLAY_TYPE_DPI:
+			dpi_uninit_port(port);
+			break;
+		case OMAP_DISPLAY_TYPE_SDI:
+			sdi_uninit_port(port);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 static int dss_init_ports(struct dss_device *dss)
 {
 	struct platform_device *pdev = dss->pdev;
@@ -1168,13 +1193,13 @@ static int dss_init_ports(struct dss_device *dss)
 		case OMAP_DISPLAY_TYPE_DPI:
 			r = dpi_init_port(dss, pdev, port, dss->feat->model);
 			if (r)
-				return r;
+				goto error;
 			break;
 
 		case OMAP_DISPLAY_TYPE_SDI:
 			r = sdi_init_port(dss, pdev, port);
 			if (r)
-				return r;
+				goto error;
 			break;
 
 		default:
@@ -1183,31 +1208,15 @@ static int dss_init_ports(struct dss_device *dss)
 	}
 
 	return 0;
+
+error:
+	__dss_uninit_ports(dss, i);
+	return r;
 }
 
 static void dss_uninit_ports(struct dss_device *dss)
 {
-	struct platform_device *pdev = dss->pdev;
-	struct device_node *parent = pdev->dev.of_node;
-	struct device_node *port;
-	int i;
-
-	for (i = 0; i < dss->feat->num_ports; i++) {
-		port = of_graph_get_port_by_id(parent, i);
-		if (!port)
-			continue;
-
-		switch (dss->feat->ports[i]) {
-		case OMAP_DISPLAY_TYPE_DPI:
-			dpi_uninit_port(port);
-			break;
-		case OMAP_DISPLAY_TYPE_SDI:
-			sdi_uninit_port(port);
-			break;
-		default:
-			break;
-		}
-	}
+	__dss_uninit_ports(dss, dss->feat->num_ports);
 }
 
 static int dss_video_pll_probe(struct dss_device *dss)
@@ -1543,7 +1552,8 @@ static void dss_shutdown(struct platform_device *pdev)
 	DSSDBG("shutdown\n");
 
 	for_each_dss_output(dssdev) {
-		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+		    dssdev->ops && dssdev->ops->disable)
 			dssdev->ops->disable(dssdev);
 	}
 }
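
Besides the NULL-ops guard in dss_shutdown(), this hunk fixes an unwind bug: dss_init_ports() used to bail out on the first failing port without releasing the ones it had already initialised. Factoring out __dss_uninit_ports() with an explicit count serves both the error path (undo exactly the first i ports) and the full dss_uninit_ports(). The generic shape of the idiom, for reference (illustrative, not the omapdss code):

int init_all(unsigned int num)
{
	unsigned int i;
	int r;

	for (i = 0; i < num; i++) {
		r = init_one(i);	/* assumed helpers */
		if (r)
			goto error;
	}
	return 0;

error:
	uninit_first(i);		/* tear down entries [0, i) only */
	return r;
}
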
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c867552..3a40833 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -14,6 +14,7 @@
 #include <linux/hdmi.h>
 #include <sound/omap-hdmi-audio.h>
 #include <media/cec.h>
+#include <drm/drm_bridge.h>
 
 #include "omapdss.h"
 #include "dss.h"
@@ -364,6 +365,7 @@ struct omap_hdmi {
 	bool core_enabled;
 
 	struct omap_dss_device output;
+	struct drm_bridge bridge;
 
 	struct platform_device *audio_pdev;
 	void (*audio_abort_cb)(struct device *dev);
@@ -378,6 +380,6 @@ struct omap_hdmi {
 	bool display_enabled;
 };
 
-#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)
 
 #endif
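
The accessor flips direction with the bridge conversion: callers used to recover struct omap_hdmi from the embedded omap_dss_device, and now recover it from the embedded drm_bridge. Both are the standard container_of() pattern; for example:

static void example(struct drm_bridge *bridge)
{
	/* expands to container_of(bridge, struct omap_hdmi, bridge) */
	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);

	dev_dbg(&hdmi->pdev->dev, "resolved enclosing omap_hdmi\n");
}
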
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 0f557fa..2578c95 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -28,6 +28,9 @@
 #include <sound/omap-hdmi-audio.h>
 #include <media/cec.h>
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
 #include "omapdss.h"
 #include "hdmi4_core.h"
 #include "hdmi4_cec.h"
@@ -237,20 +240,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
 	hdmi_power_off_core(hdmi);
 }
 
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
-				     const struct drm_display_mode *mode)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	mutex_lock(&hdmi->lock);
-
-	drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
-	dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
-	mutex_unlock(&hdmi->lock);
-}
-
 static int hdmi_dump_regs(struct seq_file *s, void *p)
 {
 	struct omap_hdmi *hdmi = s->private;
@@ -272,23 +261,6 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
 	return 0;
 }
 
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
-{
-	int r;
-
-	mutex_lock(&hdmi->lock);
-
-	r = hdmi_runtime_get(hdmi);
-	BUG_ON(r);
-
-	r = hdmi4_read_edid(&hdmi->core,  buf, len);
-
-	hdmi_runtime_put(hdmi);
-	mutex_unlock(&hdmi->lock);
-
-	return r;
-}
-
 static void hdmi_start_audio_stream(struct omap_hdmi *hd)
 {
 	hdmi_wp_audio_enable(&hd->wp, true);
@@ -301,62 +273,6 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
 	hdmi_wp_audio_enable(&hd->wp, false);
 }
 
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-	unsigned long flags;
-	int r;
-
-	DSSDBG("ENTER hdmi_display_enable\n");
-
-	mutex_lock(&hdmi->lock);
-
-	r = hdmi_power_on_full(hdmi);
-	if (r) {
-		DSSERR("failed to power on device\n");
-		goto done;
-	}
-
-	if (hdmi->audio_configured) {
-		r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
-				       &hdmi->audio_config,
-				       hdmi->cfg.vm.pixelclock);
-		if (r) {
-			DSSERR("Error restoring audio configuration: %d", r);
-			hdmi->audio_abort_cb(&hdmi->pdev->dev);
-			hdmi->audio_configured = false;
-		}
-	}
-
-	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
-	if (hdmi->audio_configured && hdmi->audio_playing)
-		hdmi_start_audio_stream(hdmi);
-	hdmi->display_enabled = true;
-	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
-
-done:
-	mutex_unlock(&hdmi->lock);
-}
-
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-	unsigned long flags;
-
-	DSSDBG("Enter hdmi_display_disable\n");
-
-	mutex_lock(&hdmi->lock);
-
-	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
-	hdmi_stop_audio_stream(hdmi);
-	hdmi->display_enabled = false;
-	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
-
-	hdmi_power_off_full(hdmi);
-
-	mutex_unlock(&hdmi->lock);
-}
-
 int hdmi4_core_enable(struct hdmi_core_data *core)
 {
 	struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
@@ -393,22 +309,139 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
 	mutex_unlock(&hdmi->lock);
 }
 
-static int hdmi_connect(struct omap_dss_device *src,
-			struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+			       enum drm_bridge_attach_flags flags)
 {
-	return omapdss_device_connect(dst->dss, dst, dst->next);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+				 bridge, flags);
 }
 
-static void hdmi_disconnect(struct omap_dss_device *src,
-			    struct omap_dss_device *dst)
+static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  const struct drm_display_mode *adjusted_mode)
 {
-	omapdss_device_disconnect(dst, dst->next);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+	mutex_lock(&hdmi->lock);
+
+	drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+	dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+	mutex_unlock(&hdmi->lock);
 }
 
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
-		u8 *edid, int len)
+static void hdmi4_bridge_enable(struct drm_bridge *bridge,
+				struct drm_bridge_state *bridge_state)
 {
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	struct drm_atomic_state *state = bridge_state->base.state;
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	struct drm_crtc_state *crtc_state;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * None of these should fail, as the bridge can't be enabled without a
+	 * valid CRTC to connector path with fully populated new states.
+	 */
+	connector = drm_atomic_get_new_connector_for_encoder(state,
+							     bridge->encoder);
+	if (WARN_ON(!connector))
+		return;
+	conn_state = drm_atomic_get_new_connector_state(state, connector);
+	if (WARN_ON(!conn_state))
+		return;
+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+	if (WARN_ON(!crtc_state))
+		return;
+
+	hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+				? HDMI_HDMI : HDMI_DVI;
+
+	if (connector->display_info.is_hdmi) {
+		const struct drm_display_mode *mode;
+		struct hdmi_avi_infoframe avi;
+
+		mode = &crtc_state->adjusted_mode;
+		ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+							       mode);
+		if (ret == 0)
+			hdmi->cfg.infoframe = avi;
+	}
+
+	mutex_lock(&hdmi->lock);
+
+	ret = hdmi_power_on_full(hdmi);
+	if (ret) {
+		DSSERR("failed to power on device\n");
+		goto done;
+	}
+
+	if (hdmi->audio_configured) {
+		ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+					 &hdmi->audio_config,
+					 hdmi->cfg.vm.pixelclock);
+		if (ret) {
+			DSSERR("Error restoring audio configuration: %d", ret);
+			hdmi->audio_abort_cb(&hdmi->pdev->dev);
+			hdmi->audio_configured = false;
+		}
+	}
+
+	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+	if (hdmi->audio_configured && hdmi->audio_playing)
+		hdmi_start_audio_stream(hdmi);
+	hdmi->display_enabled = true;
+	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+
+done:
+	mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_disable(struct drm_bridge *bridge,
+				 struct drm_bridge_state *bridge_state)
+{
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	unsigned long flags;
+
+	mutex_lock(&hdmi->lock);
+
+	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+	hdmi_stop_audio_stream(hdmi);
+	hdmi->display_enabled = false;
+	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+
+	hdmi_power_off_full(hdmi);
+
+	mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
+				    enum drm_connector_status status)
+{
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+	if (status == connector_status_disconnected)
+		hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
+}
+
+static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
+					  struct drm_connector *connector)
+{
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	struct edid *edid = NULL;
+	unsigned int cec_addr;
 	bool need_enable;
 	int r;
 
@@ -417,64 +450,66 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
 	if (need_enable) {
 		r = hdmi4_core_enable(&hdmi->core);
 		if (r)
-			return r;
+			return NULL;
 	}
 
-	r = read_edid(hdmi, edid, len);
-	if (r >= 256)
-		hdmi4_cec_set_phys_addr(&hdmi->core,
-					cec_get_edid_phys_addr(edid, r, NULL));
-	else
-		hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
+	mutex_lock(&hdmi->lock);
+	r = hdmi_runtime_get(hdmi);
+	BUG_ON(r);
+
+	r = hdmi4_core_ddc_init(&hdmi->core);
+	if (r)
+		goto done;
+
+	edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
+
+done:
+	hdmi_runtime_put(hdmi);
+	mutex_unlock(&hdmi->lock);
+
+	if (edid && edid->extensions) {
+		unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
+
+		cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
+	} else {
+		cec_addr = CEC_PHYS_ADDR_INVALID;
+	}
+
+	hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
+
 	if (need_enable)
 		hdmi4_core_disable(&hdmi->core);
 
-	return r;
+	return edid;
 }
 
-static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
-}
-
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
-		const struct hdmi_avi_infoframe *avi)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	hdmi->cfg.infoframe = *avi;
-	return 0;
-}
-
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
-		bool hdmi_mode)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
-	return 0;
-}
-
-static const struct omap_dss_device_ops hdmi_ops = {
-	.connect		= hdmi_connect,
-	.disconnect		= hdmi_disconnect,
-
-	.enable			= hdmi_display_enable,
-	.disable		= hdmi_display_disable,
-
-	.set_timings		= hdmi_display_set_timings,
-
-	.read_edid		= hdmi_read_edid,
-
-	.hdmi = {
-		.lost_hotplug		= hdmi_lost_hotplug,
-		.set_infoframe		= hdmi_set_infoframe,
-		.set_hdmi_mode		= hdmi_set_hdmi_mode,
-	},
+static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
+	.attach = hdmi4_bridge_attach,
+	.mode_set = hdmi4_bridge_mode_set,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_enable = hdmi4_bridge_enable,
+	.atomic_disable = hdmi4_bridge_disable,
+	.hpd_notify = hdmi4_bridge_hpd_notify,
+	.get_edid = hdmi4_bridge_get_edid,
 };
 
+static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
+{
+	hdmi->bridge.funcs = &hdmi4_bridge_funcs;
+	hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+	hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+	hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+
+	drm_bridge_add(&hdmi->bridge);
+}
+
+static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+	drm_bridge_remove(&hdmi->bridge);
+}
+
 /* -----------------------------------------------------------------------------
  * Audio Callbacks
  */
@@ -666,19 +701,21 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
 	struct omap_dss_device *out = &hdmi->output;
 	int r;
 
+	hdmi4_bridge_init(hdmi);
+
 	out->dev = &hdmi->pdev->dev;
 	out->id = OMAP_DSS_OUTPUT_HDMI;
 	out->type = OMAP_DISPLAY_TYPE_HDMI;
 	out->name = "hdmi.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->ops = &hdmi_ops;
 	out->owner = THIS_MODULE;
-	out->of_ports = BIT(0);
-	out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+	out->of_port = 0;
 
-	r = omapdss_device_init_output(out);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &hdmi->bridge);
+	if (r < 0) {
+		hdmi4_bridge_cleanup(hdmi);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -691,6 +728,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
 
 	omapdss_device_unregister(out);
 	omapdss_device_cleanup_output(out);
+
+	hdmi4_bridge_cleanup(hdmi);
 }
 
 static int hdmi4_probe_of(struct omap_hdmi *hdmi)
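
With hdmi4 registered as a drm_bridge that advertises DRM_BRIDGE_OP_EDID, EDID retrieval moves from the omap_dss_device .read_edid op into the generic bridge connector. A conceptual sketch of the consuming side follows; this illustrates the flow, it is not the actual drm_bridge_connector.c code, and the helper name is invented:

/*
 * Conceptual sketch: how a connector created by drm_bridge_connector_init()
 * can fetch modes from a bridge that sets DRM_BRIDGE_OP_EDID.
 */
static int bridge_connector_get_modes_sketch(struct drm_connector *connector,
					     struct drm_bridge *edid_bridge)
{
	struct edid *edid;
	int n = 0;

	/* Ends up in hdmi4_bridge_get_edid() via bridge->funcs->get_edid. */
	edid = drm_bridge_get_edid(edid_bridge, connector);
	if (edid && drm_edid_is_valid(edid)) {
		drm_connector_update_edid_property(connector, edid);
		n = drm_add_edid_modes(connector, edid);
	} else {
		drm_connector_update_edid_property(connector, NULL);
	}

	kfree(edid);
	return n;
}
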
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ea5d5c2..751985a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -32,7 +32,7 @@ static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
 	return core->base + HDMI_CORE_AV;
 }
 
-static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+int hdmi4_core_ddc_init(struct hdmi_core_data *core)
 {
 	void __iomem *base = core->base;
 
@@ -74,13 +74,11 @@ static int hdmi_core_ddc_init(struct hdmi_core_data *core)
 	return 0;
 }
 
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
-		u8 *pedid, int ext)
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
 {
+	struct hdmi_core_data *core = data;
 	void __iomem *base = core->base;
 	u32 i;
-	char checksum;
-	u32 offset = 0;
 
 	/* HDMI_CORE_DDC_STATUS_IN_PROG */
 	if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
@@ -89,24 +87,21 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
 		return -ETIMEDOUT;
 	}
 
-	if (ext % 2 != 0)
-		offset = 0x80;
-
 	/* Load Segment Address Register */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+	REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
 
 	/* Load Slave Address Register */
 	REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
 
 	/* Load Offset Address Register */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+	REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
 
 	/* Load Byte Count */
-	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
 	REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
 
 	/* Set DDC_CMD */
-	if (ext)
+	if (block)
 		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
 	else
 		REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
@@ -122,7 +117,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
 		return -EIO;
 	}
 
-	for (i = 0; i < 0x80; ++i) {
+	for (i = 0; i < len; ++i) {
 		int t;
 
 		/* IN_PROG */
@@ -141,48 +136,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
 			udelay(1);
 		}
 
-		pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
-	}
-
-	checksum = 0;
-	for (i = 0; i < 0x80; ++i)
-		checksum += pedid[i];
-
-	if (checksum != 0) {
-		DSSERR("E-EDID checksum failed!!\n");
-		return -EIO;
+		buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
 	}
 
 	return 0;
 }
 
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
-	int r, l;
-
-	if (len < 128)
-		return -EINVAL;
-
-	r = hdmi_core_ddc_init(core);
-	if (r)
-		return r;
-
-	r = hdmi_core_ddc_edid(core, edid, 0);
-	if (r)
-		return r;
-
-	l = 128;
-
-	if (len >= 128 * 2 && edid[0x7e] > 0) {
-		r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
-		if (r)
-			return r;
-		l += 128;
-	}
-
-	return l;
-}
-
 static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
 {
 	DSSDBG("Enter hdmi_core_init\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index 11c4b7b..dc64ae2 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -249,7 +249,9 @@ struct hdmi_core_packet_enable_repeat {
 	u32	generic_pkt_repeat;
 };
 
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+int hdmi4_core_ddc_init(struct hdmi_core_data *core);
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+
 void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
 		struct hdmi_config *cfg);
 void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d9463b3..4d4c1fa 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -31,6 +31,9 @@
 #include <linux/of_graph.h>
 #include <sound/omap-hdmi-audio.h>
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
 #include "omapdss.h"
 #include "hdmi5_core.h"
 #include "dss.h"
@@ -236,20 +239,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
 	hdmi_power_off_core(hdmi);
 }
 
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
-				     const struct drm_display_mode *mode)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	mutex_lock(&hdmi->lock);
-
-	drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
-	dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
-	mutex_unlock(&hdmi->lock);
-}
-
 static int hdmi_dump_regs(struct seq_file *s, void *p)
 {
 	struct omap_hdmi *hdmi = s->private;
@@ -271,30 +260,6 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
 	return 0;
 }
 
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
-{
-	int r;
-	int idlemode;
-
-	mutex_lock(&hdmi->lock);
-
-	r = hdmi_runtime_get(hdmi);
-	BUG_ON(r);
-
-	idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
-	/* No-idle mode */
-	REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
-
-	r = hdmi5_read_edid(&hdmi->core,  buf, len);
-
-	REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
-
-	hdmi_runtime_put(hdmi);
-	mutex_unlock(&hdmi->lock);
-
-	return r;
-}
-
 static void hdmi_start_audio_stream(struct omap_hdmi *hd)
 {
 	REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
@@ -309,62 +274,6 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
 	REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
 }
 
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-	unsigned long flags;
-	int r;
-
-	DSSDBG("ENTER hdmi_display_enable\n");
-
-	mutex_lock(&hdmi->lock);
-
-	r = hdmi_power_on_full(hdmi);
-	if (r) {
-		DSSERR("failed to power on device\n");
-		goto done;
-	}
-
-	if (hdmi->audio_configured) {
-		r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
-				       &hdmi->audio_config,
-				       hdmi->cfg.vm.pixelclock);
-		if (r) {
-			DSSERR("Error restoring audio configuration: %d", r);
-			hdmi->audio_abort_cb(&hdmi->pdev->dev);
-			hdmi->audio_configured = false;
-		}
-	}
-
-	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
-	if (hdmi->audio_configured && hdmi->audio_playing)
-		hdmi_start_audio_stream(hdmi);
-	hdmi->display_enabled = true;
-	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
-
-done:
-	mutex_unlock(&hdmi->lock);
-}
-
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-	unsigned long flags;
-
-	DSSDBG("Enter hdmi_display_disable\n");
-
-	mutex_lock(&hdmi->lock);
-
-	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
-	hdmi_stop_audio_stream(hdmi);
-	hdmi->display_enabled = false;
-	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
-
-	hdmi_power_off_full(hdmi);
-
-	mutex_unlock(&hdmi->lock);
-}
-
 static int hdmi_core_enable(struct omap_hdmi *hdmi)
 {
 	int r = 0;
@@ -398,23 +307,131 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
 	mutex_unlock(&hdmi->lock);
 }
 
-static int hdmi_connect(struct omap_dss_device *src,
-			struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+			       enum drm_bridge_attach_flags flags)
 {
-	return omapdss_device_connect(dst->dss, dst, dst->next);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+				 bridge, flags);
 }
 
-static void hdmi_disconnect(struct omap_dss_device *src,
-			    struct omap_dss_device *dst)
+static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  const struct drm_display_mode *adjusted_mode)
 {
-	omapdss_device_disconnect(dst, dst->next);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+	mutex_lock(&hdmi->lock);
+
+	drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+	dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+	mutex_unlock(&hdmi->lock);
 }
 
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
-		u8 *edid, int len)
+static void hdmi5_bridge_enable(struct drm_bridge *bridge,
+				struct drm_bridge_state *bridge_state)
 {
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	struct drm_atomic_state *state = bridge_state->base.state;
+	struct drm_connector_state *conn_state;
+	struct drm_connector *connector;
+	struct drm_crtc_state *crtc_state;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * None of these should fail, as the bridge can't be enabled without a
+	 * valid CRTC to connector path with fully populated new states.
+	 */
+	connector = drm_atomic_get_new_connector_for_encoder(state,
+							     bridge->encoder);
+	if (WARN_ON(!connector))
+		return;
+	conn_state = drm_atomic_get_new_connector_state(state, connector);
+	if (WARN_ON(!conn_state))
+		return;
+	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+	if (WARN_ON(!crtc_state))
+		return;
+
+	hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+				? HDMI_HDMI : HDMI_DVI;
+
+	if (connector->display_info.is_hdmi) {
+		const struct drm_display_mode *mode;
+		struct hdmi_avi_infoframe avi;
+
+		mode = &crtc_state->adjusted_mode;
+		ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+							       mode);
+		if (ret == 0)
+			hdmi->cfg.infoframe = avi;
+	}
+
+	mutex_lock(&hdmi->lock);
+
+	ret = hdmi_power_on_full(hdmi);
+	if (ret) {
+		DSSERR("failed to power on device\n");
+		goto done;
+	}
+
+	if (hdmi->audio_configured) {
+		ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+					 &hdmi->audio_config,
+					 hdmi->cfg.vm.pixelclock);
+		if (ret) {
+			DSSERR("Error restoring audio configuration: %d", ret);
+			hdmi->audio_abort_cb(&hdmi->pdev->dev);
+			hdmi->audio_configured = false;
+		}
+	}
+
+	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+	if (hdmi->audio_configured && hdmi->audio_playing)
+		hdmi_start_audio_stream(hdmi);
+	hdmi->display_enabled = true;
+	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+
+done:
+	mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi5_bridge_disable(struct drm_bridge *bridge,
+				 struct drm_bridge_state *bridge_state)
+{
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	unsigned long flags;
+
+	mutex_lock(&hdmi->lock);
+
+	spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+	hdmi_stop_audio_stream(hdmi);
+	hdmi->display_enabled = false;
+	spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+
+	hdmi_power_off_full(hdmi);
+
+	mutex_unlock(&hdmi->lock);
+}
+
+static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
+					  struct drm_connector *connector)
+{
+	struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+	struct edid *edid;
 	bool need_enable;
+	int idlemode;
 	int r;
 
 	need_enable = hdmi->core_enabled == false;
@@ -422,52 +439,60 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
 	if (need_enable) {
 		r = hdmi_core_enable(hdmi);
 		if (r)
-			return r;
+			return NULL;
 	}
 
-	r = read_edid(hdmi, edid, len);
+	mutex_lock(&hdmi->lock);
+	r = hdmi_runtime_get(hdmi);
+	BUG_ON(r);
+
+	idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+	/* No-idle mode */
+	REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+
+	hdmi5_core_ddc_init(&hdmi->core);
+
+	edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
+
+	hdmi5_core_ddc_uninit(&hdmi->core);
+
+	REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+
+	hdmi_runtime_put(hdmi);
+	mutex_unlock(&hdmi->lock);
 
 	if (need_enable)
 		hdmi_core_disable(hdmi);
 
-	return r;
+	return edid;
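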
 }
 
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
-		const struct hdmi_avi_infoframe *avi)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	hdmi->cfg.infoframe = *avi;
-	return 0;
-}
-
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
-		bool hdmi_mode)
-{
-	struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
-	hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
-	return 0;
-}
-
-static const struct omap_dss_device_ops hdmi_ops = {
-	.connect		= hdmi_connect,
-	.disconnect		= hdmi_disconnect,
-
-	.enable			= hdmi_display_enable,
-	.disable		= hdmi_display_disable,
-
-	.set_timings		= hdmi_display_set_timings,
-
-	.read_edid		= hdmi_read_edid,
-
-	.hdmi = {
-		.set_infoframe		= hdmi_set_infoframe,
-		.set_hdmi_mode		= hdmi_set_hdmi_mode,
-	},
+static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
+	.attach = hdmi5_bridge_attach,
+	.mode_set = hdmi5_bridge_mode_set,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
+	.atomic_enable = hdmi5_bridge_enable,
+	.atomic_disable = hdmi5_bridge_disable,
+	.get_edid = hdmi5_bridge_get_edid,
 };
 
+static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
+{
+	hdmi->bridge.funcs = &hdmi5_bridge_funcs;
+	hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+	hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+	hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+
+	drm_bridge_add(&hdmi->bridge);
+}
+
+static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+	drm_bridge_remove(&hdmi->bridge);
+}
+
 /* -----------------------------------------------------------------------------
  * Audio Callbacks
  */
@@ -650,19 +675,21 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
 	struct omap_dss_device *out = &hdmi->output;
 	int r;
 
+	hdmi5_bridge_init(hdmi);
+
 	out->dev = &hdmi->pdev->dev;
 	out->id = OMAP_DSS_OUTPUT_HDMI;
 	out->type = OMAP_DISPLAY_TYPE_HDMI;
 	out->name = "hdmi.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->ops = &hdmi_ops;
 	out->owner = THIS_MODULE;
-	out->of_ports = BIT(0);
-	out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+	out->of_port = 0;
 
-	r = omapdss_device_init_output(out);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &hdmi->bridge);
+	if (r < 0) {
+		hdmi5_bridge_cleanup(hdmi);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -675,6 +702,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
 
 	omapdss_device_unregister(out);
 	omapdss_device_cleanup_output(out);
+
+	hdmi5_bridge_cleanup(hdmi);
 }
 
 static int hdmi5_probe_of(struct omap_hdmi *hdmi)
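
Note how the HDMI-vs-DVI decision and the AVI infoframe, previously pushed in through the .set_hdmi_mode and .set_infoframe dssdev ops, are now derived at enable time from connector->display_info. That field is filled in by the EDID parser when the connector's .get_modes runs; a minimal sketch of the dependency, with names matching the diff:

	/* During .get_modes (EDID path): */
	drm_connector_update_edid_property(connector, edid);
	drm_add_edid_modes(connector, edid);	/* populates display_info.is_hdmi */

	/* Later, in hdmi5_bridge_enable(): */
	hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
				? HDMI_HDMI : HDMI_DVI;
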
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ff4d35c..7dd5870 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,7 +23,7 @@
 
 #include "hdmi5_core.h"
 
-static void hdmi_core_ddc_init(struct hdmi_core_data *core)
+void hdmi5_core_ddc_init(struct hdmi_core_data *core)
 {
 	void __iomem *base = core->base;
 	const unsigned long long iclk = 266000000;	/* DSS L3 ICLK */
@@ -102,7 +102,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 	REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
 }
 
-static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
 {
 	void __iomem *base = core->base;
 
@@ -112,14 +112,14 @@ static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
 	REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
 }
 
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
 {
+	struct hdmi_core_data *core = data;
 	void __iomem *base = core->base;
 	u8 cur_addr;
-	char checksum = 0;
 	const int retries = 1000;
-	u8 seg_ptr = ext / 2;
-	u8 edidbase = ((ext % 2) * 0x80);
+	u8 seg_ptr = block / 2;
+	u8 edidbase = ((block % 2) * EDID_LENGTH);
 
 	REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
 
@@ -127,7 +127,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
 	 * TODO: We use polling here, although we probably should use proper
 	 * interrupts.
 	 */
-	for (cur_addr = 0; cur_addr < 128; ++cur_addr) {
+	for (cur_addr = 0; cur_addr < len; ++cur_addr) {
 		int i;
 
 		/* clear ERROR and DONE */
@@ -164,45 +164,13 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
 			return -EIO;
 		}
 
-		pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
-		checksum += pedid[cur_addr];
+		buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
 	}
 
 	return 0;
 
 }
 
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
-	int r, n, i;
-	int max_ext_blocks = (len / 128) - 1;
-
-	if (len < 128)
-		return -EINVAL;
-
-	hdmi_core_ddc_init(core);
-
-	r = hdmi_core_ddc_edid(core, edid, 0);
-	if (r)
-		goto out;
-
-	n = edid[0x7e];
-
-	if (n > max_ext_blocks)
-		n = max_ext_blocks;
-
-	for (i = 1; i <= n; i++) {
-		r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i);
-		if (r)
-			goto out;
-	}
-
-out:
-	hdmi_core_ddc_uninit(core);
-
-	return r ? r : len;
-}
-
 void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
 {
 
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index f10b8a2..65eadef 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -281,7 +281,10 @@ struct csc_table {
 	u16 c1, c2, c3, c4;
 };
 
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi5_core_ddc_init(struct hdmi_core_data *core);
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
+
 void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
 int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
 void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
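
Both core drivers now map the DRM block index to E-DDC segment and offset the same way. A worked example of the arithmetic, written as a hypothetical helper (not part of the patch):

static inline void eddc_addr(unsigned int block, u8 *segment, u8 *offset)
{
	*segment = block / 2;		/* E-DDC segment pointer */
	*offset = (block % 2) * 0x80;	/* byte offset within the segment */
}

/*
 * block 0 -> segment 0, offset 0x00  (base EDID)
 * block 1 -> segment 0, offset 0x80  (first extension)
 * block 2 -> segment 1, offset 0x00
 * block 3 -> segment 1, offset 0x80
 */
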
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3150285..00372f4 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -174,12 +174,7 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
 };
 
 static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
-	{ .compatible = "composite-video-connector" },
-	{ .compatible = "hdmi-connector" },
 	{ .compatible = "panel-dsi-cm" },
-	{ .compatible = "svideo-connector" },
-	{ .compatible = "ti,opa362" },
-	{ .compatible = "ti,tpd12s015" },
 	{},
 };
 
@@ -192,7 +187,7 @@ static int __init omapdss_boot_init(void)
 	dss = of_find_matching_node(NULL, omapdss_of_match);
 
 	if (dss == NULL || !of_device_is_available(dss))
-		return 0;
+		goto put_node;
 
 	omapdss_walk_device(dss, true);
 
@@ -217,6 +212,8 @@ static int __init omapdss_boot_init(void)
 		kfree(n);
 	}
 
+put_node:
+	of_node_put(dss);
 	return 0;
 }
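
Two independent fixes here: the whitelist shrinks because connectors and external encoders are now handled by generic drm_bridge drivers and no longer need the omapdss DT fixup, and the early return used to leak the reference taken by of_find_matching_node(). The refcounting pattern, sketched with a hypothetical match table:

	struct device_node *node = of_find_matching_node(NULL, match_table);

	if (!node || !of_device_is_available(node))
		goto put_node;	/* previously returned here, leaking the ref */

	/* ... walk and use node ... */

put_node:
	of_node_put(node);	/* of_node_put(NULL) is a safe no-op */
	return 0;
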
 
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 79f6b19..ab19d4a 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -285,13 +285,6 @@ struct omap_dss_writeback_info {
 	u8 pre_mult_alpha;
 };
 
-struct omapdss_hdmi_ops {
-	void (*lost_hotplug)(struct omap_dss_device *dssdev);
-	int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
-	int (*set_infoframe)(struct omap_dss_device *dssdev,
-		const struct hdmi_avi_infoframe *avi);
-};
-
 struct omapdss_dsi_ops {
 	void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
 			bool enter_ulps);
@@ -349,46 +342,23 @@ struct omap_dss_device_ops {
 	void (*disconnect)(struct omap_dss_device *dssdev,
 			struct omap_dss_device *dst);
 
-	void (*pre_enable)(struct omap_dss_device *dssdev);
 	void (*enable)(struct omap_dss_device *dssdev);
 	void (*disable)(struct omap_dss_device *dssdev);
-	void (*post_disable)(struct omap_dss_device *dssdev);
 
 	int (*check_timings)(struct omap_dss_device *dssdev,
 			     struct drm_display_mode *mode);
-	void (*set_timings)(struct omap_dss_device *dssdev,
-			    const struct drm_display_mode *mode);
-
-	bool (*detect)(struct omap_dss_device *dssdev);
-
-	void (*register_hpd_cb)(struct omap_dss_device *dssdev,
-				void (*cb)(void *cb_data,
-					  enum drm_connector_status status),
-				void *cb_data);
-	void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
-
-	int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
 
 	int (*get_modes)(struct omap_dss_device *dssdev,
 			 struct drm_connector *connector);
 
-	union {
-		const struct omapdss_hdmi_ops hdmi;
-		const struct omapdss_dsi_ops dsi;
-	};
+	const struct omapdss_dsi_ops dsi;
 };
 
 /**
  * enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
- * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
  * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
  */
 enum omap_dss_device_ops_flag {
-	OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
-	OMAP_DSS_DEVICE_OP_HPD = BIT(1),
-	OMAP_DSS_DEVICE_OP_EDID = BIT(2),
 	OMAP_DSS_DEVICE_OP_MODES = BIT(3),
 };
 
@@ -400,6 +370,7 @@ struct omap_dss_device {
 	struct dss_device *dss;
 	struct omap_dss_device *next;
 	struct drm_bridge *bridge;
+	struct drm_bridge *next_bridge;
 	struct drm_panel *panel;
 
 	struct list_head list;
@@ -436,8 +407,8 @@ struct omap_dss_device {
 	/* output instance */
 	enum omap_dss_output_id id;
 
-	/* bitmask of port numbers in DT */
-	unsigned int of_ports;
+	/* port number in DT */
+	unsigned int of_port;
 };
 
 struct omap_dss_driver {
@@ -461,7 +432,6 @@ static inline bool omapdss_is_initialized(void)
 }
 
 void omapdss_display_init(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
 int omapdss_display_get_modes(struct drm_connector *connector,
 			      const struct videomode *vm);
 
@@ -475,10 +445,8 @@ int omapdss_device_connect(struct dss_device *dss,
 			   struct omap_dss_device *dst);
 void omapdss_device_disconnect(struct omap_dss_device *src,
 			       struct omap_dss_device *dst);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
 void omapdss_device_enable(struct omap_dss_device *dssdev);
 void omapdss_device_disable(struct omap_dss_device *dssdev);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev);
 
 int omap_dss_get_num_overlay_managers(void);
 
@@ -487,7 +455,8 @@ int omap_dss_get_num_overlays(void);
 #define for_each_dss_output(d) \
 	while ((d = omapdss_device_next_output(d)) != NULL)
 struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
-int omapdss_device_init_output(struct omap_dss_device *out);
+int omapdss_device_init_output(struct omap_dss_device *out,
+			       struct drm_bridge *local_bridge);
 void omapdss_device_cleanup_output(struct omap_dss_device *out);
 
 typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
@@ -502,9 +471,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
 	return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
 }
 
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
-
 enum dss_writeback_channel {
 	DSS_WB_LCD1_MGR =	0,
 	DSS_WB_LCD2_MGR =	1,
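
The of_ports bitmask is reduced to a plain port number because no output ever set more than one bit, and the only consumer recovered the port with ffs(), as the output.c hunk below shows. The arithmetic being replaced is ffs(BIT(n)) - 1 == n; for SDI's port 1, ffs(BIT(1)) - 1 = ffs(2) - 1 = 2 - 1 = 1, so storing the port directly is equivalent:

	/* before */ port = ffs(out->of_ports) - 1;
	/* after  */ port = out->of_port;
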
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 0693d34..ce21c79 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,7 +4,6 @@
  * Author: Archit Taneja <archit@ti.com>
  */
 
-#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -18,12 +17,14 @@
 #include "dss.h"
 #include "omapdss.h"
 
-int omapdss_device_init_output(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out,
+			       struct drm_bridge *local_bridge)
 {
 	struct device_node *remote_node;
+	int ret;
 
 	remote_node = of_graph_get_remote_node(out->dev->of_node,
-					       ffs(out->of_ports) - 1, 0);
+					       out->of_port, 0);
 	if (!remote_node) {
 		dev_dbg(out->dev, "failed to find video sink\n");
 		return 0;
@@ -39,17 +40,55 @@ int omapdss_device_init_output(struct omap_dss_device *out)
 
 	if (out->next && out->type != out->next->type) {
 		dev_err(out->dev, "output type and display type don't match\n");
-		omapdss_device_put(out->next);
-		out->next = NULL;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto error;
 	}
 
-	return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+	if (out->panel) {
+		struct drm_bridge *bridge;
+
+		bridge = drm_panel_bridge_add(out->panel);
+		if (IS_ERR(bridge)) {
+			dev_err(out->dev,
+				"unable to create panel bridge (%ld)\n",
+				PTR_ERR(bridge));
+			ret = PTR_ERR(bridge);
+			goto error;
+		}
+
+		out->bridge = bridge;
+	}
+
+	if (local_bridge) {
+		if (!out->bridge) {
+			ret = -EPROBE_DEFER;
+			goto error;
+		}
+
+		out->next_bridge = out->bridge;
+		out->bridge = local_bridge;
+	}
+
+	if (!out->next && !out->bridge) {
+		ret = -EPROBE_DEFER;
+		goto error;
+	}
+
+	return 0;
+
+error:
+	omapdss_device_cleanup_output(out);
+	out->next = NULL;
+	return ret;
 }
 EXPORT_SYMBOL(omapdss_device_init_output);
 
 void omapdss_device_cleanup_output(struct omap_dss_device *out)
 {
+	if (out->bridge && out->panel)
+		drm_panel_bridge_remove(out->next_bridge ?
+					out->next_bridge : out->bridge);
+
 	if (out->next)
 		omapdss_device_put(out->next);
 }
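
omapdss_device_init_output() now builds a uniform bridge chain. A summary of the resulting topology, as read from the code above:

/*
 * After omapdss_device_init_output(out, local_bridge):
 *
 *   encoder --> out->bridge        (local_bridge, the internal encoder)
 *           --> out->next_bridge   (external encoder or panel bridge)
 *           --> ... --> connector
 *
 * A drm_panel is first wrapped with drm_panel_bridge_add(), so the rest
 * of the driver only deals with bridges; if the sink has not probed yet,
 * -EPROBE_DEFER is returned as before.
 */
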
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 3b447c0..417a874 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -6,17 +6,19 @@
 
 #define DSS_SUBSYS_NAME "SDI"
 
-#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/regulator/consumer.h>
 #include <linux/export.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
+#include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/string.h>
 
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
 #include "dss.h"
+#include "omapdss.h"
 
 struct sdi_device {
 	struct platform_device *pdev;
@@ -30,9 +32,11 @@ struct sdi_device {
 	int datapairs;
 
 	struct omap_dss_device output;
+	struct drm_bridge bridge;
 };
 
-#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
+#define drm_bridge_to_sdi(bridge) \
+	container_of(bridge, struct sdi_device, bridge)
 
 struct sdi_clk_calc_ctx {
 	struct sdi_device *sdi;
@@ -118,9 +122,82 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
 	dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
 }
 
-static void sdi_display_enable(struct omap_dss_device *dssdev)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int sdi_bridge_attach(struct drm_bridge *bridge,
+			     enum drm_bridge_attach_flags flags)
 {
-	struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+				 bridge, flags);
+}
+
+static enum drm_mode_status
+sdi_bridge_mode_valid(struct drm_bridge *bridge,
+		      const struct drm_display_mode *mode)
+{
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+	unsigned long pixelclock = mode->clock * 1000;
+	struct dispc_clock_info dispc_cinfo;
+	unsigned long fck;
+	int ret;
+
+	if (pixelclock == 0)
+		return MODE_NOCLOCK;
+
+	ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+	if (ret < 0)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+	unsigned long pixelclock = mode->clock * 1000;
+	struct dispc_clock_info dispc_cinfo;
+	unsigned long fck;
+	unsigned long pck;
+	int ret;
+
+	ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+	if (ret < 0)
+		return false;
+
+	pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
+
+	if (pck != pixelclock)
+		dev_dbg(&sdi->pdev->dev,
+			"pixel clock adjusted from %lu Hz to %lu Hz\n",
+			pixelclock, pck);
+
+	adjusted_mode->clock = pck / 1000;
+
+	return true;
+}
+
+static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
+{
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+	sdi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void sdi_bridge_enable(struct drm_bridge *bridge,
+			      struct drm_bridge_state *bridge_state)
+{
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
 	struct dispc_clock_info dispc_cinfo;
 	unsigned long fck;
 	int r;
@@ -181,9 +258,10 @@ static void sdi_display_enable(struct omap_dss_device *dssdev)
 	regulator_disable(sdi->vdds_sdi_reg);
 }
 
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_bridge_disable(struct drm_bridge *bridge,
+			       struct drm_bridge_state *bridge_state)
 {
-	struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+	struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
 
 	dss_mgr_disable(&sdi->output);
 
@@ -194,86 +272,56 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
 	regulator_disable(sdi->vdds_sdi_reg);
 }
 
-static void sdi_set_timings(struct omap_dss_device *dssdev,
-			    const struct drm_display_mode *mode)
-{
-	struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
-	sdi->pixelclock = mode->clock * 1000;
-}
-
-static int sdi_check_timings(struct omap_dss_device *dssdev,
-			     struct drm_display_mode *mode)
-{
-	struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-	struct dispc_clock_info dispc_cinfo;
-	unsigned long pixelclock = mode->clock * 1000;
-	unsigned long fck;
-	unsigned long pck;
-	int r;
-
-	if (pixelclock == 0)
-		return -EINVAL;
-
-	r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
-	if (r)
-		return r;
-
-	pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
-	if (pck != pixelclock) {
-		DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
-			pixelclock, pck);
-
-		mode->clock = pck / 1000;
-	}
-
-	return 0;
-}
-
-static int sdi_connect(struct omap_dss_device *src,
-		       struct omap_dss_device *dst)
-{
-	return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void sdi_disconnect(struct omap_dss_device *src,
-			   struct omap_dss_device *dst)
-{
-	omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops sdi_ops = {
-	.connect = sdi_connect,
-	.disconnect = sdi_disconnect,
-
-	.enable = sdi_display_enable,
-	.disable = sdi_display_disable,
-
-	.check_timings = sdi_check_timings,
-	.set_timings = sdi_set_timings,
+static const struct drm_bridge_funcs sdi_bridge_funcs = {
+	.attach = sdi_bridge_attach,
+	.mode_valid = sdi_bridge_mode_valid,
+	.mode_fixup = sdi_bridge_mode_fixup,
+	.mode_set = sdi_bridge_mode_set,
+	.atomic_enable = sdi_bridge_enable,
+	.atomic_disable = sdi_bridge_disable,
 };
 
+static void sdi_bridge_init(struct sdi_device *sdi)
+{
+	sdi->bridge.funcs = &sdi_bridge_funcs;
+	sdi->bridge.of_node = sdi->pdev->dev.of_node;
+	sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
+
+	drm_bridge_add(&sdi->bridge);
+}
+
+static void sdi_bridge_cleanup(struct sdi_device *sdi)
+{
+	drm_bridge_remove(&sdi->bridge);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
 static int sdi_init_output(struct sdi_device *sdi)
 {
 	struct omap_dss_device *out = &sdi->output;
 	int r;
 
+	sdi_bridge_init(sdi);
+
 	out->dev = &sdi->pdev->dev;
 	out->id = OMAP_DSS_OUTPUT_SDI;
 	out->type = OMAP_DISPLAY_TYPE_SDI;
 	out->name = "sdi.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
 	/* We have SDI only on OMAP3, where it's on port 1 */
-	out->of_ports = BIT(1);
-	out->ops = &sdi_ops;
+	out->of_port = 1;
 	out->owner = THIS_MODULE;
 	out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE	/* 15.5.9.1.2 */
 		       | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
 
-	r = omapdss_device_init_output(out);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &sdi->bridge);
+	if (r < 0) {
+		sdi_bridge_cleanup(sdi);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -284,6 +332,8 @@ static void sdi_uninit_output(struct sdi_device *sdi)
 {
 	omapdss_device_unregister(&sdi->output);
 	omapdss_device_cleanup_output(&sdi->output);
+
+	sdi_bridge_cleanup(sdi);
 }
 
 int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
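
sdi_bridge_mode_valid() rejects modes whose clock cannot be derived at all, while sdi_bridge_mode_fixup() snaps the requested pixel clock to the nearest achievable one. A numeric illustration with hypothetical divider values (not taken from real OMAP3 clock tables):

	/*
	 * requested: mode->clock = 65000 (kHz) -> pixelclock = 65000000 Hz
	 * suppose sdi_calc_clock_div() yields fck = 259200000 Hz,
	 * lck_div = 1, pck_div = 4:
	 *
	 *   pck = 259200000 / 1 / 4 = 64800000 Hz
	 *
	 * => adjusted_mode->clock = 64800, logged by the dev_dbg() above.
	 */
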
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 596a297..766553b 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/mutex.h>
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/string.h>
@@ -26,6 +25,8 @@
 #include <linux/component.h>
 #include <linux/sys_soc.h>
 
+#include <drm/drm_bridge.h>
+
 #include "omapdss.h"
 #include "dss.h"
 
@@ -289,7 +290,6 @@ static const struct drm_display_mode omap_dss_ntsc_mode = {
 struct venc_device {
 	struct platform_device *pdev;
 	void __iomem *base;
-	struct mutex venc_lock;
 	struct regulator *vdda_dac_reg;
 	struct dss_device *dss;
 
@@ -303,9 +303,10 @@ struct venc_device {
 	bool requires_tv_dac_clk;
 
 	struct omap_dss_device output;
+	struct drm_bridge bridge;
 };
 
-#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
+#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
 
 static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
 {
@@ -477,56 +478,6 @@ static void venc_power_off(struct venc_device *venc)
 	venc_runtime_put(venc);
 }
 
-static void venc_display_enable(struct omap_dss_device *dssdev)
-{
-	struct venc_device *venc = dssdev_to_venc(dssdev);
-
-	DSSDBG("venc_display_enable\n");
-
-	mutex_lock(&venc->venc_lock);
-
-	venc_power_on(venc);
-
-	mutex_unlock(&venc->venc_lock);
-}
-
-static void venc_display_disable(struct omap_dss_device *dssdev)
-{
-	struct venc_device *venc = dssdev_to_venc(dssdev);
-
-	DSSDBG("venc_display_disable\n");
-
-	mutex_lock(&venc->venc_lock);
-
-	venc_power_off(venc);
-
-	mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_get_modes(struct omap_dss_device *dssdev,
-			  struct drm_connector *connector)
-{
-	static const struct drm_display_mode *modes[] = {
-		&omap_dss_pal_mode,
-		&omap_dss_ntsc_mode,
-	};
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(modes); ++i) {
-		struct drm_display_mode *mode;
-
-		mode = drm_mode_duplicate(connector->dev, modes[i]);
-		if (!mode)
-			return i;
-
-		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-		drm_mode_set_name(mode);
-		drm_mode_probed_add(connector, mode);
-	}
-
-	return ARRAY_SIZE(modes);
-}
-
 static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
 {
 	if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
@@ -545,57 +496,6 @@ static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
 	return VENC_MODE_UNKNOWN;
 }
 
-static void venc_set_timings(struct omap_dss_device *dssdev,
-			     const struct drm_display_mode *mode)
-{
-	struct venc_device *venc = dssdev_to_venc(dssdev);
-	enum venc_videomode venc_mode = venc_get_videomode(mode);
-
-	DSSDBG("venc_set_timings\n");
-
-	mutex_lock(&venc->venc_lock);
-
-	switch (venc_mode) {
-	default:
-		WARN_ON_ONCE(1);
-		/* Fall-through */
-	case VENC_MODE_PAL:
-		venc->config = &venc_config_pal_trm;
-		break;
-
-	case VENC_MODE_NTSC:
-		venc->config = &venc_config_ntsc_trm;
-		break;
-	}
-
-	dispc_set_tv_pclk(venc->dss->dispc, 13500000);
-
-	mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_check_timings(struct omap_dss_device *dssdev,
-			      struct drm_display_mode *mode)
-{
-	DSSDBG("venc_check_timings\n");
-
-	switch (venc_get_videomode(mode)) {
-	case VENC_MODE_PAL:
-		drm_mode_copy(mode, &omap_dss_pal_mode);
-		break;
-
-	case VENC_MODE_NTSC:
-		drm_mode_copy(mode, &omap_dss_ntsc_mode);
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-	drm_mode_set_name(mode);
-	return 0;
-}
-
 static int venc_dump_regs(struct seq_file *s, void *p)
 {
 	struct venc_device *venc = s->private;
@@ -673,31 +573,149 @@ static int venc_get_clocks(struct venc_device *venc)
 	return 0;
 }
 
-static int venc_connect(struct omap_dss_device *src,
-			struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int venc_bridge_attach(struct drm_bridge *bridge,
+			      enum drm_bridge_attach_flags flags)
 {
-	return omapdss_device_connect(dst->dss, dst, dst->next);
+	struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+				 bridge, flags);
 }
 
-static void venc_disconnect(struct omap_dss_device *src,
-			    struct omap_dss_device *dst)
+static enum drm_mode_status
+venc_bridge_mode_valid(struct drm_bridge *bridge,
+		       const struct drm_display_mode *mode)
 {
-	omapdss_device_disconnect(dst, dst->next);
+	switch (venc_get_videomode(mode)) {
+	case VENC_MODE_PAL:
+	case VENC_MODE_NTSC:
+		return MODE_OK;
+
+	default:
+		return MODE_BAD;
+	}
 }
 
-static const struct omap_dss_device_ops venc_ops = {
-	.connect = venc_connect,
-	.disconnect = venc_disconnect,
+static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	const struct drm_display_mode *venc_mode;
 
-	.enable = venc_display_enable,
-	.disable = venc_display_disable,
+	switch (venc_get_videomode(adjusted_mode)) {
+	case VENC_MODE_PAL:
+		venc_mode = &omap_dss_pal_mode;
+		break;
 
-	.check_timings = venc_check_timings,
-	.set_timings = venc_set_timings,
+	case VENC_MODE_NTSC:
+		venc_mode = &omap_dss_ntsc_mode;
+		break;
 
-	.get_modes = venc_get_modes,
+	default:
+		return false;
+	}
+
+	drm_mode_copy(adjusted_mode, venc_mode);
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+	drm_mode_set_name(adjusted_mode);
+
+	return true;
+}
+
+static void venc_bridge_mode_set(struct drm_bridge *bridge,
+				 const struct drm_display_mode *mode,
+				 const struct drm_display_mode *adjusted_mode)
+{
+	struct venc_device *venc = drm_bridge_to_venc(bridge);
+	enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
+
+	switch (venc_mode) {
+	default:
+		WARN_ON_ONCE(1);
+		/* Fall-through */
+	case VENC_MODE_PAL:
+		venc->config = &venc_config_pal_trm;
+		break;
+
+	case VENC_MODE_NTSC:
+		venc->config = &venc_config_ntsc_trm;
+		break;
+	}
+
+	dispc_set_tv_pclk(venc->dss->dispc, 13500000);
+}
+
+static void venc_bridge_enable(struct drm_bridge *bridge)
+{
+	struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+	venc_power_on(venc);
+}
+
+static void venc_bridge_disable(struct drm_bridge *bridge)
+{
+	struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+	venc_power_off(venc);
+}
+
+static int venc_bridge_get_modes(struct drm_bridge *bridge,
+				 struct drm_connector *connector)
+{
+	static const struct drm_display_mode *modes[] = {
+		&omap_dss_pal_mode,
+		&omap_dss_ntsc_mode,
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(connector->dev, modes[i]);
+		if (!mode)
+			return i;
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+	}
+
+	return ARRAY_SIZE(modes);
+}
+
+static const struct drm_bridge_funcs venc_bridge_funcs = {
+	.attach = venc_bridge_attach,
+	.mode_valid = venc_bridge_mode_valid,
+	.mode_fixup = venc_bridge_mode_fixup,
+	.mode_set = venc_bridge_mode_set,
+	.enable = venc_bridge_enable,
+	.disable = venc_bridge_disable,
+	.get_modes = venc_bridge_get_modes,
 };
 
+static void venc_bridge_init(struct venc_device *venc)
+{
+	venc->bridge.funcs = &venc_bridge_funcs;
+	venc->bridge.of_node = venc->pdev->dev.of_node;
+	venc->bridge.ops = DRM_BRIDGE_OP_MODES;
+	venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
+	venc->bridge.interlace_allowed = true;
+
+	drm_bridge_add(&venc->bridge);
+}
+
+static void venc_bridge_cleanup(struct venc_device *venc)
+{
+	drm_bridge_remove(&venc->bridge);
+}
+
 /* -----------------------------------------------------------------------------
  * Component Bind & Unbind
  */
@@ -747,19 +765,22 @@ static int venc_init_output(struct venc_device *venc)
 	struct omap_dss_device *out = &venc->output;
 	int r;
 
+	venc_bridge_init(venc);
+
 	out->dev = &venc->pdev->dev;
 	out->id = OMAP_DSS_OUTPUT_VENC;
 	out->type = OMAP_DISPLAY_TYPE_VENC;
 	out->name = "venc.0";
 	out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
-	out->ops = &venc_ops;
 	out->owner = THIS_MODULE;
-	out->of_ports = BIT(0);
+	out->of_port = 0;
 	out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
 
-	r = omapdss_device_init_output(out);
-	if (r < 0)
+	r = omapdss_device_init_output(out, &venc->bridge);
+	if (r < 0) {
+		venc_bridge_cleanup(venc);
 		return r;
+	}
 
 	omapdss_device_register(out);
 
@@ -770,6 +791,8 @@ static void venc_uninit_output(struct venc_device *venc)
 {
 	omapdss_device_unregister(&venc->output);
 	omapdss_device_cleanup_output(&venc->output);
+
+	venc_bridge_cleanup(venc);
 }
 
 static int venc_probe_of(struct venc_device *venc)
@@ -839,8 +862,6 @@ static int venc_probe(struct platform_device *pdev)
 	if (soc_device_match(venc_soc_devices))
 		venc->requires_tv_dac_clk = true;
 
-	mutex_init(&venc->venc_lock);
-
 	venc->config = &venc_config_pal_trm;
 
 	venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
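
Unlike the hdmi and sdi outputs, venc installs the legacy .enable/.disable bridge hooks rather than the atomic_* variants: it has no use for atomic state, and the bridge chain helpers fall back to the legacy hooks when the atomic ones are absent (paraphrased from drm_bridge.c):

	if (bridge->funcs->atomic_enable)
		bridge->funcs->atomic_enable(bridge, old_bridge_state);
	else if (bridge->funcs->enable)
		bridge->funcs->enable(bridge);

Dropping venc_lock follows from these operations being serialized by the atomic commit machinery rather than called concurrently; that rationale is inferred from the conversion, not spelled out in the patch.
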
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 94cded3..5287645 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -6,7 +6,6 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
 
 #include "omap_drv.h"
@@ -20,124 +19,12 @@
 struct omap_connector {
 	struct drm_connector base;
 	struct omap_dss_device *output;
-	struct omap_dss_device *hpd;
-	bool hdmi_mode;
 };
 
-static void omap_connector_hpd_notify(struct drm_connector *connector,
-				      enum drm_connector_status status)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *dssdev;
-
-	if (status != connector_status_disconnected)
-		return;
-
-	/*
-	 * Notify all devics in the pipeline of disconnection. This is required
-	 * to let the HDMI encoders reset their internal state related to
-	 * connection status, such as the CEC address.
-	 */
-	for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
-		if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
-			dssdev->ops->hdmi.lost_hotplug(dssdev);
-	}
-}
-
-static void omap_connector_hpd_cb(void *cb_data,
-				  enum drm_connector_status status)
-{
-	struct omap_connector *omap_connector = cb_data;
-	struct drm_connector *connector = &omap_connector->base;
-	struct drm_device *dev = connector->dev;
-	enum drm_connector_status old_status;
-
-	mutex_lock(&dev->mode_config.mutex);
-	old_status = connector->status;
-	connector->status = status;
-	mutex_unlock(&dev->mode_config.mutex);
-
-	if (old_status == status)
-		return;
-
-	omap_connector_hpd_notify(connector, status);
-
-	drm_kms_helper_hotplug_event(dev);
-}
-
-void omap_connector_enable_hpd(struct drm_connector *connector)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *hpd = omap_connector->hpd;
-
-	if (hpd)
-		hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
-					  omap_connector);
-}
-
-void omap_connector_disable_hpd(struct drm_connector *connector)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *hpd = omap_connector->hpd;
-
-	if (hpd)
-		hpd->ops->unregister_hpd_cb(hpd);
-}
-
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-
-	return omap_connector->hdmi_mode;
-}
-
-static struct omap_dss_device *
-omap_connector_find_device(struct drm_connector *connector,
-			   enum omap_dss_device_ops_flag op)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *dssdev = NULL;
-	struct omap_dss_device *d;
-
-	for (d = omap_connector->output; d; d = d->next) {
-		if (d->ops_flags & op)
-			dssdev = d;
-	}
-
-	return dssdev;
-}
-
 static enum drm_connector_status omap_connector_detect(
 		struct drm_connector *connector, bool force)
 {
-	struct omap_dss_device *dssdev;
-	enum drm_connector_status status;
-
-	dssdev = omap_connector_find_device(connector,
-					    OMAP_DSS_DEVICE_OP_DETECT);
-
-	if (dssdev) {
-		status = dssdev->ops->detect(dssdev)
-		       ? connector_status_connected
-		       : connector_status_disconnected;
-
-		omap_connector_hpd_notify(connector, status);
-	} else {
-		switch (connector->connector_type) {
-		case DRM_MODE_CONNECTOR_DPI:
-		case DRM_MODE_CONNECTOR_LVDS:
-		case DRM_MODE_CONNECTOR_DSI:
-			status = connector_status_connected;
-			break;
-		default:
-			status = connector_status_unknown;
-			break;
-		}
-	}
-
-	VERB("%s: %d (force=%d)", connector->name, status, force);
-
-	return status;
+	return connector_status_connected;
 }
 
 static void omap_connector_destroy(struct drm_connector *connector)
@@ -146,14 +33,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
 
 	DBG("%s", connector->name);
 
-	if (omap_connector->hpd) {
-		struct omap_dss_device *hpd = omap_connector->hpd;
-
-		hpd->ops->unregister_hpd_cb(hpd);
-		omapdss_device_put(hpd);
-		omap_connector->hpd = NULL;
-	}
-
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
@@ -162,81 +41,27 @@ static void omap_connector_destroy(struct drm_connector *connector)
 	kfree(omap_connector);
 }
 
-#define MAX_EDID  512
-
-static int omap_connector_get_modes_edid(struct drm_connector *connector,
-					 struct omap_dss_device *dssdev)
-{
-	struct omap_connector *omap_connector = to_omap_connector(connector);
-	enum drm_connector_status status;
-	void *edid;
-	int n;
-
-	status = omap_connector_detect(connector, false);
-	if (status != connector_status_connected)
-		goto no_edid;
-
-	edid = kzalloc(MAX_EDID, GFP_KERNEL);
-	if (!edid)
-		goto no_edid;
-
-	if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
-	    !drm_edid_is_valid(edid)) {
-		kfree(edid);
-		goto no_edid;
-	}
-
-	drm_connector_update_edid_property(connector, edid);
-	n = drm_add_edid_modes(connector, edid);
-
-	omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
-
-	kfree(edid);
-	return n;
-
-no_edid:
-	drm_connector_update_edid_property(connector, NULL);
-	return 0;
-}
-
 static int omap_connector_get_modes(struct drm_connector *connector)
 {
 	struct omap_connector *omap_connector = to_omap_connector(connector);
-	struct omap_dss_device *dssdev;
+	struct omap_dss_device *dssdev = NULL;
+	struct omap_dss_device *d;
 
 	DBG("%s", connector->name);
 
 	/*
-	 * If display exposes EDID, then we parse that in the normal way to
-	 * build table of supported modes.
+	 * If the display pipeline reports modes (e.g. with a fixed resolution
+	 * panel or an analog TV output), query it.
 	 */
-	dssdev = omap_connector_find_device(connector,
-					    OMAP_DSS_DEVICE_OP_EDID);
-	if (dssdev)
-		return omap_connector_get_modes_edid(connector, dssdev);
+	for (d = omap_connector->output; d; d = d->next) {
+		if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
+			dssdev = d;
+	}
 
-	/*
-	 * Otherwise if the display pipeline reports modes (e.g. with a fixed
-	 * resolution panel or an analog TV output), query it.
-	 */
-	dssdev = omap_connector_find_device(connector,
-					    OMAP_DSS_DEVICE_OP_MODES);
 	if (dssdev)
 		return dssdev->ops->get_modes(dssdev, connector);
 
-	/*
-	 * Otherwise if the display pipeline uses a drm_panel, we delegate the
-	 * operation to the panel API.
-	 */
-	if (omap_connector->output->panel)
-		return drm_panel_get_modes(omap_connector->output->panel,
-					   connector);
-
-	/*
-	 * We can't retrieve modes, which can happen for instance for a DVI or
-	 * VGA output with the DDC bus unconnected. The KMS core will add the
-	 * default modes.
-	 */
+	/* We can't retrieve modes. The KMS core will add the default modes. */
 	return 0;
 }
 
@@ -249,7 +74,7 @@ enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
 	drm_mode_copy(adjusted_mode, mode);
 
 	for (; dssdev; dssdev = dssdev->next) {
-		if (!dssdev->ops->check_timings)
+		if (!dssdev->ops || !dssdev->ops->check_timings)
 			continue;
 
 		ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
@@ -298,35 +123,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
 	.mode_valid = omap_connector_mode_valid,
 };
 
-static int omap_connector_get_type(struct omap_dss_device *output)
-{
-	struct omap_dss_device *display;
-	enum omap_display_type type;
-
-	display = omapdss_display_get(output);
-	type = display->type;
-	omapdss_device_put(display);
-
-	switch (type) {
-	case OMAP_DISPLAY_TYPE_HDMI:
-		return DRM_MODE_CONNECTOR_HDMIA;
-	case OMAP_DISPLAY_TYPE_DVI:
-		return DRM_MODE_CONNECTOR_DVID;
-	case OMAP_DISPLAY_TYPE_DSI:
-		return DRM_MODE_CONNECTOR_DSI;
-	case OMAP_DISPLAY_TYPE_DPI:
-	case OMAP_DISPLAY_TYPE_DBI:
-		return DRM_MODE_CONNECTOR_DPI;
-	case OMAP_DISPLAY_TYPE_VENC:
-		/* TODO: This could also be composite */
-		return DRM_MODE_CONNECTOR_SVIDEO;
-	case OMAP_DISPLAY_TYPE_SDI:
-		return DRM_MODE_CONNECTOR_LVDS;
-	default:
-		return DRM_MODE_CONNECTOR_Unknown;
-	}
-}
-
 /* initialize connector */
 struct drm_connector *omap_connector_init(struct drm_device *dev,
 					  struct omap_dss_device *output,
@@ -334,7 +130,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
 {
 	struct drm_connector *connector = NULL;
 	struct omap_connector *omap_connector;
-	struct omap_dss_device *dssdev;
 
 	DBG("%s", output->name);
 
@@ -349,27 +144,9 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
 	connector->doublescan_allowed = 0;
 
 	drm_connector_init(dev, connector, &omap_connector_funcs,
-			   omap_connector_get_type(output));
+			   DRM_MODE_CONNECTOR_DSI);
 	drm_connector_helper_add(connector, &omap_connector_helper_funcs);
 
-	/*
-	 * Initialize connector status handling. First try to find a device that
-	 * supports hot-plug reporting. If it fails, fall back to a device that
-	 * support polling. If that fails too, we don't support hot-plug
-	 * detection at all.
-	 */
-	dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
-	if (dssdev) {
-		omap_connector->hpd = omapdss_device_get(dssdev);
-		connector->polled = DRM_CONNECTOR_POLL_HPD;
-	} else {
-		dssdev = omap_connector_find_device(connector,
-						    OMAP_DSS_DEVICE_OP_DETECT);
-		if (dssdev)
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-					    DRM_CONNECTOR_POLL_DISCONNECT;
-	}
-
 	return connector;
 
 fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 13607bd..0ecd4f16 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -21,9 +21,6 @@ struct omap_dss_device;
 struct drm_connector *omap_connector_init(struct drm_device *dev,
 					  struct omap_dss_device *output,
 					  struct drm_encoder *encoder);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void omap_connector_enable_hpd(struct drm_connector *connector);
-void omap_connector_disable_hpd(struct drm_connector *connector);
 enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
 					const struct drm_display_mode *mode,
 					struct drm_display_mode *adjusted_mode);
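
With detection, HPD, and EDID all delegated to the bridge connector, omap_connector survives only for pipelines that still have an omapdss-managed sink (in practice DSI panels). That is why omap_connector_detect() now unconditionally reports connected and the connector type is hardcoded to DRM_MODE_CONNECTOR_DSI.
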
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 3c5ddbf..fce7e94 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -831,7 +831,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 	 * OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
 	 * tables so lets use that. Size of HW gamma table can be
 	 * extracted with dispc_mgr_gamma_size(). If it returns 0
-	 * gamma table is not supprted.
+	 * gamma table is not supported.
 	 */
 	if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
 		unsigned int gamma_lut_size = 256;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5eb..42ec51b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -82,12 +82,11 @@ static const u32 reg[][4] = {
 
 static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
 {
-	struct dma_device *dma_dev = dmm->wa_dma_chan->device;
 	struct dma_async_tx_descriptor *tx;
 	enum dma_status status;
 	dma_cookie_t cookie;
 
-	tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+	tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
 	if (!tx) {
 		dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
 		return -EIO;
@@ -99,7 +98,6 @@ static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
 		return -EIO;
 	}
 
-	dma_async_issue_pending(dmm->wa_dma_chan);
 	status = dma_sync_wait(dmm->wa_dma_chan, cookie);
 	if (status != DMA_COMPLETE)
 		dev_err(dmm->dev, "i878 wa DMA copy failure\n");
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d2750f6..cdafd7e 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -12,6 +12,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_file.h>
@@ -134,9 +135,6 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
 	for (i = 0; i < priv->num_pipes; i++) {
 		struct omap_drm_pipeline *pipe = &priv->pipes[i];
 
-		if (pipe->output->panel)
-			drm_panel_detach(pipe->output->panel);
-
 		omapdss_device_disconnect(NULL, pipe->output);
 
 		omapdss_device_put(pipe->output);
@@ -209,11 +207,12 @@ static int omap_display_id(struct omap_dss_device *output)
 	struct device_node *node = NULL;
 
 	if (output->next) {
-		struct omap_dss_device *display;
+		struct omap_dss_device *display = output;
 
-		display = omapdss_display_get(output);
+		while (display->next)
+			display = display->next;
+
 		node = display->dev->of_node;
-		omapdss_device_put(display);
 	} else if (output->bridge) {
 		struct drm_bridge *bridge = output->bridge;
 
@@ -221,8 +220,6 @@ static int omap_display_id(struct omap_dss_device *output)
 			bridge = drm_bridge_get_next_bridge(bridge);
 
 		node = bridge->of_node;
-	} else if (output->panel) {
-		node = output->panel->dev->of_node;
 	}
 
 	return node ? of_alias_get_id(node, "display") : -ENODEV;
@@ -297,9 +294,14 @@ static int omap_modeset_init(struct drm_device *dev)
 
 		if (pipe->output->bridge) {
 			ret = drm_bridge_attach(pipe->encoder,
-						pipe->output->bridge, NULL);
-			if (ret < 0)
+						pipe->output->bridge, NULL,
+						DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+			if (ret < 0) {
+				dev_err(priv->dev,
+					"unable to attach bridge %pOF\n",
+					pipe->output->bridge->of_node);
 				return ret;
+			}
 		}
 
 		id = omap_display_id(pipe->output);
@@ -330,22 +332,30 @@ static int omap_modeset_init(struct drm_device *dev)
 		struct drm_encoder *encoder = pipe->encoder;
 		struct drm_crtc *crtc;
 
-		if (!pipe->output->bridge) {
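+		/*
+		 * Outputs that still use an omap_dss_device chain (DSI) get an
+		 * omapdrm connector; all other outputs get a connector created
+		 * from the bridge chain.
+		 */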
+		if (pipe->output->next) {
 			pipe->connector = omap_connector_init(dev, pipe->output,
 							      encoder);
 			if (!pipe->connector)
 				return -ENOMEM;
-
-			drm_connector_attach_encoder(pipe->connector, encoder);
-
-			if (pipe->output->panel) {
-				ret = drm_panel_attach(pipe->output->panel,
-						       pipe->connector);
-				if (ret < 0)
-					return ret;
+		} else {
+			pipe->connector = drm_bridge_connector_init(dev, encoder);
+			if (IS_ERR(pipe->connector)) {
+				dev_err(priv->dev,
+					"unable to create bridge connector for %s\n",
+					pipe->output->name);
+				return PTR_ERR(pipe->connector);
 			}
 		}
 
+		drm_connector_attach_encoder(pipe->connector, encoder);
+
+		if (pipe->output->panel) {
+			ret = drm_panel_attach(pipe->output->panel,
+					       pipe->connector);
+			if (ret < 0)
+				return ret;
+		}
+
 		crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
 		if (IS_ERR(crtc))
 			return PTR_ERR(crtc);
@@ -382,6 +392,23 @@ static int omap_modeset_init(struct drm_device *dev)
 	return 0;
 }
 
+static void omap_modeset_fini(struct drm_device *ddev)
+{
+	struct omap_drm_private *priv = ddev->dev_private;
+	unsigned int i;
+
+	omap_drm_irq_uninstall(ddev);
+
+	for (i = 0; i < priv->num_pipes; i++) {
+		struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+		if (pipe->output->panel)
+			drm_panel_detach(pipe->output->panel);
+	}
+
+	drm_mode_config_cleanup(ddev);
+}
+
 /*
  * Enable the HPD in external components if supported
  */
@@ -391,8 +418,13 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
 	unsigned int i;
 
 	for (i = 0; i < priv->num_pipes; i++) {
-		if (priv->pipes[i].connector)
-			omap_connector_enable_hpd(priv->pipes[i].connector);
+		struct drm_connector *connector = priv->pipes[i].connector;
+
+		if (!connector)
+			continue;
+
+		if (priv->pipes[i].output->bridge)
+			drm_bridge_connector_enable_hpd(connector);
 	}
 }
 
@@ -405,8 +437,13 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
 	unsigned int i;
 
 	for (i = 0; i < priv->num_pipes; i++) {
-		if (priv->pipes[i].connector)
-			omap_connector_disable_hpd(priv->pipes[i].connector);
+		struct drm_connector *connector = priv->pipes[i].connector;
+
+		if (!connector)
+			continue;
+
+		if (priv->pipes[i].output->bridge)
+			drm_bridge_connector_disable_hpd(connector);
 	}
 }
 
@@ -629,8 +666,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 
 	omap_fbdev_fini(ddev);
 err_cleanup_modeset:
-	drm_mode_config_cleanup(ddev);
-	omap_drm_irq_uninstall(ddev);
+	omap_modeset_fini(ddev);
 err_gem_deinit:
 	omap_gem_deinit(ddev);
 	destroy_workqueue(priv->wq);
@@ -655,9 +691,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
 
 	drm_atomic_helper_shutdown(ddev);
 
-	drm_mode_config_cleanup(ddev);
-
-	omap_drm_irq_uninstall(ddev);
+	omap_modeset_fini(ddev);
 	omap_gem_deinit(ddev);
 
 	destroy_workqueue(priv->wq);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4f2165a..ae4b867 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -10,7 +10,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
 
 #include "omap_drv.h"
 
@@ -70,30 +69,6 @@ static void omap_encoder_update_videomode_flags(struct videomode *vm,
 	}
 }
 
-static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
-				       struct drm_encoder *encoder,
-				       struct drm_display_mode *adjusted_mode)
-{
-	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-	struct omap_dss_device *dssdev = omap_encoder->output;
-	bool hdmi_mode;
-
-	hdmi_mode = omap_connector_get_hdmi_mode(connector);
-
-	if (dssdev->ops->hdmi.set_hdmi_mode)
-		dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
-	if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
-		struct hdmi_avi_infoframe avi;
-		int r;
-
-		r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
-							     adjusted_mode);
-		if (r == 0)
-			dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
-	}
-}
-
 static void omap_encoder_mode_set(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
@@ -138,17 +113,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
 	bus_flags = connector->display_info.bus_flags;
 	omap_encoder_update_videomode_flags(&vm, bus_flags);
 
-	/* Set timings for all devices in the display pipeline. */
+	/* Set timings for the dss manager. */
 	dss_mgr_set_timings(output, &vm);
-
-	for (dssdev = output; dssdev; dssdev = dssdev->next) {
-		if (dssdev->ops->set_timings)
-			dssdev->ops->set_timings(dssdev, adjusted_mode);
-	}
-
-	/* Set the HDMI mode and HDMI infoframe if applicable. */
-	if (output->type == OMAP_DISPLAY_TYPE_HDMI)
-		omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
 }
 
 static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -159,33 +125,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
 
 	dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
 
-	/* Disable the panel if present. */
-	if (dssdev->panel) {
-		drm_panel_disable(dssdev->panel);
-		drm_panel_unprepare(dssdev->panel);
-	}
-
 	/*
 	 * Disable the chain of external devices, starting at the one at the
-	 * internal encoder's output.
+	 * internal encoder's output. This is used for DSI outputs only, as
+	 * dssdev->next is NULL for all other outputs.
 	 */
 	omapdss_device_disable(dssdev->next);
-
-	/*
-	 * Disable the internal encoder. This will disable the DSS output. The
-	 * DSI is treated as an exception as DSI pipelines still use the legacy
-	 * flow where the pipeline output controls the encoder.
-	 */
-	if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
-		dssdev->ops->disable(dssdev);
-		dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-	}
-
-	/*
-	 * Perform the post-disable operations on the chain of external devices
-	 * to complete the display pipeline disable.
-	 */
-	omapdss_device_post_disable(dssdev->next);
 }
 
 static void omap_encoder_enable(struct drm_encoder *encoder)
@@ -196,30 +141,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
 
 	dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
 
-	/* Prepare the chain of external devices for pipeline enable. */
-	omapdss_device_pre_enable(dssdev->next);
-
-	/*
-	 * Enable the internal encoder. This will enable the DSS output. The
-	 * DSI is treated as an exception as DSI pipelines still use the legacy
-	 * flow where the pipeline output controls the encoder.
-	 */
-	if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
-		dssdev->ops->enable(dssdev);
-		dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-	}
-
 	/*
 	 * Enable the chain of external devices, starting at the one at the
-	 * internal encoder's output.
+	 * internal encoder's output. This is used for DSI outputs only, as
+	 * dssdev->next is NULL for all other outputs.
 	 */
 	omapdss_device_enable(dssdev->next);
-
-	/* Enable the panel if present. */
-	if (dssdev->panel) {
-		drm_panel_prepare(dssdev->panel);
-		drm_panel_enable(dssdev->panel);
-	}
 }
 
 static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b06e5cb..09a8491 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -242,14 +242,10 @@ void omap_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
+	ret = drm_fb_helper_init(dev, helper);
 	if (ret)
 		goto fail;
 
-	ret = drm_fb_helper_single_add_all_connectors(helper);
-	if (ret)
-		goto fini;
-
 	ret = drm_fb_helper_initial_config(helper, 32);
 	if (ret)
 		goto fini;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index ae44ac2..a1723c1 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -29,6 +29,15 @@
 	  24 bit RGB per pixel. It provides a MIPI DSI interface to
 	  the host and has a built-in LED backlight.
 
+config DRM_PANEL_BOE_TV101WUM_NL6
+	tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the BOE TV101WUM and
+	  AUO KD101N80 45NA WUXGA DSI video mode panels.
+
 config DRM_PANEL_LVDS
 	tristate "Generic LVDS panel driver"
 	depends on OF
@@ -50,6 +59,25 @@
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
+config DRM_PANEL_ELIDA_KD35T133
+	tristate "Elida KD35T133 panel driver"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Elida
+	  KD35T133 controller for 320x480 LCD panels with MIPI-DSI
+	  system interfaces.
+
+config DRM_PANEL_FEIXIN_K101_IM2BA02
+	tristate "Feixin K101 IM2BA02 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Feixin K101 IM2BA02
+	  4-lane 800x1280 MIPI DSI panel.
+
 config DRM_PANEL_FEIYANG_FY07024DI26A30D
 	tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
 	depends on OF
@@ -149,6 +177,16 @@
 	  panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
 	  as a module, choose M here.
 
+config DRM_PANEL_NOVATEK_NT35510
+	tristate "Novatek NT35510 RGB panel driver"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the panels built
+	  around the Novatek NT35510 display controller, such as some
+	  Hydis panels.
+
 config DRM_PANEL_NOVATEK_NT39016
 	tristate "Novatek NT39016 RGB/SPI panel"
 	depends on OF && SPI
@@ -275,6 +313,12 @@
 	  Say Y here if you want to enable support for Samsung S6E63M0
 	  AMOLED LCD panel.
 
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+	tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+	depends on OF
+	select DRM_MIPI_DSI
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable support for the Samsung AMS452EF01
+	  panel, which uses a Samsung S6E88A0 DSI video mode controller.
+
 config DRM_PANEL_SAMSUNG_S6E8AA0
 	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7c4d3c5..96a883c 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
 obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
+obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
 obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -13,6 +16,7 @@
 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
 obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
 obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
 obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
 obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -28,6 +32,7 @@
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
 obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
new file mode 100644
index 0000000..48a1642
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct panel_desc {
+	const struct drm_display_mode *modes;
+	unsigned int bpc;
+
+	/**
+	 * @width_mm: width of the panel's active display area
+	 * @height_mm: height of the panel's active display area
+	 */
+	struct {
+		unsigned int width_mm;
+		unsigned int height_mm;
+	} size;
+
+	unsigned long mode_flags;
+	enum mipi_dsi_pixel_format format;
+	const struct panel_init_cmd *init_cmds;
+	unsigned int lanes;
+	bool discharge_on_disable;
+};
+
+struct boe_panel {
+	struct drm_panel base;
+	struct mipi_dsi_device *dsi;
+
+	const struct panel_desc *desc;
+
+	struct regulator *pp1800;
+	struct regulator *avee;
+	struct regulator *avdd;
+	struct gpio_desc *enable_gpio;
+
+	bool prepared;
+};
+
+enum dsi_cmd_type {
+	INIT_DCS_CMD,
+	DELAY_CMD,
+};
+
+struct panel_init_cmd {
+	enum dsi_cmd_type type;
+	size_t len;
+	const char *data;
+};
+
+#define _INIT_DCS_CMD(...) { \
+	.type = INIT_DCS_CMD, \
+	.len = sizeof((char[]){__VA_ARGS__}), \
+	.data = (char[]){__VA_ARGS__} }
+
+#define _INIT_DELAY_CMD(...) { \
+	.type = DELAY_CMD,\
+	.len = sizeof((char[]){__VA_ARGS__}), \
+	.data = (char[]){__VA_ARGS__} }
+
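+/*
+ * For DELAY_CMD entries data[0] is the delay in milliseconds; for
+ * INIT_DCS_CMD entries data[0] is the DCS command and the remaining bytes
+ * are its parameters (see boe_panel_init_dcs_cmd()).
+ */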
+static const struct panel_init_cmd boe_init_cmd[] = {
+	_INIT_DELAY_CMD(24),
+	_INIT_DCS_CMD(0xB0, 0x05),
+	_INIT_DCS_CMD(0xB1, 0xE5),
+	_INIT_DCS_CMD(0xB3, 0x52),
+	_INIT_DCS_CMD(0xB0, 0x00),
+	_INIT_DCS_CMD(0xB3, 0x88),
+	_INIT_DCS_CMD(0xB0, 0x04),
+	_INIT_DCS_CMD(0xB8, 0x00),
+	_INIT_DCS_CMD(0xB0, 0x00),
+	_INIT_DCS_CMD(0xB6, 0x03),
+	_INIT_DCS_CMD(0xBA, 0x8B),
+	_INIT_DCS_CMD(0xBF, 0x1A),
+	_INIT_DCS_CMD(0xC0, 0x0F),
+	_INIT_DCS_CMD(0xC2, 0x0C),
+	_INIT_DCS_CMD(0xC3, 0x02),
+	_INIT_DCS_CMD(0xC4, 0x0C),
+	_INIT_DCS_CMD(0xC5, 0x02),
+	_INIT_DCS_CMD(0xB0, 0x01),
+	_INIT_DCS_CMD(0xE0, 0x26),
+	_INIT_DCS_CMD(0xE1, 0x26),
+	_INIT_DCS_CMD(0xDC, 0x00),
+	_INIT_DCS_CMD(0xDD, 0x00),
+	_INIT_DCS_CMD(0xCC, 0x26),
+	_INIT_DCS_CMD(0xCD, 0x26),
+	_INIT_DCS_CMD(0xC8, 0x00),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xD2, 0x03),
+	_INIT_DCS_CMD(0xD3, 0x03),
+	_INIT_DCS_CMD(0xE6, 0x04),
+	_INIT_DCS_CMD(0xE7, 0x04),
+	_INIT_DCS_CMD(0xC4, 0x09),
+	_INIT_DCS_CMD(0xC5, 0x09),
+	_INIT_DCS_CMD(0xD8, 0x0A),
+	_INIT_DCS_CMD(0xD9, 0x0A),
+	_INIT_DCS_CMD(0xC2, 0x0B),
+	_INIT_DCS_CMD(0xC3, 0x0B),
+	_INIT_DCS_CMD(0xD6, 0x0C),
+	_INIT_DCS_CMD(0xD7, 0x0C),
+	_INIT_DCS_CMD(0xC0, 0x05),
+	_INIT_DCS_CMD(0xC1, 0x05),
+	_INIT_DCS_CMD(0xD4, 0x06),
+	_INIT_DCS_CMD(0xD5, 0x06),
+	_INIT_DCS_CMD(0xCA, 0x07),
+	_INIT_DCS_CMD(0xCB, 0x07),
+	_INIT_DCS_CMD(0xDE, 0x08),
+	_INIT_DCS_CMD(0xDF, 0x08),
+	_INIT_DCS_CMD(0xB0, 0x02),
+	_INIT_DCS_CMD(0xC0, 0x00),
+	_INIT_DCS_CMD(0xC1, 0x0D),
+	_INIT_DCS_CMD(0xC2, 0x17),
+	_INIT_DCS_CMD(0xC3, 0x26),
+	_INIT_DCS_CMD(0xC4, 0x31),
+	_INIT_DCS_CMD(0xC5, 0x1C),
+	_INIT_DCS_CMD(0xC6, 0x2C),
+	_INIT_DCS_CMD(0xC7, 0x33),
+	_INIT_DCS_CMD(0xC8, 0x31),
+	_INIT_DCS_CMD(0xC9, 0x37),
+	_INIT_DCS_CMD(0xCA, 0x37),
+	_INIT_DCS_CMD(0xCB, 0x37),
+	_INIT_DCS_CMD(0xCC, 0x39),
+	_INIT_DCS_CMD(0xCD, 0x2E),
+	_INIT_DCS_CMD(0xCE, 0x2F),
+	_INIT_DCS_CMD(0xCF, 0x2F),
+	_INIT_DCS_CMD(0xD0, 0x07),
+	_INIT_DCS_CMD(0xD2, 0x00),
+	_INIT_DCS_CMD(0xD3, 0x0D),
+	_INIT_DCS_CMD(0xD4, 0x17),
+	_INIT_DCS_CMD(0xD5, 0x26),
+	_INIT_DCS_CMD(0xD6, 0x31),
+	_INIT_DCS_CMD(0xD7, 0x3F),
+	_INIT_DCS_CMD(0xD8, 0x3F),
+	_INIT_DCS_CMD(0xD9, 0x3F),
+	_INIT_DCS_CMD(0xDA, 0x3F),
+	_INIT_DCS_CMD(0xDB, 0x37),
+	_INIT_DCS_CMD(0xDC, 0x37),
+	_INIT_DCS_CMD(0xDD, 0x37),
+	_INIT_DCS_CMD(0xDE, 0x39),
+	_INIT_DCS_CMD(0xDF, 0x2E),
+	_INIT_DCS_CMD(0xE0, 0x2F),
+	_INIT_DCS_CMD(0xE1, 0x2F),
+	_INIT_DCS_CMD(0xE2, 0x07),
+	_INIT_DCS_CMD(0xB0, 0x03),
+	_INIT_DCS_CMD(0xC8, 0x0B),
+	_INIT_DCS_CMD(0xC9, 0x07),
+	_INIT_DCS_CMD(0xC3, 0x00),
+	_INIT_DCS_CMD(0xE7, 0x00),
+	_INIT_DCS_CMD(0xC5, 0x2A),
+	_INIT_DCS_CMD(0xDE, 0x2A),
+	_INIT_DCS_CMD(0xCA, 0x43),
+	_INIT_DCS_CMD(0xC9, 0x07),
+	_INIT_DCS_CMD(0xE4, 0xC0),
+	_INIT_DCS_CMD(0xE5, 0x0D),
+	_INIT_DCS_CMD(0xCB, 0x00),
+	_INIT_DCS_CMD(0xB0, 0x06),
+	_INIT_DCS_CMD(0xB8, 0xA5),
+	_INIT_DCS_CMD(0xC0, 0xA5),
+	_INIT_DCS_CMD(0xC7, 0x0F),
+	_INIT_DCS_CMD(0xD5, 0x32),
+	_INIT_DCS_CMD(0xB8, 0x00),
+	_INIT_DCS_CMD(0xC0, 0x00),
+	_INIT_DCS_CMD(0xBC, 0x00),
+	_INIT_DCS_CMD(0xB0, 0x07),
+	_INIT_DCS_CMD(0xB1, 0x00),
+	_INIT_DCS_CMD(0xB2, 0x02),
+	_INIT_DCS_CMD(0xB3, 0x0F),
+	_INIT_DCS_CMD(0xB4, 0x25),
+	_INIT_DCS_CMD(0xB5, 0x39),
+	_INIT_DCS_CMD(0xB6, 0x4E),
+	_INIT_DCS_CMD(0xB7, 0x72),
+	_INIT_DCS_CMD(0xB8, 0x97),
+	_INIT_DCS_CMD(0xB9, 0xDC),
+	_INIT_DCS_CMD(0xBA, 0x22),
+	_INIT_DCS_CMD(0xBB, 0xA4),
+	_INIT_DCS_CMD(0xBC, 0x2B),
+	_INIT_DCS_CMD(0xBD, 0x2F),
+	_INIT_DCS_CMD(0xBE, 0xA9),
+	_INIT_DCS_CMD(0xBF, 0x25),
+	_INIT_DCS_CMD(0xC0, 0x61),
+	_INIT_DCS_CMD(0xC1, 0x97),
+	_INIT_DCS_CMD(0xC2, 0xB2),
+	_INIT_DCS_CMD(0xC3, 0xCD),
+	_INIT_DCS_CMD(0xC4, 0xD9),
+	_INIT_DCS_CMD(0xC5, 0xE7),
+	_INIT_DCS_CMD(0xC6, 0xF4),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x08),
+	_INIT_DCS_CMD(0xB1, 0x04),
+	_INIT_DCS_CMD(0xB2, 0x05),
+	_INIT_DCS_CMD(0xB3, 0x11),
+	_INIT_DCS_CMD(0xB4, 0x24),
+	_INIT_DCS_CMD(0xB5, 0x39),
+	_INIT_DCS_CMD(0xB6, 0x4F),
+	_INIT_DCS_CMD(0xB7, 0x72),
+	_INIT_DCS_CMD(0xB8, 0x98),
+	_INIT_DCS_CMD(0xB9, 0xDC),
+	_INIT_DCS_CMD(0xBA, 0x23),
+	_INIT_DCS_CMD(0xBB, 0xA6),
+	_INIT_DCS_CMD(0xBC, 0x2C),
+	_INIT_DCS_CMD(0xBD, 0x30),
+	_INIT_DCS_CMD(0xBE, 0xAA),
+	_INIT_DCS_CMD(0xBF, 0x26),
+	_INIT_DCS_CMD(0xC0, 0x62),
+	_INIT_DCS_CMD(0xC1, 0x9B),
+	_INIT_DCS_CMD(0xC2, 0xB5),
+	_INIT_DCS_CMD(0xC3, 0xCF),
+	_INIT_DCS_CMD(0xC4, 0xDB),
+	_INIT_DCS_CMD(0xC5, 0xE8),
+	_INIT_DCS_CMD(0xC6, 0xF5),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x09),
+	_INIT_DCS_CMD(0xB1, 0x04),
+	_INIT_DCS_CMD(0xB2, 0x02),
+	_INIT_DCS_CMD(0xB3, 0x16),
+	_INIT_DCS_CMD(0xB4, 0x24),
+	_INIT_DCS_CMD(0xB5, 0x3B),
+	_INIT_DCS_CMD(0xB6, 0x4F),
+	_INIT_DCS_CMD(0xB7, 0x73),
+	_INIT_DCS_CMD(0xB8, 0x99),
+	_INIT_DCS_CMD(0xB9, 0xE0),
+	_INIT_DCS_CMD(0xBA, 0x26),
+	_INIT_DCS_CMD(0xBB, 0xAD),
+	_INIT_DCS_CMD(0xBC, 0x36),
+	_INIT_DCS_CMD(0xBD, 0x3A),
+	_INIT_DCS_CMD(0xBE, 0xAE),
+	_INIT_DCS_CMD(0xBF, 0x2A),
+	_INIT_DCS_CMD(0xC0, 0x66),
+	_INIT_DCS_CMD(0xC1, 0x9E),
+	_INIT_DCS_CMD(0xC2, 0xB8),
+	_INIT_DCS_CMD(0xC3, 0xD1),
+	_INIT_DCS_CMD(0xC4, 0xDD),
+	_INIT_DCS_CMD(0xC5, 0xE9),
+	_INIT_DCS_CMD(0xC6, 0xF6),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x0A),
+	_INIT_DCS_CMD(0xB1, 0x00),
+	_INIT_DCS_CMD(0xB2, 0x02),
+	_INIT_DCS_CMD(0xB3, 0x0F),
+	_INIT_DCS_CMD(0xB4, 0x25),
+	_INIT_DCS_CMD(0xB5, 0x39),
+	_INIT_DCS_CMD(0xB6, 0x4E),
+	_INIT_DCS_CMD(0xB7, 0x72),
+	_INIT_DCS_CMD(0xB8, 0x97),
+	_INIT_DCS_CMD(0xB9, 0xDC),
+	_INIT_DCS_CMD(0xBA, 0x22),
+	_INIT_DCS_CMD(0xBB, 0xA4),
+	_INIT_DCS_CMD(0xBC, 0x2B),
+	_INIT_DCS_CMD(0xBD, 0x2F),
+	_INIT_DCS_CMD(0xBE, 0xA9),
+	_INIT_DCS_CMD(0xBF, 0x25),
+	_INIT_DCS_CMD(0xC0, 0x61),
+	_INIT_DCS_CMD(0xC1, 0x97),
+	_INIT_DCS_CMD(0xC2, 0xB2),
+	_INIT_DCS_CMD(0xC3, 0xCD),
+	_INIT_DCS_CMD(0xC4, 0xD9),
+	_INIT_DCS_CMD(0xC5, 0xE7),
+	_INIT_DCS_CMD(0xC6, 0xF4),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x0B),
+	_INIT_DCS_CMD(0xB1, 0x04),
+	_INIT_DCS_CMD(0xB2, 0x05),
+	_INIT_DCS_CMD(0xB3, 0x11),
+	_INIT_DCS_CMD(0xB4, 0x24),
+	_INIT_DCS_CMD(0xB5, 0x39),
+	_INIT_DCS_CMD(0xB6, 0x4F),
+	_INIT_DCS_CMD(0xB7, 0x72),
+	_INIT_DCS_CMD(0xB8, 0x98),
+	_INIT_DCS_CMD(0xB9, 0xDC),
+	_INIT_DCS_CMD(0xBA, 0x23),
+	_INIT_DCS_CMD(0xBB, 0xA6),
+	_INIT_DCS_CMD(0xBC, 0x2C),
+	_INIT_DCS_CMD(0xBD, 0x30),
+	_INIT_DCS_CMD(0xBE, 0xAA),
+	_INIT_DCS_CMD(0xBF, 0x26),
+	_INIT_DCS_CMD(0xC0, 0x62),
+	_INIT_DCS_CMD(0xC1, 0x9B),
+	_INIT_DCS_CMD(0xC2, 0xB5),
+	_INIT_DCS_CMD(0xC3, 0xCF),
+	_INIT_DCS_CMD(0xC4, 0xDB),
+	_INIT_DCS_CMD(0xC5, 0xE8),
+	_INIT_DCS_CMD(0xC6, 0xF5),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x0C),
+	_INIT_DCS_CMD(0xB1, 0x04),
+	_INIT_DCS_CMD(0xB2, 0x02),
+	_INIT_DCS_CMD(0xB3, 0x16),
+	_INIT_DCS_CMD(0xB4, 0x24),
+	_INIT_DCS_CMD(0xB5, 0x3B),
+	_INIT_DCS_CMD(0xB6, 0x4F),
+	_INIT_DCS_CMD(0xB7, 0x73),
+	_INIT_DCS_CMD(0xB8, 0x99),
+	_INIT_DCS_CMD(0xB9, 0xE0),
+	_INIT_DCS_CMD(0xBA, 0x26),
+	_INIT_DCS_CMD(0xBB, 0xAD),
+	_INIT_DCS_CMD(0xBC, 0x36),
+	_INIT_DCS_CMD(0xBD, 0x3A),
+	_INIT_DCS_CMD(0xBE, 0xAE),
+	_INIT_DCS_CMD(0xBF, 0x2A),
+	_INIT_DCS_CMD(0xC0, 0x66),
+	_INIT_DCS_CMD(0xC1, 0x9E),
+	_INIT_DCS_CMD(0xC2, 0xB8),
+	_INIT_DCS_CMD(0xC3, 0xD1),
+	_INIT_DCS_CMD(0xC4, 0xDD),
+	_INIT_DCS_CMD(0xC5, 0xE9),
+	_INIT_DCS_CMD(0xC6, 0xF6),
+	_INIT_DCS_CMD(0xC7, 0xFA),
+	_INIT_DCS_CMD(0xC8, 0xFC),
+	_INIT_DCS_CMD(0xC9, 0x00),
+	_INIT_DCS_CMD(0xCA, 0x00),
+	_INIT_DCS_CMD(0xCB, 0x16),
+	_INIT_DCS_CMD(0xCC, 0xAF),
+	_INIT_DCS_CMD(0xCD, 0xFF),
+	_INIT_DCS_CMD(0xCE, 0xFF),
+	_INIT_DCS_CMD(0xB0, 0x00),
+	_INIT_DCS_CMD(0xB3, 0x08),
+	_INIT_DCS_CMD(0xB0, 0x04),
+	_INIT_DCS_CMD(0xB8, 0x68),
+	_INIT_DELAY_CMD(150),
+	{},
+};
+
+static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
+	_INIT_DELAY_CMD(24),
+	_INIT_DCS_CMD(0x11),
+	_INIT_DELAY_CMD(120),
+	_INIT_DCS_CMD(0x29),
+	_INIT_DELAY_CMD(120),
+	{},
+};
+
+static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
+	_INIT_DELAY_CMD(24),
+	_INIT_DCS_CMD(0xB0, 0x01),
+	_INIT_DCS_CMD(0xC0, 0x48),
+	_INIT_DCS_CMD(0xC1, 0x48),
+	_INIT_DCS_CMD(0xC2, 0x47),
+	_INIT_DCS_CMD(0xC3, 0x47),
+	_INIT_DCS_CMD(0xC4, 0x46),
+	_INIT_DCS_CMD(0xC5, 0x46),
+	_INIT_DCS_CMD(0xC6, 0x45),
+	_INIT_DCS_CMD(0xC7, 0x45),
+	_INIT_DCS_CMD(0xC8, 0x64),
+	_INIT_DCS_CMD(0xC9, 0x64),
+	_INIT_DCS_CMD(0xCA, 0x4F),
+	_INIT_DCS_CMD(0xCB, 0x4F),
+	_INIT_DCS_CMD(0xCC, 0x40),
+	_INIT_DCS_CMD(0xCD, 0x40),
+	_INIT_DCS_CMD(0xCE, 0x66),
+	_INIT_DCS_CMD(0xCF, 0x66),
+	_INIT_DCS_CMD(0xD0, 0x4F),
+	_INIT_DCS_CMD(0xD1, 0x4F),
+	_INIT_DCS_CMD(0xD2, 0x41),
+	_INIT_DCS_CMD(0xD3, 0x41),
+	_INIT_DCS_CMD(0xD4, 0x48),
+	_INIT_DCS_CMD(0xD5, 0x48),
+	_INIT_DCS_CMD(0xD6, 0x47),
+	_INIT_DCS_CMD(0xD7, 0x47),
+	_INIT_DCS_CMD(0xD8, 0x46),
+	_INIT_DCS_CMD(0xD9, 0x46),
+	_INIT_DCS_CMD(0xDA, 0x45),
+	_INIT_DCS_CMD(0xDB, 0x45),
+	_INIT_DCS_CMD(0xDC, 0x64),
+	_INIT_DCS_CMD(0xDD, 0x64),
+	_INIT_DCS_CMD(0xDE, 0x4F),
+	_INIT_DCS_CMD(0xDF, 0x4F),
+	_INIT_DCS_CMD(0xE0, 0x40),
+	_INIT_DCS_CMD(0xE1, 0x40),
+	_INIT_DCS_CMD(0xE2, 0x66),
+	_INIT_DCS_CMD(0xE3, 0x66),
+	_INIT_DCS_CMD(0xE4, 0x4F),
+	_INIT_DCS_CMD(0xE5, 0x4F),
+	_INIT_DCS_CMD(0xE6, 0x41),
+	_INIT_DCS_CMD(0xE7, 0x41),
+	_INIT_DELAY_CMD(150),
+	{},
+};
+
+static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct boe_panel, base);
+}
+
+static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
+{
+	struct mipi_dsi_device *dsi = boe->dsi;
+	struct drm_panel *panel = &boe->base;
+	int i, err = 0;
+
+	if (boe->desc->init_cmds) {
+		const struct panel_init_cmd *init_cmds = boe->desc->init_cmds;
+
+		for (i = 0; init_cmds[i].len != 0; i++) {
+			const struct panel_init_cmd *cmd = &init_cmds[i];
+
+			switch (cmd->type) {
+			case DELAY_CMD:
+				msleep(cmd->data[0]);
+				err = 0;
+				break;
+
+			case INIT_DCS_CMD:
+				err = mipi_dsi_dcs_write(dsi, cmd->data[0],
+							 cmd->len <= 1 ? NULL :
+							 &cmd->data[1],
+							 cmd->len - 1);
+				break;
+
+			default:
+				err = -EINVAL;
+			}
+
+			if (err < 0) {
+				dev_err(panel->dev,
+					"failed to write command %u\n", i);
+				return err;
+			}
+		}
+	}
+	return 0;
+}
+
+static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+{
+	struct mipi_dsi_device *dsi = boe->dsi;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+	struct boe_panel *boe = to_boe_panel(panel);
+	int ret;
+
+	if (!boe->prepared)
+		return 0;
+
+	ret = boe_panel_enter_sleep_mode(boe);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+		return ret;
+	}
+
+	msleep(150);
+
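+	/*
+	 * With discharge_on_disable the analog rails are dropped while the
+	 * enable line is still high so the panel can discharge; otherwise
+	 * the enable line goes low first.
+	 */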
+	if (boe->desc->discharge_on_disable) {
+		regulator_disable(boe->avee);
+		regulator_disable(boe->avdd);
+		usleep_range(5000, 7000);
+		gpiod_set_value(boe->enable_gpio, 0);
+		usleep_range(5000, 7000);
+		regulator_disable(boe->pp1800);
+	} else {
+		gpiod_set_value(boe->enable_gpio, 0);
+		usleep_range(500, 1000);
+		regulator_disable(boe->avee);
+		regulator_disable(boe->avdd);
+		usleep_range(5000, 7000);
+		regulator_disable(boe->pp1800);
+	}
+
+	boe->prepared = false;
+
+	return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+	struct boe_panel *boe = to_boe_panel(panel);
+	int ret;
+
+	if (boe->prepared)
+		return 0;
+
+	gpiod_set_value(boe->enable_gpio, 0);
+	usleep_range(1000, 1500);
+
+	ret = regulator_enable(boe->pp1800);
+	if (ret < 0)
+		return ret;
+
+	usleep_range(3000, 5000);
+
+	ret = regulator_enable(boe->avdd);
+	if (ret < 0)
+		goto poweroff1v8;
+	ret = regulator_enable(boe->avee);
+	if (ret < 0)
+		goto poweroffavdd;
+
+	usleep_range(5000, 10000);
+
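+	/* Pulse the enable line high-low-high to reset the panel. */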
+	gpiod_set_value(boe->enable_gpio, 1);
+	usleep_range(1000, 2000);
+	gpiod_set_value(boe->enable_gpio, 0);
+	usleep_range(1000, 2000);
+	gpiod_set_value(boe->enable_gpio, 1);
+	usleep_range(6000, 10000);
+
+	ret = boe_panel_init_dcs_cmd(boe);
+	if (ret < 0) {
+		dev_err(panel->dev, "failed to init panel: %d\n", ret);
+		goto poweroff;
+	}
+
+	boe->prepared = true;
+
+	return 0;
+
+poweroff:
+	regulator_disable(boe->avee);
+poweroffavdd:
+	regulator_disable(boe->avdd);
+poweroff1v8:
+	usleep_range(5000, 7000);
+	regulator_disable(boe->pp1800);
+	gpiod_set_value(boe->enable_gpio, 0);
+
+	return ret;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
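+	/* Allow the panel to settle after the init sequence. */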
+	msleep(130);
+	return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+	.clock = 159425,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 100,
+	.hsync_end = 1200 + 100 + 40,
+	.htotal = 1200 + 100 + 40 + 24,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 10,
+	.vsync_end = 1920 + 10 + 14,
+	.vtotal = 1920 + 10 + 14 + 4,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc boe_tv101wum_nl6_desc = {
+	.modes = &boe_tv101wum_nl6_default_mode,
+	.bpc = 8,
+	.size = {
+		.width_mm = 135,
+		.height_mm = 216,
+	},
+	.lanes = 4,
+	.format = MIPI_DSI_FMT_RGB888,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+		      MIPI_DSI_MODE_LPM,
+	.init_cmds = boe_init_cmd,
+	.discharge_on_disable = false,
+};
+
+static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
+	.clock = 157000,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 80,
+	.hsync_end = 1200 + 80 + 24,
+	.htotal = 1200 + 80 + 24 + 36,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 16,
+	.vsync_end = 1920 + 16 + 4,
+	.vtotal = 1920 + 16 + 4 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_kd101n80_45na_desc = {
+	.modes = &auo_kd101n80_45na_default_mode,
+	.bpc = 8,
+	.size = {
+		.width_mm = 135,
+		.height_mm = 216,
+	},
+	.lanes = 4,
+	.format = MIPI_DSI_FMT_RGB888,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+		      MIPI_DSI_MODE_LPM,
+	.init_cmds = auo_kd101n80_45na_init_cmd,
+	.discharge_on_disable = true,
+};
+
+static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
+	.clock = 159916,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 80,
+	.hsync_end = 1200 + 80 + 24,
+	.htotal = 1200 + 80 + 24 + 60,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 20,
+	.vsync_end = 1920 + 20 + 4,
+	.vtotal = 1920 + 20 + 4 + 10,
+	.vrefresh = 60,
+	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv101wum_n53_desc = {
+	.modes = &boe_tv101wum_n53_default_mode,
+	.bpc = 8,
+	.size = {
+		.width_mm = 135,
+		.height_mm = 216,
+	},
+	.lanes = 4,
+	.format = MIPI_DSI_FMT_RGB888,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+		      MIPI_DSI_MODE_LPM,
+	.init_cmds = boe_init_cmd,
+};
+
+static const struct drm_display_mode auo_b101uan08_3_default_mode = {
+	.clock = 159667,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 60,
+	.hsync_end = 1200 + 60 + 4,
+	.htotal = 1200 + 60 + 4 + 80,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 34,
+	.vsync_end = 1920 + 34 + 2,
+	.vtotal = 1920 + 34 + 2 + 24,
+	.vrefresh = 60,
+	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc auo_b101uan08_3_desc = {
+	.modes = &auo_b101uan08_3_default_mode,
+	.bpc = 8,
+	.size = {
+		.width_mm = 135,
+		.height_mm = 216,
+	},
+	.lanes = 4,
+	.format = MIPI_DSI_FMT_RGB888,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+		      MIPI_DSI_MODE_LPM,
+	.init_cmds = auo_b101uan08_3_init_cmd,
+};
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+			       struct drm_connector *connector)
+{
+	struct boe_panel *boe = to_boe_panel(panel);
+	const struct drm_display_mode *m = boe->desc->modes;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, m);
+	if (!mode) {
+		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+			m->hdisplay, m->vdisplay, m->vrefresh);
+		return -ENOMEM;
+	}
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = boe->desc->size.width_mm;
+	connector->display_info.height_mm = boe->desc->size.height_mm;
+	connector->display_info.bpc = boe->desc->bpc;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs boe_panel_funcs = {
+	.unprepare = boe_panel_unprepare,
+	.prepare = boe_panel_prepare,
+	.enable = boe_panel_enable,
+	.get_modes = boe_panel_get_modes,
+};
+
+static int boe_panel_add(struct boe_panel *boe)
+{
+	struct device *dev = &boe->dsi->dev;
+	int err;
+
+	boe->avdd = devm_regulator_get(dev, "avdd");
+	if (IS_ERR(boe->avdd))
+		return PTR_ERR(boe->avdd);
+
+	boe->avee = devm_regulator_get(dev, "avee");
+	if (IS_ERR(boe->avee))
+		return PTR_ERR(boe->avee);
+
+	boe->pp1800 = devm_regulator_get(dev, "pp1800");
+	if (IS_ERR(boe->pp1800))
+		return PTR_ERR(boe->pp1800);
+
+	boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+	if (IS_ERR(boe->enable_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(boe->enable_gpio));
+		return PTR_ERR(boe->enable_gpio);
+	}
+
+	gpiod_set_value(boe->enable_gpio, 0);
+
+	drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	err = drm_panel_of_backlight(&boe->base);
+	if (err)
+		return err;
+
+	return drm_panel_add(&boe->base);
+}
+
+static int boe_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct boe_panel *boe;
+	int ret;
+	const struct panel_desc *desc;
+
+	boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
+	if (!boe)
+		return -ENOMEM;
+
+	desc = of_device_get_match_data(&dsi->dev);
+	dsi->lanes = desc->lanes;
+	dsi->format = desc->format;
+	dsi->mode_flags = desc->mode_flags;
+	boe->desc = desc;
+	boe->dsi = dsi;
+	ret = boe_panel_add(boe);
+	if (ret < 0)
+		return ret;
+
+	mipi_dsi_set_drvdata(dsi, boe);
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret)
+		drm_panel_remove(&boe->base);
+
+	return ret;
+}
+
+static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+
+	drm_panel_disable(&boe->base);
+	drm_panel_unprepare(&boe->base);
+}
+
+static int boe_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	boe_panel_shutdown(dsi);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+	if (boe->base.dev)
+		drm_panel_remove(&boe->base);
+
+	return 0;
+}
+
+static const struct of_device_id boe_of_match[] = {
+	{ .compatible = "boe,tv101wum-nl6",
+	  .data = &boe_tv101wum_nl6_desc
+	},
+	{ .compatible = "auo,kd101n80-45na",
+	  .data = &auo_kd101n80_45na_desc
+	},
+	{ .compatible = "boe,tv101wum-n53",
+	  .data = &boe_tv101wum_n53_desc
+	},
+	{ .compatible = "auo,b101uan08.3",
+	  .data = &auo_b101uan08_3_desc
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_of_match);
+
+static struct mipi_dsi_driver boe_panel_driver = {
+	.driver = {
+		.name = "panel-boe-tv101wum-nl6",
+		.of_match_table = boe_of_match,
+	},
+	.probe = boe_panel_probe,
+	.remove = boe_panel_remove,
+	.shutdown = boe_panel_shutdown,
+};
+module_mipi_dsi_driver(boe_panel_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
new file mode 100644
index 0000000..711ded4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Manufacturer specific Commands send via DSI */
+#define KD35T133_CMD_INTERFACEMODECTRL		0xb0
+#define KD35T133_CMD_FRAMERATECTRL		0xb1
+#define KD35T133_CMD_DISPLAYINVERSIONCTRL	0xb4
+#define KD35T133_CMD_DISPLAYFUNCTIONCTRL	0xb6
+#define KD35T133_CMD_POWERCONTROL1		0xc0
+#define KD35T133_CMD_POWERCONTROL2		0xc1
+#define KD35T133_CMD_VCOMCONTROL		0xc5
+#define KD35T133_CMD_POSITIVEGAMMA		0xe0
+#define KD35T133_CMD_NEGATIVEGAMMA		0xe1
+#define KD35T133_CMD_SETIMAGEFUNCTION		0xe9
+#define KD35T133_CMD_ADJUSTCONTROL3		0xf7
+
+struct kd35t133 {
+	struct device *dev;
+	struct drm_panel panel;
+	struct gpio_desc *reset_gpio;
+	struct regulator *vdd;
+	struct regulator *iovcc;
+	bool prepared;
+};
+
+static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
+{
+	return container_of(panel, struct kd35t133, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do {			\
+		static const u8 d[] = { seq };				\
+		int ret;						\
+		ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d));	\
+		if (ret < 0)						\
+			return ret;					\
+	} while (0)
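+
+/*
+ * Note that the macro returns from the calling function on a failed write,
+ * so it can only be used in functions returning an int error code.
+ */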
+
+static int kd35t133_init_sequence(struct kd35t133 *ctx)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	struct device *dev = ctx->dev;
+
+	/*
+	 * Init sequence was supplied by the panel vendor with minimal
+	 * documentation.
+	 */
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+			  0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+			  0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+			  0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+			  0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+	dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+	dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+			  0x20, 0x02);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+	dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+			  0xa9, 0x51, 0x2c, 0x82);
+	mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
+
+	DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+	return 0;
+}
+
+static int kd35t133_unprepare(struct drm_panel *panel)
+{
+	struct kd35t133 *ctx = panel_to_kd35t133(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (!ctx->prepared)
+		return 0;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+			      ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+			      ret);
+		return ret;
+	}
+
+	regulator_disable(ctx->iovcc);
+	regulator_disable(ctx->vdd);
+
+	ctx->prepared = false;
+
+	return 0;
+}
+
+static int kd35t133_prepare(struct drm_panel *panel)
+{
+	struct kd35t133 *ctx = panel_to_kd35t133(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->prepared)
+		return 0;
+
+	DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+	ret = regulator_enable(ctx->vdd);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable vdd supply: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_enable(ctx->iovcc);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev,
+			      "Failed to enable iovcc supply: %d\n", ret);
+		goto disable_vdd;
+	}
+
+	msleep(20);
+
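+	/* Assert reset briefly, then release it before waking the panel. */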
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+	usleep_range(10, 20);
+	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+	msleep(20);
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+		goto disable_iovcc;
+	}
+
+	msleep(250);
+
+	ret = kd35t133_init_sequence(ctx);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+			      ret);
+		goto disable_iovcc;
+	}
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+		goto disable_iovcc;
+	}
+
+	msleep(50);
+
+	ctx->prepared = true;
+
+	return 0;
+
+disable_iovcc:
+	regulator_disable(ctx->iovcc);
+disable_vdd:
+	regulator_disable(ctx->vdd);
+	return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+	.hdisplay	= 320,
+	.hsync_start	= 320 + 130,
+	.hsync_end	= 320 + 130 + 4,
+	.htotal		= 320 + 130 + 4 + 130,
+	.vdisplay	= 480,
+	.vsync_start	= 480 + 2,
+	.vsync_end	= 480 + 2 + 1,
+	.vtotal		= 480 + 2 + 1 + 2,
+	.vrefresh	= 60,
+	.clock		= 17000,
+	.width_mm	= 42,
+	.height_mm	= 82,
+};
+
+static int kd35t133_get_modes(struct drm_panel *panel,
+				struct drm_connector *connector)
+{
+	struct kd35t133 *ctx = panel_to_kd35t133(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &default_mode);
+	if (!mode) {
+		DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+			      default_mode.hdisplay, default_mode.vdisplay,
+			      default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs kd35t133_funcs = {
+	.unprepare	= kd35t133_unprepare,
+	.prepare	= kd35t133_prepare,
+	.get_modes	= kd35t133_get_modes,
+};
+
+static int kd35t133_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct kd35t133 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset_gpio)) {
+		DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(ctx->vdd)) {
+		ret = PTR_ERR(ctx->vdd);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request vdd regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	ctx->iovcc = devm_regulator_get(dev, "iovcc");
+	if (IS_ERR(ctx->iovcc)) {
+		ret = PTR_ERR(ctx->iovcc);
+		if (ret != -EPROBE_DEFER)
+			DRM_DEV_ERROR(dev,
+				      "Failed to request iovcc regulator: %d\n",
+				      ret);
+		return ret;
+	}
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+
+	dsi->lanes = 1;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+	drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
+	drm_panel_add(&ctx->panel);
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+		drm_panel_remove(&ctx->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = drm_panel_unprepare(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+			      ret);
+
+	ret = drm_panel_disable(&ctx->panel);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+			      ret);
+}
+
+static int kd35t133_remove(struct mipi_dsi_device *dsi)
+{
+	struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	kd35t133_shutdown(dsi);
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+			      ret);
+
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id kd35t133_of_match[] = {
+	{ .compatible = "elida,kd35t133" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kd35t133_of_match);
+
+static struct mipi_dsi_driver kd35t133_driver = {
+	.driver = {
+		.name = "panel-elida-kd35t133",
+		.of_match_table = kd35t133_of_match,
+	},
+	.probe	= kd35t133_probe,
+	.remove = kd35t133_remove,
+	.shutdown = kd35t133_shutdown,
+};
+module_mipi_dsi_driver(kd35t133_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
new file mode 100644
index 0000000..fddbfdd
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2020 Icenowy Zheng <icenowy@aosc.io>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define K101_IM2BA02_INIT_CMD_LEN	2
+
+static const char * const regulator_names[] = {
+	"dvdd",
+	"avdd",
+	"cvdd"
+};
+
+struct k101_im2ba02 {
+	struct drm_panel	panel;
+	struct mipi_dsi_device	*dsi;
+
+	struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+	struct gpio_desc	*reset;
+};
+
+static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
+{
+	return container_of(panel, struct k101_im2ba02, panel);
+}
+
+struct k101_im2ba02_init_cmd {
+	u8 data[K101_IM2BA02_INIT_CMD_LEN];
+};
+
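+/*
+ * Each init command is a fixed two-byte register/value pair, sent as a raw
+ * buffer with mipi_dsi_dcs_write_buffer().
+ */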
+static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
+	/* Switch to page 0 */
+	{ .data = { 0xE0, 0x00 } },
+
+	/* Seems to be some password */
+	{ .data = { 0xE1, 0x93 } },
+	{ .data = { 0xE2, 0x65 } },
+	{ .data = { 0xE3, 0xF8 } },
+
+	/* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
+	{ .data = { 0x80, 0x03 } },
+
+	/* Sequence control */
+	{ .data = { 0x70, 0x02 } },
+	{ .data = { 0x71, 0x23 } },
+	{ .data = { 0x72, 0x06 } },
+
+	/* Switch to page 1 */
+	{ .data = { 0xE0, 0x01 } },
+
+	/* Set VCOM */
+	{ .data = { 0x00, 0x00 } },
+	{ .data = { 0x01, 0x66 } },
+	/* Set VCOM_Reverse */
+	{ .data = { 0x03, 0x00 } },
+	{ .data = { 0x04, 0x25 } },
+
+	/* Set Gamma Power, VG[MS][PN] */
+	{ .data = { 0x17, 0x00 } },
+	{ .data = { 0x18, 0x6D } },
+	{ .data = { 0x19, 0x00 } },
+	{ .data = { 0x1A, 0x00 } },
+	{ .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
+	{ .data = { 0x1C, 0x00 } },
+
+	/* Set Gate Power */
+	{ .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
+	{ .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
+	{ .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
+	{ .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
+
+	/* Set Panel */
+	{ .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
+
+	/* Set RGBCYC */
+	{ .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
+	{ .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
+	{ .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
+	{ .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
+	{ .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
+	{ .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
+	{ .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
+
+	/* Set TCON parameter */
+	{ .data = { 0x40, 0x06 } }, /* RSO = 800 points */
+	{ .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
+
+	/* Set power voltage */
+	{ .data = { 0x55, 0x0F } }, /* DCDCM */
+	{ .data = { 0x56, 0x01 } },
+	{ .data = { 0x57, 0x69 } },
+	{ .data = { 0x58, 0x0A } },
+	{ .data = { 0x59, 0x0A } },
+	{ .data = { 0x5A, 0x45 } },
+	{ .data = { 0x5B, 0x15 } },
+
+	/* Set gamma */
+	{ .data = { 0x5D, 0x7C } },
+	{ .data = { 0x5E, 0x65 } },
+	{ .data = { 0x5F, 0x55 } },
+	{ .data = { 0x60, 0x49 } },
+	{ .data = { 0x61, 0x44 } },
+	{ .data = { 0x62, 0x35 } },
+	{ .data = { 0x63, 0x3A } },
+	{ .data = { 0x64, 0x23 } },
+	{ .data = { 0x65, 0x3D } },
+	{ .data = { 0x66, 0x3C } },
+	{ .data = { 0x67, 0x3D } },
+	{ .data = { 0x68, 0x5D } },
+	{ .data = { 0x69, 0x4D } },
+	{ .data = { 0x6A, 0x56 } },
+	{ .data = { 0x6B, 0x48 } },
+	{ .data = { 0x6C, 0x45 } },
+	{ .data = { 0x6D, 0x38 } },
+	{ .data = { 0x6E, 0x25 } },
+	{ .data = { 0x6F, 0x00 } },
+	{ .data = { 0x70, 0x7C } },
+	{ .data = { 0x71, 0x65 } },
+	{ .data = { 0x72, 0x55 } },
+	{ .data = { 0x73, 0x49 } },
+	{ .data = { 0x74, 0x44 } },
+	{ .data = { 0x75, 0x35 } },
+	{ .data = { 0x76, 0x3A } },
+	{ .data = { 0x77, 0x23 } },
+	{ .data = { 0x78, 0x3D } },
+	{ .data = { 0x79, 0x3C } },
+	{ .data = { 0x7A, 0x3D } },
+	{ .data = { 0x7B, 0x5D } },
+	{ .data = { 0x7C, 0x4D } },
+	{ .data = { 0x7D, 0x56 } },
+	{ .data = { 0x7E, 0x48 } },
+	{ .data = { 0x7F, 0x45 } },
+	{ .data = { 0x80, 0x38 } },
+	{ .data = { 0x81, 0x25 } },
+	{ .data = { 0x82, 0x00 } },
+
+	/* Switch to page 2, for GIP */
+	{ .data = { 0xE0, 0x02 } },
+
+	{ .data = { 0x00, 0x1E } },
+	{ .data = { 0x01, 0x1E } },
+	{ .data = { 0x02, 0x41 } },
+	{ .data = { 0x03, 0x41 } },
+	{ .data = { 0x04, 0x43 } },
+	{ .data = { 0x05, 0x43 } },
+	{ .data = { 0x06, 0x1F } },
+	{ .data = { 0x07, 0x1F } },
+	{ .data = { 0x08, 0x1F } },
+	{ .data = { 0x09, 0x1F } },
+	{ .data = { 0x0A, 0x1E } },
+	{ .data = { 0x0B, 0x1E } },
+	{ .data = { 0x0C, 0x1F } },
+	{ .data = { 0x0D, 0x47 } },
+	{ .data = { 0x0E, 0x47 } },
+	{ .data = { 0x0F, 0x45 } },
+	{ .data = { 0x10, 0x45 } },
+	{ .data = { 0x11, 0x4B } },
+	{ .data = { 0x12, 0x4B } },
+	{ .data = { 0x13, 0x49 } },
+	{ .data = { 0x14, 0x49 } },
+	{ .data = { 0x15, 0x1F } },
+
+	{ .data = { 0x16, 0x1E } },
+	{ .data = { 0x17, 0x1E } },
+	{ .data = { 0x18, 0x40 } },
+	{ .data = { 0x19, 0x40 } },
+	{ .data = { 0x1A, 0x42 } },
+	{ .data = { 0x1B, 0x42 } },
+	{ .data = { 0x1C, 0x1F } },
+	{ .data = { 0x1D, 0x1F } },
+	{ .data = { 0x1E, 0x1F } },
+	{ .data = { 0x1F, 0x1F } },
+	{ .data = { 0x20, 0x1E } },
+	{ .data = { 0x21, 0x1E } },
+	{ .data = { 0x22, 0x1F } },
+	{ .data = { 0x23, 0x46 } },
+	{ .data = { 0x24, 0x46 } },
+	{ .data = { 0x25, 0x44 } },
+	{ .data = { 0x26, 0x44 } },
+	{ .data = { 0x27, 0x4A } },
+	{ .data = { 0x28, 0x4A } },
+	{ .data = { 0x29, 0x48 } },
+	{ .data = { 0x2A, 0x48 } },
+	{ .data = { 0x2B, 0x1F } },
+
+	{ .data = { 0x2C, 0x1F } },
+	{ .data = { 0x2D, 0x1F } },
+	{ .data = { 0x2E, 0x42 } },
+	{ .data = { 0x2F, 0x42 } },
+	{ .data = { 0x30, 0x40 } },
+	{ .data = { 0x31, 0x40 } },
+	{ .data = { 0x32, 0x1E } },
+	{ .data = { 0x33, 0x1E } },
+	{ .data = { 0x34, 0x1F } },
+	{ .data = { 0x35, 0x1F } },
+	{ .data = { 0x36, 0x1E } },
+	{ .data = { 0x37, 0x1E } },
+	{ .data = { 0x38, 0x1F } },
+	{ .data = { 0x39, 0x48 } },
+	{ .data = { 0x3A, 0x48 } },
+	{ .data = { 0x3B, 0x4A } },
+	{ .data = { 0x3C, 0x4A } },
+	{ .data = { 0x3D, 0x44 } },
+	{ .data = { 0x3E, 0x44 } },
+	{ .data = { 0x3F, 0x46 } },
+	{ .data = { 0x40, 0x46 } },
+	{ .data = { 0x41, 0x1F } },
+
+	{ .data = { 0x42, 0x1F } },
+	{ .data = { 0x43, 0x1F } },
+	{ .data = { 0x44, 0x43 } },
+	{ .data = { 0x45, 0x43 } },
+	{ .data = { 0x46, 0x41 } },
+	{ .data = { 0x47, 0x41 } },
+	{ .data = { 0x48, 0x1E } },
+	{ .data = { 0x49, 0x1E } },
+	{ .data = { 0x4A, 0x1E } },
+	{ .data = { 0x4B, 0x1F } },
+	{ .data = { 0x4C, 0x1E } },
+	{ .data = { 0x4D, 0x1E } },
+	{ .data = { 0x4E, 0x1F } },
+	{ .data = { 0x4F, 0x49 } },
+	{ .data = { 0x50, 0x49 } },
+	{ .data = { 0x51, 0x4B } },
+	{ .data = { 0x52, 0x4B } },
+	{ .data = { 0x53, 0x45 } },
+	{ .data = { 0x54, 0x45 } },
+	{ .data = { 0x55, 0x47 } },
+	{ .data = { 0x56, 0x47 } },
+	{ .data = { 0x57, 0x1F } },
+
+	{ .data = { 0x58, 0x10 } },
+	{ .data = { 0x59, 0x00 } },
+	{ .data = { 0x5A, 0x00 } },
+	{ .data = { 0x5B, 0x30 } },
+	{ .data = { 0x5C, 0x02 } },
+	{ .data = { 0x5D, 0x40 } },
+	{ .data = { 0x5E, 0x01 } },
+	{ .data = { 0x5F, 0x02 } },
+	{ .data = { 0x60, 0x30 } },
+	{ .data = { 0x61, 0x01 } },
+	{ .data = { 0x62, 0x02 } },
+	{ .data = { 0x63, 0x6A } },
+	{ .data = { 0x64, 0x6A } },
+	{ .data = { 0x65, 0x05 } },
+	{ .data = { 0x66, 0x12 } },
+	{ .data = { 0x67, 0x74 } },
+	{ .data = { 0x68, 0x04 } },
+	{ .data = { 0x69, 0x6A } },
+	{ .data = { 0x6A, 0x6A } },
+	{ .data = { 0x6B, 0x08 } },
+
+	{ .data = { 0x6C, 0x00 } },
+	{ .data = { 0x6D, 0x04 } },
+	{ .data = { 0x6E, 0x04 } },
+	{ .data = { 0x6F, 0x88 } },
+	{ .data = { 0x70, 0x00 } },
+	{ .data = { 0x71, 0x00 } },
+	{ .data = { 0x72, 0x06 } },
+	{ .data = { 0x73, 0x7B } },
+	{ .data = { 0x74, 0x00 } },
+	{ .data = { 0x75, 0x07 } },
+	{ .data = { 0x76, 0x00 } },
+	{ .data = { 0x77, 0x5D } },
+	{ .data = { 0x78, 0x17 } },
+	{ .data = { 0x79, 0x1F } },
+	{ .data = { 0x7A, 0x00 } },
+	{ .data = { 0x7B, 0x00 } },
+	{ .data = { 0x7C, 0x00 } },
+	{ .data = { 0x7D, 0x03 } },
+	{ .data = { 0x7E, 0x7B } },
+
+	{ .data = { 0xE0, 0x04 } },
+	{ .data = { 0x2B, 0x2B } },
+	{ .data = { 0x2E, 0x44 } },
+
+	{ .data = { 0xE0, 0x01 } },
+	{ .data = { 0x0E, 0x01 } },
+
+	{ .data = { 0xE0, 0x03 } },
+	{ .data = { 0x98, 0x2F } },
+
+	{ .data = { 0xE0, 0x00 } },
+	{ .data = { 0xE6, 0x02 } },
+	{ .data = { 0xE7, 0x02 } },
+
+	{ .data = { 0x11, 0x00 } },
+};
+
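+/* Post-init DCS commands: 0x29 (set_display_on) and 0x35 (set_tear_on). */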
+static const struct k101_im2ba02_init_cmd timed_cmds[] = {
+	{ .data = { 0x29, 0x00 } },
+	{ .data = { 0x35, 0x00 } },
+};
+
+static int k101_im2ba02_prepare(struct drm_panel *panel)
+{
+	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+	struct mipi_dsi_device *dsi = ctx->dsi;
+	unsigned int i;
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret)
+		return ret;
+
+	msleep(30);
+
+	gpiod_set_value(ctx->reset, 1);
+	msleep(50);
+
+	gpiod_set_value(ctx->reset, 0);
+	msleep(50);
+
+	gpiod_set_value(ctx->reset, 1);
+	msleep(200);
+
+	for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
+		const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];
+
+		ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+		if (ret < 0)
+			goto powerdown;
+	}
+
+	return 0;
+
+powerdown:
+	gpiod_set_value(ctx->reset, 0);
+	msleep(50);
+
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int k101_im2ba02_enable(struct drm_panel *panel)
+{
+	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+	const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
+	int ret;
+
+	msleep(150);
+
+	ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+	if (ret < 0)
+		return ret;
+
+	msleep(50);
+
+	return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+}
+
+static int k101_im2ba02_disable(struct drm_panel *panel)
+{
+	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+
+	return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int k101_im2ba02_unprepare(struct drm_panel *panel)
+{
+	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+	int ret;
+
+	ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+			      ret);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+	if (ret < 0)
+		DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+			      ret);
+
+	msleep(200);
+
+	gpiod_set_value(ctx->reset, 0);
+	msleep(20);
+
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static const struct drm_display_mode k101_im2ba02_default_mode = {
+	.clock = 70000,
+	.vrefresh = 60,
+
+	.hdisplay = 800,
+	.hsync_start = 800 + 20,
+	.hsync_end = 800 + 20 + 20,
+	.htotal = 800 + 20 + 20 + 20,
+
+	.vdisplay = 1280,
+	.vsync_start = 1280 + 16,
+	.vsync_end = 1280 + 16 + 4,
+	.vtotal = 1280 + 16 + 4 + 4,
+
+	.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+	.width_mm	= 136,
+	.height_mm	= 217,
+};
+
+static int k101_im2ba02_get_modes(struct drm_panel *panel,
+				  struct drm_connector *connector)
+{
+	struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
+	if (!mode) {
+		DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+			      k101_im2ba02_default_mode.hdisplay,
+			      k101_im2ba02_default_mode.vdisplay,
+			      k101_im2ba02_default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs k101_im2ba02_funcs = {
+	.disable = k101_im2ba02_disable,
+	.unprepare = k101_im2ba02_unprepare,
+	.prepare = k101_im2ba02_prepare,
+	.enable = k101_im2ba02_enable,
+	.get_modes = k101_im2ba02_get_modes,
+};
+
+static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
+{
+	struct k101_im2ba02 *ctx;
+	unsigned int i;
+	int ret;
+
+	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+	ctx->dsi = dsi;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+		ctx->supplies[i].supply = regulator_names[i];
+
+	ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0) {
+		DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+		return ret;
+	}
+
+	ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset)) {
+		DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+		return PTR_ERR(ctx->reset);
+	}
+
+	drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_of_backlight(&ctx->panel);
+	if (ret)
+		return ret;
+
+	ret = drm_panel_add(&ctx->panel);
+	if (ret < 0)
+		return ret;
+
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->lanes = 4;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		drm_panel_remove(&ctx->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+{
+	struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id k101_im2ba02_of_match[] = {
+	{ .compatible = "feixin,k101-im2ba02", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
+
+static struct mipi_dsi_driver k101_im2ba02_driver = {
+	.probe = k101_im2ba02_dsi_probe,
+	.remove = k101_im2ba02_dsi_remove,
+	.driver = {
+		.name = "feixin-k101-im2ba02",
+		.of_match_table = k101_im2ba02_of_match,
+	},
+};
+module_mipi_dsi_driver(k101_im2ba02_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index f394d53..0993552 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -540,7 +540,7 @@ static int ili9322_enable(struct drm_panel *panel)
 
 /* Serial RGB modes */
 static const struct drm_display_mode srgb_320x240_mode = {
-	.clock = 2453500,
+	.clock = 24535,
 	.hdisplay = 320,
 	.hsync_start = 320 + 359,
 	.hsync_end = 320 + 359 + 1,
@@ -554,7 +554,7 @@ static const struct drm_display_mode srgb_320x240_mode = {
 };
 
 static const struct drm_display_mode srgb_360x240_mode = {
-	.clock = 2700000,
+	.clock = 27000,
 	.hdisplay = 360,
 	.hsync_start = 360 + 35,
 	.hsync_end = 360 + 35 + 1,
@@ -569,7 +569,7 @@ static const struct drm_display_mode srgb_360x240_mode = {
 
 /* This is the only mode listed for parallel RGB in the datasheet */
 static const struct drm_display_mode prgb_320x240_mode = {
-	.clock = 6400000,
+	.clock = 64000,
 	.hdisplay = 320,
 	.hsync_start = 320 + 38,
 	.hsync_end = 320 + 38 + 1,
@@ -584,7 +584,7 @@ static const struct drm_display_mode prgb_320x240_mode = {
 
 /* YUV modes */
 static const struct drm_display_mode yuv_640x320_mode = {
-	.clock = 2454000,
+	.clock = 24540,
 	.hdisplay = 640,
 	.hsync_start = 640 + 252,
 	.hsync_end = 640 + 252 + 1,
@@ -598,7 +598,7 @@ static const struct drm_display_mode yuv_640x320_mode = {
 };
 
 static const struct drm_display_mode yuv_720x360_mode = {
-	.clock = 2700000,
+	.clock = 27000,
 	.hdisplay = 720,
 	.hsync_start = 720 + 252,
 	.hsync_end = 720 + 252 + 1,
@@ -613,7 +613,7 @@ static const struct drm_display_mode yuv_720x360_mode = {
 
 /* BT.656 VGA mode, 640x480 */
 static const struct drm_display_mode itu_r_bt_656_640_mode = {
-	.clock = 2454000,
+	.clock = 24540,
 	.hdisplay = 640,
 	.hsync_start = 640 + 3,
 	.hsync_end = 640 + 3 + 1,
@@ -628,7 +628,7 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
 
 /* BT.656 D1 mode 720x480 */
 static const struct drm_display_mode itu_r_bt_656_720_mode = {
-	.clock = 2700000,
+	.clock = 27000,
 	.hdisplay = 720,
 	.hsync_start = 720 + 3,
 	.hsync_end = 720 + 3 + 1,
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index b262b53..5907f25 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -197,7 +197,7 @@ static int lg4573_enable(struct drm_panel *panel)
 }
 
 static const struct drm_display_mode default_mode = {
-	.clock = 27000,
+	.clock = 28341,
 	.hdisplay = 480,
 	.hsync_start = 480 + 10,
 	.hsync_end = 480 + 10 + 59,
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
new file mode 100644
index 0000000..4a8fa90
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35510 panel driver
+ * Copyright (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * Based on code by Robert Teather (C) 2012 Samsung
+ *
+ * This display driver (and I refer to the physical component NT35510,
+ * not this Linux kernel software driver) can handle:
+ * 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
+ * It has 480x864x24bit SRAM embedded for storing a frame.
+ * When powered on the display is by default in 480x800 mode.
+ *
+ * The actual panels using this component have different names, but
+ * the code needed to set up and configure the panel will be similar,
+ * so they should all use the NT35510 driver with appropriate configuration
+ * per-panel, e.g. for physical size.
+ *
+ * This driver is for the DSI interface to panels using the NT35510.
+ *
+ * The NT35510 can also use an RGB (DPI) interface combined with an
+ * I2C or SPI interface for setting up the NT35510. If this is needed
+ * this panel driver should be refactored to also support that use
+ * case.
+ */
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define MCS_CMD_MAUCCTR		0xF0 /* Manufacturer command enable */
+#define MCS_CMD_READ_ID1	0xDA
+#define MCS_CMD_READ_ID2	0xDB
+#define MCS_CMD_READ_ID3	0xDC
+#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
+#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 0.
+ */
+#define NT35510_P0_DOPCTR 0xB1
+#define NT35510_P0_SDHDTCTR 0xB6
+#define NT35510_P0_GSEQCTR 0xB7
+#define NT35510_P0_SDEQCTR 0xB8
+#define NT35510_P0_SDVPCTR 0xBA
+#define NT35510_P0_DPFRCTR1 0xBD
+#define NT35510_P0_DPFRCTR2 0xBE
+#define NT35510_P0_DPFRCTR3 0xBF
+#define NT35510_P0_DPMCTR12 0xCC
+
+#define NT35510_P0_DOPCTR_LEN 2
+#define NT35510_P0_GSEQCTR_LEN 2
+#define NT35510_P0_SDEQCTR_LEN 4
+#define NT35510_P0_SDVPCTR_LEN 1
+#define NT35510_P0_DPFRCTR1_LEN 5
+#define NT35510_P0_DPFRCTR2_LEN 5
+#define NT35510_P0_DPFRCTR3_LEN 5
+#define NT35510_P0_DPMCTR12_LEN 3
+
+#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
+#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
+#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
+#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
+#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
+#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
+#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
+#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
+#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
+#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
+#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
+#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
+#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
+#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
+#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 1.
+ */
+#define NT35510_P1_SETAVDD 0xB0
+#define NT35510_P1_SETAVEE 0xB1
+#define NT35510_P1_SETVCL 0xB2
+#define NT35510_P1_SETVGH 0xB3
+#define NT35510_P1_SETVRGH 0xB4
+#define NT35510_P1_SETVGL 0xB5
+#define NT35510_P1_BT1CTR 0xB6
+#define NT35510_P1_BT2CTR 0xB7
+#define NT35510_P1_BT3CTR 0xB8
+#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
+#define NT35510_P1_BT5CTR 0xBA
+#define NT35510_P1_PFMCTR 0xBB
+#define NT35510_P1_SETVGP 0xBC
+#define NT35510_P1_SETVGN 0xBD
+#define NT35510_P1_SETVCMOFF 0xBE
+#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
+#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
+#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
+#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
+#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
+#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
+#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
+
+/* AVDD and AVEE setting 3 bytes */
+#define NT35510_P1_AVDD_LEN 3
+#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VGH_LEN 3
+#define NT35510_P1_VGL_LEN 3
+#define NT35510_P1_VGP_LEN 3
+#define NT35510_P1_VGN_LEN 3
+/* BT1CTR thru BT5CTR setting 3 bytes */
+#define NT35510_P1_BT1CTR_LEN 3
+#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT4CTR_LEN 3
+#define NT35510_P1_BT5CTR_LEN 3
+/* 52 gamma parameters times two per color: positive and negative */
+#define NT35510_P1_GAMMA_LEN 52
+
+/**
+ * struct nt35510_config - the display-specific NT35510 configuration
+ *
+ * Some of the settings provide an array of three bytes, A, B, C, which mean:
+ * A = normal / idle off mode
+ * B = idle on mode
+ * C = partial / idle off mode
+ *
+ * Gamma correction arrays are 10-bit numbers, where two consecutive
+ * bytes make up one point on the gamma correction curve. The points are
+ * not linearly placed along the X axis: we get points 0, 1, 3, 5,
+ * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
+ * 240, 244, 248, 250, 252, 254, 255. The voltage tuples form
+ * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
+ * 0x03FF being the highest voltage.
+ *
+ * Each value must be strictly higher than the previous value forming
+ * a rising curve like this:
+ *
+ * ^
+ * |                                        V255
+ * |                                 V254
+ * |                         ....
+ * |                    V5
+ * |           V3
+ * |     V1
+ * | V0
+ * +------------------------------------------->
+ *
+ * The details about all settings can be found in the NT35510 Application
+ * Note.
+ */
+struct nt35510_config {
+	/**
+	 * @width_mm: physical panel width [mm]
+	 */
+	u32 width_mm;
+	/**
+	 * @height_mm: physical panel height [mm]
+	 */
+	u32 height_mm;
+	/**
+	 * @mode: the display mode. This is only relevant outside the panel
+	 * when used in video mode; in command mode it configures the internal
+	 * timing of the display controller.
+	 */
+	const struct drm_display_mode mode;
+	/**
+	 * @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
+	 * in 0.1V steps; the default is 0x05, which means 6.0V
+	 */
+	u8 avdd[NT35510_P1_AVDD_LEN];
+	/**
+	 * @bt1ctr: setting for boost power control for the AVDD step-up
+	 * circuit (1)
+	 * bits 0..2 in the lower nibble controls PCK, the booster clock
+	 * frequency for the step-up circuit:
+	 * 0 = Hsync/32
+	 * 1 = Hsync/16
+	 * 2 = Hsync/8
+	 * 3 = Hsync/4
+	 * 4 = Hsync/2
+	 * 5 = Hsync
+	 * 6 = Hsync x 2
+	 * 7 = Hsync x 4
+	 * bits 4..6 in the upper nibble controls BTP, the boosting
+	 * amplification for the step-up circuit:
+	 * 0 = Disable
+	 * 1 = 1.5 x VDDB
+	 * 2 = 1.66 x VDDB
+	 * 3 = 2 x VDDB
+	 * 4 = 2.5 x VDDB
+	 * 5 = 3 x VDDB
+	 * The defaults are 4 and 4, yielding 0x44: PCK = Hsync/2 and
+	 * BTP = 2.5 x VDDB
+	 */
+	u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
+	/**
+	 * @avee: setting for AVEE ranging from 0x00 = -6.5V to 0x14 = -4.5V
+	 * in 0.1V steps; the default is 0x05, which means -6.0V
+	 */
+	u8 avee[NT35510_P1_AVEE_LEN];
+	/**
+	 * @bt2ctr: setting for boost power control for the AVEE step-up
+	 * circuit (2)
+	 * bits 0..2 in the lower nibble controls NCK, the booster clock
+	 * frequency, the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble controls BTN, the boosting
+	 * amplification for the step-up circuit.
+	 * 0 = Disable
+	 * 1 = -1.5 x VDDB
+	 * 2 = -2 x VDDB
+	 * 3 = -2.5 x VDDB
+	 * 4 = -3 x VDDB
+	 * The defaults are 4 and 3 yielding 0x34
+	 */
+	u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
+	/**
+	 * @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
+	 * in 1V steps, the default is 0x08 which means 15V
+	 */
+	u8 vgh[NT35510_P1_VGH_LEN];
+	/**
+	 * @bt4ctr: setting for boost power control for the VGH step-up
+	 * circuit (4)
+	 * bits 0..2 in the lower nibble controls HCK, the booster clock
+	 * frequency, the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble controls BTH, the boosting
+	 * amplification for the step-up circuit.
+	 * 0 = AVDD + VDDB
+	 * 1 = AVDD - AVEE
+	 * 2 = AVDD - AVEE + VDDB
+	 * 3 = AVDD x 2 - AVEE
+	 * The defaults are 4 and 3 yielding 0x34
+	 */
+	u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
+	/**
+	 * @vgl: setting for VGL ranging from 0x00 = -2V to 0x0f = -15V in
+	 * 1V steps, the default is 0x08 which means -10V
+	 */
+	u8 vgl[NT35510_P1_VGL_LEN];
+	/**
+	 * @bt5ctr: setting for boost power control for the VGL step-up
+	 * circuit (5)
+	 * bits 0..2 in the lower nibble controls LCK, the booster clock
+	 * frequency, the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble controls BTL, the boosting
+	 * amplification for the step-up circuit.
+	 * 0 = AVEE + VCL
+	 * 1 = AVEE - AVDD
+	 * 2 = AVEE + VCL - AVDD
+	 * 3 = AVEE x 2 - AVDD
+	 * The defaults are 3 and 2 yielding 0x32
+	 */
+	u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
+	/**
+	 * @vgp: setting for VGP, the positive gamma divider voltages
+	 * VGMP the high voltage and VGSP the low voltage.
+	 * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0
+	 * The second byte contains bit 0..7 of VGMP
+	 * The third byte contains bit 0..7 of VGSP
+	 * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
+	 * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
+	 */
+	u8 vgp[NT35510_P1_VGP_LEN];
+	/**
+	 * @vgn: setting for VGN, the negative gamma divider voltages,
+	 * same layout of bytes as @vgp.
+	 */
+	u8 vgn[NT35510_P1_VGN_LEN];
+	/**
+	 * @sdeqctr: Source driver control settings, first byte is
+	 * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
+	 * mode 2 uses three steps meaning EQS3 is not used in mode
+	 * 1. Mode 2 is default. The last three parameters are EQS1, EQS2
+	 * and EQS3, setting the rise time for each equalizer step:
+	 * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
+	 * is 0x07 = 3.5 us.
+	 */
+	u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
+	/**
+	 * @sdvpctr: power/voltage behaviour during vertical porch time
+	 */
+	u8 sdvpctr;
+	/**
+	 * @t1: the number of pixel clocks on one scanline; the hardware
+	 * uses the value + 1 clock ticks, so the range 0x100..0x3FF
+	 * means 257..1024 ticks.
+	 */
+	u16 t1;
+	/**
+	 * @vbp: vertical back porch toward the PANEL, not toward the
+	 * DSI host; these are separate interfaces, in from the DSI host
+	 * and out to the panel.
+	 */
+	u8 vbp;
+	/**
+	 * @vfp: vertical front porch toward the PANEL.
+	 */
+	u8 vfp;
+	/**
+	 * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
+	 */
+	u8 psel;
+	/**
+	 * @dpmctr12: Display timing control 12
+	 * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
+	 * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
+	 * Byte 1 bit 0 selects output signal control R/L swap, 0 = normal
+	 * 1 = swap all O->E, L->R
+	 * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
+	 * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+	 * Byte 3 is FTI_H0 delay time for STP O/E signals:
+	 * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+	 */
+	u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
+	/**
+	 * @gamma_corr_pos_r: Red gamma correction parameters, positive
+	 */
+	u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
+	/**
+	 * @gamma_corr_pos_g: Green gamma correction parameters, positive
+	 */
+	u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
+	/**
+	 * @gamma_corr_pos_b: Blue gamma correction parameters, positive
+	 */
+	u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
+	/**
+	 * @gamma_corr_neg_r: Red gamma correction parameters, negative
+	 */
+	u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
+	/**
+	 * @gamma_corr_neg_g: Green gamma correction parameters, negative
+	 */
+	u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
+	/**
+	 * @gamma_corr_neg_b: Blue gamma correction parameters, negative
+	 */
+	u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+};
+
+/**
+ * struct nt35510 - state container for the NT35510 panel
+ */
+struct nt35510 {
+	/**
+	 * @dev: the container device
+	 */
+	struct device *dev;
+	/**
+	 * @conf: the specific panel configuration, as the NT35510
+	 * can be combined with many physical panels, they can have
+	 * different physical dimensions and gamma correction etc,
+	 * so this is stored in the config.
+	 */
+	const struct nt35510_config *conf;
+	/**
+	 * @panel: the DRM panel object for the instance
+	 */
+	struct drm_panel panel;
+	/**
+	 * @supplies: regulators supplying the panel
+	 */
+	struct regulator_bulk_data supplies[2];
+	/**
+	 * @reset_gpio: the reset line
+	 */
+	struct gpio_desc *reset_gpio;
+};
+
+/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
+static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
+static const u8 nt35510_vgh_on[] = { 0x01 };
+
+static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
+{
+	return container_of(panel, struct nt35510, panel);
+}
+
+#define NT35510_ROTATE_0_SETTING	0x02
+#define NT35510_ROTATE_180_SETTING	0x00
+
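+/*
+ * nt35510_send_long() - send a long command with its payload
+ *
+ * The payload is sent in chunks of at most 15 bytes: the first chunk
+ * goes out as a DCS write carrying the command byte, and any remaining
+ * bytes follow as generic writes continuing the same payload.
+ */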
+static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
+			     u8 cmd, u8 cmdlen, const u8 *seq)
+{
+	const u8 *seqp = seq;
+	int cmdwritten = 0;
+	int chunk = cmdlen;
+	int ret;
+
+	if (chunk > 15)
+		chunk = 15;
+	ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+	if (ret < 0) {
+		DRM_DEV_ERROR(nt->dev,
+			      "error sending DCS command seq cmd %02x\n",
+			      cmd);
+		return ret;
+	}
+	cmdwritten += chunk;
+	seqp += chunk;
+
+	while (cmdwritten < cmdlen) {
+		chunk = cmdlen - cmdwritten;
+		if (chunk > 15)
+			chunk = 15;
+		ret = mipi_dsi_generic_write(dsi, seqp, chunk);
+		if (ret < 0) {
+			DRM_DEV_ERROR(nt->dev,
+				      "error sending generic write seq %02x\n",
+				      cmd);
+			return ret;
+		}
+		cmdwritten += chunk;
+		seqp += chunk;
+	}
+	DRM_DEV_DEBUG(nt->dev, "sent command %02x (%d bytes)\n",
+		      cmd, cmdlen);
+	return 0;
+}
+
+static int nt35510_read_id(struct nt35510 *nt)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	u8 id1, id2, id3;
+	int ret;
+
+	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+		return ret;
+	}
+	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+		return ret;
+	}
+	ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
+	if (ret < 0) {
+		DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+		return ret;
+	}
+
+	/*
+	 * Multi-Time Programmable (?) memory contains manufacturer
+	 * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
+	 * version.
+	 */
+	DRM_DEV_INFO(nt->dev,
+		     "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
+		     id1, id2, id3);
+
+	return 0;
+}
+
+/**
+ * nt35510_setup_power() - set up power config in page 1
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_power(struct nt35510 *nt)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	int ret;
+
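+	/*
+	 * Program the supply levels (AVDD, AVEE, VGH, VGL, VGP, VGN) and
+	 * their boost circuit controls; the caller must already have
+	 * selected manufacturer command set page 1.
+	 */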
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
+				NT35510_P1_AVDD_LEN,
+				nt->conf->avdd);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
+				NT35510_P1_BT1CTR_LEN,
+				nt->conf->bt1ctr);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
+				NT35510_P1_AVEE_LEN,
+				nt->conf->avee);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
+				NT35510_P1_BT2CTR_LEN,
+				nt->conf->bt2ctr);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
+				NT35510_P1_VGH_LEN,
+				nt->conf->vgh);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
+				NT35510_P1_BT4CTR_LEN,
+				nt->conf->bt4ctr);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
+				ARRAY_SIZE(nt35510_vgh_on),
+				nt35510_vgh_on);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
+				NT35510_P1_VGL_LEN,
+				nt->conf->vgl);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
+				NT35510_P1_BT5CTR_LEN,
+				nt->conf->bt5ctr);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
+				NT35510_P1_VGP_LEN,
+				nt->conf->vgp);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
+				NT35510_P1_VGN_LEN,
+				nt->conf->vgn);
+	if (ret)
+		return ret;
+
+	/* Typically 10 ms */
+	usleep_range(10000, 20000);
+
+	return 0;
+}
+
+/**
+ * nt35510_setup_display() - set up display config in page 0
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_display(struct nt35510 *nt)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	const struct nt35510_config *conf = nt->conf;
+	u8 dopctr[NT35510_P0_DOPCTR_LEN];
+	u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+	u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
+	/* FIXME: set up any rotation (assume none for now) */
+	u8 addr_mode = NT35510_ROTATE_0_SETTING;
+	u8 val;
+	int ret;
+
+	/* Enable TE, EoTP and RGB pixel format */
+	dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+		NT35510_DOPCTR_0_N565;
+	dopctr[1] = NT35510_DOPCTR_1_CTB;
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
+				NT35510_P0_DOPCTR_LEN,
+				dopctr);
+	if (ret)
+		return ret;
+
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
+				 sizeof(addr_mode));
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Source data hold time, default 0x05 = 2.5us
+	 * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
+	 * 0x0A = 5us
+	 */
+	val = 0x0A;
+	ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
+				 sizeof(val));
+	if (ret < 0)
+		return ret;
+
+	/* EQ control for gate signals, 0x00 = 0 us */
+	gseqctr[0] = 0x00;
+	gseqctr[1] = 0x00;
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
+				NT35510_P0_GSEQCTR_LEN,
+				gseqctr);
+	if (ret)
+		return ret;
+
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
+				NT35510_P0_SDEQCTR_LEN,
+				conf->sdeqctr);
+	if (ret)
+		return ret;
+
+	ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
+				 &conf->sdvpctr, 1);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Display timing control for active and idle off mode:
+	 * the first byte contains
+	 * the two high bits of T1A and second byte the low 8 bits, and
+	 * the valid range is 0x100 (256) to 0x3ff (1023), representing
+	 * 257..1024 (value + 1) pixel clock ticks for one scanline. At 20MHz pixel
+	 * clock this covers the range of 12.85us .. 51.20us in steps of
+	 * 0.05us, the default is 0x184 (388) representing 389 ticks.
+	 * The third byte is VBPDA, vertical back porch display active
+	 * and the fourth VFPDA, vertical front porch display active,
+	 * both given in number of scanlines in the range 0x02..0xff
+	 * for 2..255 scanlines. The fifth byte is 2 bits selecting
+	 * PSEL for active and idle off mode, how much the 20MHz clock
+	 * is divided by 0..3.  This needs to be adjusted to get the right
+	 * frame rate.
+	 */
+	dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
+	dpfrctr[1] = conf->t1 & 0xFF;
+	/* Vertical back porch */
+	dpfrctr[2] = conf->vbp;
+	/* Vertical front porch */
+	dpfrctr[3] = conf->vfp;
+	dpfrctr[4] = conf->psel;
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
+				NT35510_P0_DPFRCTR1_LEN,
+				dpfrctr);
+	if (ret)
+		return ret;
+	/* For idle and partial idle off mode we decrease front porch by one */
+	dpfrctr[3]--;
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
+				NT35510_P0_DPFRCTR2_LEN,
+				dpfrctr);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
+				NT35510_P0_DPFRCTR3_LEN,
+				dpfrctr);
+	if (ret)
+		return ret;
+
+	/* Enable TE on vblank */
+	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+	if (ret)
+		return ret;
+
+	/* Turn on the pads? */
+	ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
+				NT35510_P0_DPMCTR12_LEN,
+				conf->dpmctr12);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int nt35510_set_brightness(struct backlight_device *bl)
+{
+	struct nt35510 *nt = bl_get_data(bl);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	u8 brightness = bl->props.brightness;
+	int ret;
+
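+	/* Brightness is a single byte sent with the standard DCS command, 0..255 */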
+	DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+				 &brightness,
+				 sizeof(brightness));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct backlight_ops nt35510_bl_ops = {
+	.update_status = nt35510_set_brightness,
+};
+
+/*
+ * This is the power-on sequence: enable the supplies, cycle the reset
+ * line, then program the controller over DSI.
+ */
+static int nt35510_power_on(struct nt35510 *nt)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
+	if (ret < 0) {
+		dev_err(nt->dev, "unable to enable regulators\n");
+		return ret;
+	}
+
+	/* Toggle RESET in accordance with datasheet page 370 */
+	if (nt->reset_gpio) {
+		gpiod_set_value(nt->reset_gpio, 1);
+		/* Active min 10 us according to datasheet, let's say 20 */
+		usleep_range(20, 1000);
+		gpiod_set_value(nt->reset_gpio, 0);
+		/*
+		 * 5 ms during sleep mode, 120 ms during sleep out mode
+		 * according to datasheet, let's use 120-140 ms.
+		 */
+		usleep_range(120000, 140000);
+	}
+
+	ret = nt35510_read_id(nt);
+	if (ret)
+		return ret;
+
+	/* Set up power and gamma settings in manufacturer control, page 1 */
+	ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+				ARRAY_SIZE(nt35510_mauc_select_page_1),
+				nt35510_mauc_select_page_1);
+	if (ret)
+		return ret;
+
+	ret = nt35510_setup_power(nt);
+	if (ret)
+		return ret;
+
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_pos_r);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_pos_g);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_pos_b);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_neg_r);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_neg_g);
+	if (ret)
+		return ret;
+	ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+				NT35510_P1_GAMMA_LEN,
+				nt->conf->gamma_corr_neg_b);
+	if (ret)
+		return ret;
+
+	/* Set up display settings in manufacturer control, page 0 */
+	ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+				ARRAY_SIZE(nt35510_mauc_select_page_0),
+				nt35510_mauc_select_page_0);
+	if (ret)
+		return ret;
+
+	ret = nt35510_setup_display(nt);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int nt35510_power_off(struct nt35510 *nt)
+{
+	int ret;
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies), nt->supplies);
+	if (ret)
+		return ret;
+
+	if (nt->reset_gpio)
+		gpiod_set_value(nt->reset_gpio, 1);
+
+	return 0;
+}
+
+static int nt35510_unprepare(struct drm_panel *panel)
+{
+	struct nt35510 *nt = panel_to_nt35510(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	int ret;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
+			      ret);
+		return ret;
+	}
+	usleep_range(10000, 20000);
+
+	/* Enter sleep mode */
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
+			      ret);
+		return ret;
+	}
+
+	/* Wait ~4 frames; the vendor driver uses a 5 ms delay here */
+	usleep_range(5000, 10000);
+
+	ret = nt35510_power_off(nt);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int nt35510_prepare(struct drm_panel *panel)
+{
+	struct nt35510 *nt = panel_to_nt35510(panel);
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+	int ret;
+
+	ret = nt35510_power_on(nt);
+	if (ret)
+		return ret;
+
+	/* Exit sleep mode */
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
+			      ret);
+		return ret;
+	}
+	/* Up to 120 ms */
+	usleep_range(120000, 150000);
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret) {
+		DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
+			      ret);
+		return ret;
+	}
+	/* Some 10 ms */
+	usleep_range(10000, 20000);
+
+	return 0;
+}
+
+static int nt35510_get_modes(struct drm_panel *panel,
+			     struct drm_connector *connector)
+{
+	struct nt35510 *nt = panel_to_nt35510(panel);
+	struct drm_display_mode *mode;
+	struct drm_display_info *info;
+
+	info = &connector->display_info;
+	info->width_mm = nt->conf->width_mm;
+	info->height_mm = nt->conf->height_mm;
+	mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
+	if (!mode) {
+		DRM_ERROR("bad mode or failed to add mode\n");
+		return -EINVAL;
+	}
+	drm_mode_set_name(mode);
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+	mode->width_mm = nt->conf->width_mm;
+	mode->height_mm = nt->conf->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35510_drm_funcs = {
+	.unprepare = nt35510_unprepare,
+	.prepare = nt35510_prepare,
+	.get_modes = nt35510_get_modes,
+};
+
+static int nt35510_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct nt35510 *nt;
+	int ret;
+
+	nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
+	if (!nt)
+		return -ENOMEM;
+	mipi_dsi_set_drvdata(dsi, nt);
+	nt->dev = dev;
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	/*
+	 * Datasheet suggests max HS rate for NT35510 is 250 MHz
+	 * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
+	 * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
+	 * However these frequencies appear in source code for the Hydis
+	 * HVA40WV1 panel and setting up the LP frequency makes the panel
+	 * not work.
+	 *
+	 * TODO: if other panels prove to be closer to the datasheet,
+	 * maybe make this a per-panel config in struct nt35510_config?
+	 */
+	dsi->hs_rate = 349440000;
+	dsi->lp_rate = 9600000;
+	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+		MIPI_DSI_MODE_EOT_PACKET;
+
+	/*
+	 * Every new incarnation of this display must have a unique
+	 * configuration entry in this driver's match data.
+	 */
+	nt->conf = of_device_get_match_data(dev);
+	if (!nt->conf) {
+		dev_err(dev, "missing device configuration\n");
+		return -ENODEV;
+	}
+
+	nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
+	nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
+				      nt->supplies);
+	if (ret < 0)
+		return ret;
+	ret = regulator_set_voltage(nt->supplies[0].consumer,
+				    2300000, 4800000);
+	if (ret)
+		return ret;
+	ret = regulator_set_voltage(nt->supplies[1].consumer,
+				    1650000, 3300000);
+	if (ret)
+		return ret;
+
+	nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+	if (IS_ERR(nt->reset_gpio)) {
+		dev_err(dev, "error getting RESET GPIO\n");
+		return PTR_ERR(nt->reset_gpio);
+	}
+
+	drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	/*
+	 * First, try to locate an external backlight (such as on GPIO)
+	 * if this fails, assume we will want to use the internal backlight
+	 * control.
+	 */
+	ret = drm_panel_of_backlight(&nt->panel);
+	if (ret) {
+		dev_err(dev, "error getting external backlight %d\n", ret);
+		return ret;
+	}
+	if (!nt->panel.backlight) {
+		struct backlight_device *bl;
+
+		bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
+						    &nt35510_bl_ops, NULL);
+		if (IS_ERR(bl)) {
+			DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+			return PTR_ERR(bl);
+		}
+		bl->props.max_brightness = 255;
+		bl->props.brightness = 255;
+		bl->props.power = FB_BLANK_POWERDOWN;
+		nt->panel.backlight = bl;
+	}
+
+	ret = drm_panel_add(&nt->panel);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		drm_panel_remove(&nt->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nt35510_remove(struct mipi_dsi_device *dsi)
+{
+	struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	mipi_dsi_detach(dsi);
+	/* Power off */
+	ret = nt35510_power_off(nt);
+	drm_panel_remove(&nt->panel);
+
+	return ret;
+}
+
+/*
+ * These gamma correction values are 10-bit tuples, so only bits 0 and 1 are
+ * ever used in the first byte. They form a positive and a negative gamma
+ * correction curve for each color; values must be strictly higher for each
+ * step on the curve. As can be seen, these default curves go from 0x0001
+ * to 0x03FE.
+ */
+#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+		0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+		0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+		0x83, 0x02, 0x78, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+		0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+		0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+		0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+		0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+		0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+		0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+		0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
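+
+/*
+ * Decoding example: the first two tuples of the default curve,
+ * 0x00 0x01 and 0x00 0x43, are the 10-bit levels V0 = 0x001 and
+ * V1 = 0x043.
+ */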
+
+/*
+ * The Hydis HVA40WV1 panel
+ */
+static const struct nt35510_config nt35510_hydis_hva40wv1 = {
+	.width_mm = 52,
+	.height_mm = 86,
+	/*
+	 * As the Hydis panel is used in command mode, the porches etc
+	 * are settings programmed internally into the NT35510 controller
+	 * and generated toward the physical display. As the panel is not
+	 * used in video mode, these are not really exposed to the DSI
+	 * host.
+	 *
+	 * Display frame rate control:
+	 * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
+	 */
+	.mode = {
+		/* The internal pixel clock of the NT35510 is 20 MHz */
+		.clock = 20000,
+		.hdisplay = 480,
+		.hsync_start = 480 + 2, /* HFP = 2 */
+		.hsync_end = 480 + 2 + 0, /* HSync = 0 */
+		.htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
+		.vdisplay = 800,
+		.vsync_start = 800 + 2, /* VFP = 2 */
+		.vsync_end = 800 + 2 + 0, /* VSync = 0 */
+		.vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
+		.vrefresh = 60, /* Calculated */
+		.flags = 0,
+	},
+	/* 0x09: AVDD = 5.6V */
+	.avdd = { 0x09, 0x09, 0x09 },
+	/* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
+	.bt1ctr = { 0x34, 0x34, 0x34 },
+	/* 0x09: AVEE = -5.6V */
+	.avee = { 0x09, 0x09, 0x09 },
+	/* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
+	.bt2ctr = { 0x24, 0x24, 0x24 },
+	/* 0x05 = 12V */
+	.vgh = { 0x05, 0x05, 0x05 },
+	/* 0x24: HCK = Hsync/2, VGH = AVDD - AVEE + VDDB */
+	.bt4ctr = { 0x24, 0x24, 0x24 },
+	/* 0x0B = -13V */
+	.vgl = { 0x0B, 0x0B, 0x0B },
+	/* 0x24: LCK = Hsync/2, VGL = AVEE + VCL - AVDD */
+	.bt5ctr = { 0x24, 0x24, 0x24 },
+	/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+	.vgp = { 0x00, 0xA3, 0x00 },
+	/* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+	.vgn = { 0x00, 0xA3, 0x00 },
+	/* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
+	.sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
+	/* SDVPCTR: Normal operation off color during v porch */
+	.sdvpctr = 0x01,
+	/* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+	.t1 = 0x0184,
+	/* VBP: vertical back porch toward the panel */
+	.vbp = 7,
+	/* VFP: vertical front porch toward the panel */
+	.vfp = 50,
+	/* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
+	.psel = 0,
+	/* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+	.dpmctr12 = { 0x03, 0x00, 0x00, },
+	/* Default gamma correction values */
+	.gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
+	.gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
+	.gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
+	.gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
+	.gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
+	.gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
+};
+
+static const struct of_device_id nt35510_of_match[] = {
+	{
+		.compatible = "hydis,hva40wv1",
+		.data = &nt35510_hydis_hva40wv1,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, nt35510_of_match);
+
+static struct mipi_dsi_driver nt35510_driver = {
+	.probe = nt35510_probe,
+	.remove = nt35510_remove,
+	.driver = {
+		.name = "panel-novatek-nt35510",
+		.of_match_table = nt35510_of_match,
+	},
+};
+module_mipi_dsi_driver(nt35510_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("NT35510-based panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3c52f15..9bb2e8c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -373,6 +373,12 @@ static const struct of_device_id ld9040_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ld9040_of_match);
 
+static const struct spi_device_id ld9040_ids[] = {
+	{ "ld9040", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ld9040_ids);
+
 static struct spi_driver ld9040_driver = {
 	.probe = ld9040_probe,
 	.remove = ld9040_remove,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
new file mode 100644
index 0000000..9d843fc
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019, Michael Srba
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e88a0_ams452ef01 {
+	struct drm_panel panel;
+	struct mipi_dsi_device *dsi;
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *reset_gpio;
+
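+	/* True while the panel is powered up and initialized */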
+	bool prepared;
+};
+
+static inline struct s6e88a0_ams452ef01 *
+to_s6e88a0_ams452ef01(struct drm_panel *panel)
+{
+	return container_of(panel, struct s6e88a0_ams452ef01, panel);
+}
+
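+/*
+ * Note: on a write error this macro returns from the *enclosing*
+ * function, so it may only be used in functions returning int.
+ */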
+#define dsi_dcs_write_seq(dsi, seq...) do {				\
+		static const u8 d[] = { seq };				\
+		int ret;						\
+		ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d));	\
+		if (ret < 0)						\
+			return ret;					\
+	} while (0)
+
+static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
+{
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+	usleep_range(5000, 6000);
+	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+	usleep_range(1000, 2000);
+	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+	usleep_range(10000, 11000);
+}
+
+static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
+{
+	struct mipi_dsi_device *dsi = ctx->dsi;
+	struct device *dev = &dsi->dev;
+	int ret;
+
+	dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+	dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+	dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+
+	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+		return ret;
+	}
+	msleep(120);
+
+	// set default brightness/gamma
+	dsi_dcs_write_seq(dsi, 0xca,
+			  0x01, 0x00, 0x01, 0x00, 0x01, 0x00,	// V255 RR,GG,BB
+			  0x80, 0x80, 0x80,			// V203 R,G,B
+			  0x80, 0x80, 0x80,			// V151 R,G,B
+			  0x80, 0x80, 0x80,			// V87  R,G,B
+			  0x80, 0x80, 0x80,			// V51  R,G,B
+			  0x80, 0x80, 0x80,			// V35  R,G,B
+			  0x80, 0x80, 0x80,			// V23  R,G,B
+			  0x80, 0x80, 0x80,			// V11  R,G,B
+			  0x6b, 0x68, 0x71,			// V3   R,G,B
+			  0x00, 0x00, 0x00);			// V1   R,G,B
+	// set default AMOLED off ratio
+	dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+	dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+	dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+	dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+	dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+
+	ret = mipi_dsi_dcs_set_display_on(dsi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set display on: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+{
+	struct mipi_dsi_device *dsi = ctx->dsi;
+	struct device *dev = &dsi->dev;
+	int ret;
+
+	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+	ret = mipi_dsi_dcs_set_display_off(dsi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to set display off: %d\n", ret);
+		return ret;
+	}
+	msleep(35);
+
+	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+		return ret;
+	}
+	msleep(120);
+
+	return 0;
+}
+
+static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
+{
+	struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+	struct device *dev = &ctx->dsi->dev;
+	int ret;
+
+	if (ctx->prepared)
+		return 0;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0) {
+		dev_err(dev, "Failed to enable regulators: %d\n", ret);
+		return ret;
+	}
+
+	s6e88a0_ams452ef01_reset(ctx);
+
+	ret = s6e88a0_ams452ef01_on(ctx);
+	if (ret < 0) {
+		dev_err(dev, "Failed to initialize panel: %d\n", ret);
+		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+				       ctx->supplies);
+		return ret;
+	}
+
+	ctx->prepared = true;
+	return 0;
+}
+
+static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
+{
+	struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+	struct device *dev = &ctx->dsi->dev;
+	int ret;
+
+	if (!ctx->prepared)
+		return 0;
+
+	ret = s6e88a0_ams452ef01_off(ctx);
+	if (ret < 0)
+		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+	ctx->prepared = false;
+	return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
+	.clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
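+	/* = 652 * 984 * 60 / 1000 = 38494 kHz */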
+	.hdisplay = 540,
+	.hsync_start = 540 + 88,
+	.hsync_end = 540 + 88 + 4,
+	.htotal = 540 + 88 + 4 + 20,
+	.vdisplay = 960,
+	.vsync_start = 960 + 14,
+	.vsync_end = 960 + 14 + 2,
+	.vtotal = 960 + 14 + 2 + 8,
+	.vrefresh = 60,
+	.width_mm = 56,
+	.height_mm = 100,
+};
+
+static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
+					struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
+	if (!mode)
+		return -ENOMEM;
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
+	.unprepare = s6e88a0_ams452ef01_unprepare,
+	.prepare = s6e88a0_ams452ef01_prepare,
+	.get_modes = s6e88a0_ams452ef01_get_modes,
+};
+
+static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct s6e88a0_ams452ef01 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->supplies[0].supply = "vdd3";
+	ctx->supplies[1].supply = "vci";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0) {
+		dev_err(dev, "Failed to get regulators: %d\n", ret);
+		return ret;
+	}
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ctx->reset_gpio)) {
+		ret = PTR_ERR(ctx->reset_gpio);
+		dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+		return ret;
+	}
+
+	ctx->dsi = dsi;
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	dsi->lanes = 2;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+	drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
+		       DRM_MODE_CONNECTOR_DSI);
+
+	ret = drm_panel_add(&ctx->panel);
+	if (ret < 0) {
+		dev_err(dev, "Failed to add panel: %d\n", ret);
+		return ret;
+	}
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+{
+	struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
+	int ret;
+
+	ret = mipi_dsi_detach(dsi);
+	if (ret < 0)
+		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
+	{ .compatible = "samsung,s6e88a0-ams452ef01" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
+	.probe = s6e88a0_ams452ef01_probe,
+	.remove = s6e88a0_ams452ef01_remove,
+	.driver = {
+		.name = "panel-s6e88a0-ams452ef01",
+		.of_match_table = s6e88a0_ams452ef01_of_match,
+	},
+};
+module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);
+
+MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>");
+MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e14c14a..0ce81b1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -351,6 +351,65 @@ static const struct drm_panel_funcs panel_simple_funcs = {
 	.get_timings = panel_simple_get_timings,
 };
 
+static struct panel_desc panel_dpi;
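+/*
+ * panel_dpi is a sentinel descriptor: the generic "panel-dpi"
+ * compatible (the last entry of platform_of_match) points at it, and
+ * panel_simple_probe() recognizes it and builds the real descriptor
+ * from device tree properties via panel_dpi_probe().
+ */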
+
+static int panel_dpi_probe(struct device *dev,
+			   struct panel_simple *panel)
+{
+	struct display_timing *timing;
+	const struct device_node *np;
+	struct panel_desc *desc;
+	unsigned int bus_flags;
+	struct videomode vm;
+	const char *mapping;
+	int ret;
+
+	np = dev->of_node;
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
+	if (!timing)
+		return -ENOMEM;
+
+	ret = of_get_display_timing(np, "panel-timing", timing);
+	if (ret < 0) {
+		dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
+			np);
+		return ret;
+	}
+
+	desc->timings = timing;
+	desc->num_timings = 1;
+
+	of_property_read_u32(np, "width-mm", &desc->size.width);
+	of_property_read_u32(np, "height-mm", &desc->size.height);
+
+	/* "data-mapping" is optional; don't strcmp() an unset pointer */
+	ret = of_property_read_string(np, "data-mapping", &mapping);
+	if (!ret) {
+		if (!strcmp(mapping, "rgb24"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+		else if (!strcmp(mapping, "rgb565"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+		else if (!strcmp(mapping, "bgr666"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+		else if (!strcmp(mapping, "lvds666"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+	}
+
+	/* Extract bus_flags from display_timing */
+	bus_flags = 0;
+	vm.flags = timing->flags;
+	drm_bus_flags_from_videomode(&vm, &bus_flags);
+	desc->bus_flags = bus_flags;
+
+	/* We do not know the connector for the DT node, so guess it */
+	desc->connector_type = DRM_MODE_CONNECTOR_DPI;
+
+	panel->desc = desc;
+
+	return 0;
+}
+
 #define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
 	(to_check->field.typ >= bounds->field.min && \
 	 to_check->field.typ <= bounds->field.max)
@@ -437,8 +496,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 			return -EPROBE_DEFER;
 	}
 
-	if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
-		panel_simple_parse_panel_timing_node(dev, panel, &dt);
+	if (desc == &panel_dpi) {
+		/* Handle the generic panel-dpi binding */
+		err = panel_dpi_probe(dev, panel);
+		if (err)
+			goto free_ddc;
+	} else {
+		if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+			panel_simple_parse_panel_timing_node(dev, panel, &dt);
+	}
 
 	drm_panel_init(&panel->base, dev, &panel_simple_funcs,
 		       desc->connector_type);
@@ -1301,6 +1367,37 @@ static const struct panel_desc edt_et035012dm6 = {
 	.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
 };
 
+static const struct drm_display_mode edt_etm043080dh6gp_mode = {
+	.clock = 10870,
+	.hdisplay = 480,
+	.hsync_start = 480 + 8,
+	.hsync_end = 480 + 8 + 4,
+	.htotal = 480 + 8 + 4 + 41,
+
+	/*
+	 * IWG22M: the Y resolution was changed because the "dc_linuxfb"
+	 * module crashes in fb_align otherwise
+	 */
+
+	.vdisplay = 288,
+	.vsync_start = 288 + 2,
+	.vsync_end = 288 + 2 + 4,
+	.vtotal = 288 + 2 + 4 + 10,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc edt_etm043080dh6gp = {
+	.modes = &edt_etm043080dh6gp_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 100,
+		.height = 65,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
 static const struct drm_display_mode edt_etm0430g0dh6_mode = {
 	.clock = 9000,
 	.hdisplay = 480,
@@ -1440,6 +1537,33 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode frida_frd350h54004_mode = {
+	.clock = 6000,
+	.hdisplay = 320,
+	.hsync_start = 320 + 44,
+	.hsync_end = 320 + 44 + 16,
+	.htotal = 320 + 44 + 16 + 20,
+	.vdisplay = 240,
+	.vsync_start = 240 + 2,
+	.vsync_end = 240 + 2 + 6,
+	.vtotal = 240 + 2 + 6 + 2,
+	.vrefresh = 60,
+	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc frida_frd350h54004 = {
+	.modes = &frida_frd350h54004_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 77,
+		.height = 64,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+	.connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
 static const struct drm_display_mode friendlyarm_hd702e_mode = {
 	.clock		= 67185,
 	.hdisplay	= 800,
@@ -2080,6 +2204,64 @@ static const struct panel_desc lg_lp129qe = {
 	},
 };
 
+static const struct display_timing logictechno_lt161010_2nh_timing = {
+	.pixelclock = { 26400000, 33300000, 46800000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 16, 210, 354 },
+	.hback_porch = { 46, 46, 46 },
+	.hsync_len = { 1, 20, 40 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 7, 22, 147 },
+	.vback_porch = { 23, 23, 23 },
+	.vsync_len = { 1, 10, 20 },
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+		 DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt161010_2nh = {
+	.timings = &logictechno_lt161010_2nh_timing,
+	.num_timings = 1,
+	.size = {
+		.width = 154,
+		.height = 86,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
+		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+	.connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
+static const struct display_timing logictechno_lt170410_2whc_timing = {
+	.pixelclock = { 68900000, 71100000, 73400000 },
+	.hactive = { 1280, 1280, 1280 },
+	.hfront_porch = { 23, 60, 71 },
+	.hback_porch = { 23, 60, 71 },
+	.hsync_len = { 15, 40, 47 },
+	.vactive = { 800, 800, 800 },
+	.vfront_porch = { 5, 7, 10 },
+	.vback_porch = { 5, 7, 10 },
+	.vsync_len = { 6, 9, 12 },
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+		 DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt170410_2whc = {
+	.timings = &logictechno_lt170410_2whc_timing,
+	.num_timings = 1,
+	.size = {
+		.width = 217,
+		.height = 136,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH |
+		     DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+		     DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
 	.clock = 30400,
 	.hdisplay = 800,
@@ -2095,7 +2277,7 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
 };
 
 static const struct drm_display_mode logicpd_type_28_mode = {
-	.clock = 9000,
+	.clock = 9107,
 	.hdisplay = 480,
 	.hsync_start = 480 + 3,
 	.hsync_end = 480 + 3 + 42,
@@ -2224,6 +2406,51 @@ static const struct panel_desc netron_dy_e231732 = {
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+	{
+		.clock = 138500,
+		.hdisplay = 1920,
+		.hsync_start = 1920 + 48,
+		.hsync_end = 1920 + 48 + 32,
+		.htotal = 1920 + 48 + 32 + 80,
+		.vdisplay = 1080,
+		.vsync_start = 1080 + 3,
+		.vsync_end = 1080 + 3 + 5,
+		.vtotal = 1080 + 3 + 5 + 23,
+		.vrefresh = 60,
+		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+	}, {
+		.clock = 110920,
+		.hdisplay = 1920,
+		.hsync_start = 1920 + 48,
+		.hsync_end = 1920 + 48 + 32,
+		.htotal = 1920 + 48 + 32 + 80,
+		.vdisplay = 1080,
+		.vsync_start = 1080 + 3,
+		.vsync_end = 1080 + 3 + 5,
+		.vtotal = 1080 + 3 + 5 + 23,
+		.vrefresh = 48,
+		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+	}
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+	.modes = neweast_wjfh116008a_modes,
+	.num_modes = 2,
+	.bpc = 6,
+	.size = {
+		.width = 260,
+		.height = 150,
+	},
+	.delay = {
+		.prepare = 110,
+		.enable = 20,
+		.unprepare = 500,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+	.connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
 static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
 	.clock = 9000,
 	.hdisplay = 480,
@@ -2390,15 +2617,15 @@ static const struct panel_desc ontat_yx700wv03 = {
 };
 
 static const struct drm_display_mode ortustech_com37h3m_mode  = {
-	.clock = 22153,
+	.clock = 22230,
 	.hdisplay = 480,
-	.hsync_start = 480 + 8,
-	.hsync_end = 480 + 8 + 10,
-	.htotal = 480 + 8 + 10 + 10,
+	.hsync_start = 480 + 40,
+	.hsync_end = 480 + 40 + 10,
+	.htotal = 480 + 40 + 10 + 40,
 	.vdisplay = 640,
 	.vsync_start = 640 + 4,
-	.vsync_end = 640 + 4 + 3,
-	.vtotal = 640 + 4 + 3 + 4,
+	.vsync_end = 640 + 4 + 2,
+	.vtotal = 640 + 4 + 2 + 4,
 	.vrefresh = 60,
 	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 };
@@ -2439,6 +2666,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+	.connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode  = {
@@ -2464,7 +2692,8 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
 		.height = 91,
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
-	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+		DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
 	.connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
@@ -2546,6 +2775,35 @@ static const struct panel_desc rocktech_rk070er9427 = {
 	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
+	.clock = 71100,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 48,
+	.hsync_end = 1280 + 48 + 32,
+	.htotal = 1280 + 48 + 32 + 80,
+	.vdisplay = 800,
+	.vsync_start = 800 + 2,
+	.vsync_end = 800 + 2 + 5,
+	.vtotal = 800 + 2 + 5 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc rocktech_rk101ii01d_ct = {
+	.modes = &rocktech_rk101ii01d_ct_mode,
+	.num_modes = 1,
+	.size = {
+		.width = 217,
+		.height = 136,
+	},
+	.delay = {
+		.prepare = 50,
+		.disable = 50,
+	},
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
 	.clock = 271560,
 	.hdisplay = 2560,
@@ -2768,30 +3026,6 @@ static const struct panel_desc sharp_lq123p1jx31 = {
 	},
 };
 
-static const struct drm_display_mode sharp_lq150x1lg11_mode = {
-	.clock = 71100,
-	.hdisplay = 1024,
-	.hsync_start = 1024 + 168,
-	.hsync_end = 1024 + 168 + 64,
-	.htotal = 1024 + 168 + 64 + 88,
-	.vdisplay = 768,
-	.vsync_start = 768 + 37,
-	.vsync_end = 768 + 37 + 2,
-	.vtotal = 768 + 37 + 2 + 8,
-	.vrefresh = 60,
-};
-
-static const struct panel_desc sharp_lq150x1lg11 = {
-	.modes = &sharp_lq150x1lg11_mode,
-	.num_modes = 1,
-	.bpc = 6,
-	.size = {
-		.width = 304,
-		.height = 228,
-	},
-	.bus_format = MEDIA_BUS_FMT_RGB565_1X16,
-};
-
 static const struct display_timing sharp_ls020b1dd01d_timing = {
 	.pixelclock = { 2000000, 4200000, 5000000 },
 	.hactive = { 240, 240, 240 },
@@ -3023,7 +3257,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
 		.width = 194,
 		.height = 116,
 	},
-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
 	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
@@ -3286,6 +3520,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "edt,et035012dm6",
 		.data = &edt_et035012dm6,
 	}, {
+		.compatible = "edt,etm043080dh6gp",
+		.data = &edt_etm043080dh6gp,
+	}, {
 		.compatible = "edt,etm0430g0dh6",
 		.data = &edt_etm0430g0dh6,
 	}, {
@@ -3310,6 +3547,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "foxlink,fl500wvr00-a0t",
 		.data = &foxlink_fl500wvr00_a0t,
 	}, {
+		.compatible = "frida,frd350h54004",
+		.data = &frida_frd350h54004,
+	}, {
 		.compatible = "friendlyarm,hd702e",
 		.data = &friendlyarm_hd702e,
 	}, {
@@ -3388,6 +3628,15 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "logicpd,type28",
 		.data = &logicpd_type_28,
 	}, {
+		.compatible = "logictechno,lt161010-2nhc",
+		.data = &logictechno_lt161010_2nh,
+	}, {
+		.compatible = "logictechno,lt161010-2nhr",
+		.data = &logictechno_lt161010_2nh,
+	}, {
+		.compatible = "logictechno,lt170410-2whc",
+		.data = &logictechno_lt170410_2whc,
+	}, {
 		.compatible = "mitsubishi,aa070mc01-ca1",
 		.data = &mitsubishi_aa070mc01,
 	}, {
@@ -3400,6 +3649,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "netron-dy,e231732",
 		.data = &netron_dy_e231732,
 	}, {
+		.compatible = "neweast,wjfh116008a",
+		.data = &neweast_wjfh116008a,
+	}, {
 		.compatible = "newhaven,nhd-4.3-480272ef-atxl",
 		.data = &newhaven_nhd_43_480272ef_atxl,
 	}, {
@@ -3439,6 +3691,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "rocktech,rk070er9427",
 		.data = &rocktech_rk070er9427,
 	}, {
+		.compatible = "rocktech,rk101ii01d-ct",
+		.data = &rocktech_rk101ii01d_ct,
+	}, {
 		.compatible = "samsung,lsn122dl01-c01",
 		.data = &samsung_lsn122dl01_c01,
 	}, {
@@ -3466,9 +3721,6 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "sharp,lq123p1jx31",
 		.data = &sharp_lq123p1jx31,
 	}, {
-		.compatible = "sharp,lq150x1lg11",
-		.data = &sharp_lq150x1lg11,
-	}, {
 		.compatible = "sharp,ls020b1dd01d",
 		.data = &sharp_ls020b1dd01d,
 	}, {
@@ -3526,6 +3778,10 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "winstar,wf35ltiacd",
 		.data = &winstar_wf35ltiacd,
 	}, {
+		/* Must be the last entry */
+		.compatible = "panel-dpi",
+		.data = &panel_dpi,
+	}, {
 		/* sentinel */
 	}
 };
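
The panel clock corrections in this file all follow from the same arithmetic:
.clock is the pixel clock in kHz, and a mode's refresh rate is
clock / (htotal * vtotal). A minimal sketch of that check (not driver code;
the worked numbers use the 48 Hz neweast mode above):

	/*
	 * htotal * vtotal * vrefresh should land close to .clock * 1000.
	 * For the 48 Hz neweast_wjfh116008a mode:
	 *   (1920+48+32+80) * (1080+3+5+23) * 48
	 *     = 2080 * 1111 * 48 = 110922240 Hz ~= 110920 kHz
	 */
	static int mode_clock_khz(const struct drm_display_mode *m)
	{
		return m->htotal * m->vtotal * m->vrefresh / 1000;
	}
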
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index de0abf7..c91e55b 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -48,7 +48,7 @@ struct acx424akp {
 };
 
 static const struct drm_display_mode sony_acx424akp_vid_mode = {
-	.clock = 330000,
+	.clock = 27234,
 	.hdisplay = 480,
 	.hsync_start = 480 + 15,
 	.hsync_end = 480 + 15 + 0,
@@ -68,7 +68,7 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
  * command mode using the maximum HS frequency.
  */
 static const struct drm_display_mode sony_acx424akp_cmd_mode = {
-	.clock = 420160,
+	.clock = 35478,
 	.hdisplay = 480,
 	.hsync_start = 480 + 154,
 	.hsync_end = 480 + 154 + 16,
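
The two Sony clock values above were off by roughly a factor of ten.
drm_mode_vrefresh() derives the refresh rate from .clock and the h/v totals,
so such a mismatch is easy to flag; a minimal sketch, with the warning text
and tolerance being illustrative rather than from this patch:

	static void sanity_check_mode(struct drm_device *drm,
				      const struct drm_display_mode *mode,
				      int expected_hz)
	{
		int hz = drm_mode_vrefresh(mode); /* from clock and totals */

		if (abs(hz - expected_hz) > 1)
			drm_warn(drm, "mode %dx%d: clock %d kHz gives %d Hz, expected %d Hz\n",
				 mode->hdisplay, mode->vdisplay,
				 mode->clock, hz, expected_hz);
	}
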
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index cf29405..aeca15d 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,7 +86,12 @@ struct td028ttec1_panel {
 
 #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
 
-static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+/*
+ * noinline_for_stack so we don't get multiple copies of tx_buf
+ * on the stack in case of gcc-plugin-structleak
+ */
+static int noinline_for_stack
+jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
 {
 	struct spi_device *spi = lcd->spi;
 	u16 tx_buf = JBT_COMMAND | reg;
@@ -105,8 +110,9 @@ static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
 	return ret;
 }
 
-static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
-			   u8 reg, u8 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_1(struct td028ttec1_panel *lcd,
+		u8 reg, u8 data, int *err)
 {
 	struct spi_device *spi = lcd->spi;
 	u16 tx_buf[2];
@@ -128,8 +134,9 @@ static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
 	return ret;
 }
 
-static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
-			   u8 reg, u16 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_2(struct td028ttec1_panel *lcd,
+		u8 reg, u16 data, int *err)
 {
 	struct spi_device *spi = lcd->spi;
 	u16 tx_buf[3];
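
For context on the noinline_for_stack annotations: the write helpers all take
an error accumulator, so callers chain several register writes and test once.
With a structleak-style plugin force-initializing tx_buf, inlining every
helper into such a caller would put one buffer per call on a single stack
frame. A hypothetical call site (the command and register values are
placeholders, not taken from the panel datasheet):

	static int td028ttec1_init_sketch(struct td028ttec1_panel *lcd)
	{
		int ret = 0;

		/* each helper keeps its own frame, so tx_buf copies don't pile up */
		jbt_ret_write_0(lcd, 0x11, &ret);		/* hypothetical command */
		jbt_reg_write_1(lcd, 0x3a, 0x05, &ret);		/* hypothetical reg, data */
		jbt_reg_write_2(lcd, 0x2a, 0x00ef, &ret);

		return ret;
	}
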
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d..8136bab 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
 #include <linux/regulator/consumer.h>
 
 #include "panfrost_device.h"
@@ -87,18 +88,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
 
 static int panfrost_regulator_init(struct panfrost_device *pfdev)
 {
-	int ret;
+	int ret, i;
 
-	pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
-	if (IS_ERR(pfdev->regulator)) {
-		ret = PTR_ERR(pfdev->regulator);
-		dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+	if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+			"Too many supplies in compatible structure.\n"))
+		return -EINVAL;
+
+	for (i = 0; i < pfdev->comp->num_supplies; i++)
+		pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+	ret = devm_regulator_bulk_get(pfdev->dev,
+				      pfdev->comp->num_supplies,
+				      pfdev->regulators);
+	if (ret < 0) {
+		dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
 		return ret;
 	}
 
-	ret = regulator_enable(pfdev->regulator);
+	ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+				    pfdev->regulators);
 	if (ret < 0) {
-		dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+		dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
 		return ret;
 	}
 
@@ -107,7 +117,81 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
 
 static void panfrost_regulator_fini(struct panfrost_device *pfdev)
 {
-	regulator_disable(pfdev->regulator);
+	regulator_bulk_disable(pfdev->comp->num_supplies,
+			pfdev->regulators);
+}
+
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+		if (!pfdev->pm_domain_devs[i])
+			break;
+
+		if (pfdev->pm_domain_links[i])
+			device_link_del(pfdev->pm_domain_links[i]);
+
+		dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+	}
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+	int err;
+	int i, num_domains;
+
+	num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+						 "power-domains",
+						 "#power-domain-cells");
+
+	/*
+	 * Single domain is handled by the core, and, if only a single power
+	 * domain is requested, the property is optional.
+	 */
+	if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+		return 0;
+
+	if (num_domains != pfdev->comp->num_pm_domains) {
+		dev_err(pfdev->dev,
+			"Incorrect number of power domains: %d provided, %d needed\n",
+			num_domains, pfdev->comp->num_pm_domains);
+		return -EINVAL;
+	}
+
+	if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+			"Too many supplies in compatible structure.\n"))
+		return -EINVAL;
+
+	for (i = 0; i < num_domains; i++) {
+		pfdev->pm_domain_devs[i] =
+			dev_pm_domain_attach_by_name(pfdev->dev,
+					pfdev->comp->pm_domain_names[i]);
+		if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+			err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+			pfdev->pm_domain_devs[i] = NULL;
+			dev_err(pfdev->dev,
+				"failed to get pm-domain %s(%d): %d\n",
+				pfdev->comp->pm_domain_names[i], i, err);
+			goto err;
+		}
+
+		pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+				pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+				DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+		if (!pfdev->pm_domain_links[i]) {
+			dev_err(pfdev->pm_domain_devs[i],
+				"adding device link failed!\n");
+			err = -ENODEV;
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	panfrost_pm_domain_fini(pfdev);
+	return err;
 }
 
 int panfrost_device_init(struct panfrost_device *pfdev)
@@ -140,37 +224,43 @@ int panfrost_device_init(struct panfrost_device *pfdev)
 		goto err_out1;
 	}
 
+	err = panfrost_pm_domain_init(pfdev);
+	if (err)
+		goto err_out2;
+
 	res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
 	pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
 	if (IS_ERR(pfdev->iomem)) {
 		dev_err(pfdev->dev, "failed to ioremap iomem\n");
 		err = PTR_ERR(pfdev->iomem);
-		goto err_out2;
+		goto err_out3;
 	}
 
 	err = panfrost_gpu_init(pfdev);
 	if (err)
-		goto err_out2;
+		goto err_out3;
 
 	err = panfrost_mmu_init(pfdev);
 	if (err)
-		goto err_out3;
+		goto err_out4;
 
 	err = panfrost_job_init(pfdev);
 	if (err)
-		goto err_out4;
+		goto err_out5;
 
 	err = panfrost_perfcnt_init(pfdev);
 	if (err)
-		goto err_out5;
+		goto err_out6;
 
 	return 0;
-err_out5:
+err_out6:
 	panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
 	panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
 	panfrost_gpu_fini(pfdev);
+err_out3:
+	panfrost_pm_domain_fini(pfdev);
 err_out2:
 	panfrost_reset_fini(pfdev);
 err_out1:
@@ -186,6 +276,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
 	panfrost_job_fini(pfdev);
 	panfrost_mmu_fini(pfdev);
 	panfrost_gpu_fini(pfdev);
+	panfrost_pm_domain_fini(pfdev);
 	panfrost_reset_fini(pfdev);
 	panfrost_regulator_fini(pfdev);
 	panfrost_clk_fini(pfdev);
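
The renumbered err_outN ladder above is easy to get wrong the next time a
resource is added; a common alternative is to name each label after the
teardown step it jumps to. A sketch of the same ordering with descriptive
labels (not part of this patch, and with the clk/regulator steps elided):

	static int panfrost_device_init_sketch(struct panfrost_device *pfdev)
	{
		int err;

		err = panfrost_pm_domain_init(pfdev);
		if (err)
			goto err_reset;

		err = panfrost_gpu_init(pfdev);
		if (err)
			goto err_pm_domain;

		err = panfrost_mmu_init(pfdev);
		if (err)
			goto err_gpu;

		return 0;

	err_gpu:
		panfrost_gpu_fini(pfdev);
	err_pm_domain:
		panfrost_pm_domain_fini(pfdev);
	err_reset:
		panfrost_reset_fini(pfdev);
		return err;
	}
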
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 0671381..c30c719 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
 
 #include <linux/atomic.h>
 #include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <drm/drm_device.h>
 #include <drm/drm_mm.h>
@@ -19,6 +20,8 @@ struct panfrost_job;
 struct panfrost_perfcnt;
 
 #define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
+#define MAX_PM_DOMAINS 3
 
 struct panfrost_features {
 	u16 id;
@@ -51,6 +54,23 @@ struct panfrost_features {
 	unsigned long hw_issues[64 / BITS_PER_LONG];
 };
 
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+	/* Supplies count and names. */
+	int num_supplies;
+	const char * const *supply_names;
+	/*
+	 * Number of power domains required; note that values 0 and 1 are
+	 * handled identically, as only values > 1 need special handling.
+	 */
+	int num_pm_domains;
+	/* Only required if num_pm_domains > 1. */
+	const char * const *pm_domain_names;
+};
+
 struct panfrost_device {
 	struct device *dev;
 	struct drm_device *ddev;
@@ -59,10 +79,14 @@ struct panfrost_device {
 	void __iomem *iomem;
 	struct clk *clock;
 	struct clk *bus_clock;
-	struct regulator *regulator;
+	struct regulator_bulk_data regulators[MAX_REGULATORS];
 	struct reset_control *rstc;
+	/* pm_domains for devices with more than one. */
+	struct device *pm_domain_devs[MAX_PM_DOMAINS];
+	struct device_link *pm_domain_links[MAX_PM_DOMAINS];
 
 	struct panfrost_features features;
+	const struct panfrost_compatible *comp;
 
 	spinlock_t as_lock;
 	unsigned long as_in_use_mask;
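
Only the generic single-supply data exists in this series, but the
panfrost_compatible structure is sized for SoCs that need more
(MAX_REGULATORS is 2, MAX_PM_DOMAINS is 3). A hypothetical SoC entry would
look like the following; every name here is a placeholder, not a real board:

	static const char * const example_supplies[] = { "mali", "sram" };
	static const char * const example_domains[] = { "core0", "core1", "core2" };

	static const struct panfrost_compatible example_soc_data = {
		.num_supplies = ARRAY_SIZE(example_supplies),
		.supply_names = example_supplies,
		.num_pm_domains = ARRAY_SIZE(example_domains),
		.pm_domain_names = example_domains,
	};
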
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b7a618d..882fecc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -584,6 +584,10 @@ static int panfrost_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pfdev);
 
+	pfdev->comp = of_device_get_match_data(&pdev->dev);
+	if (!pfdev->comp)
+		return -ENODEV;
+
 	/* Allocate and initialize the DRM device. */
 	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
 	if (IS_ERR(ddev))
@@ -655,16 +659,24 @@ static int panfrost_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const char * const default_supplies[] = { "mali" };
+static const struct panfrost_compatible default_data = {
+	.num_supplies = ARRAY_SIZE(default_supplies),
+	.supply_names = default_supplies,
+	.num_pm_domains = 1, /* optional */
+	.pm_domain_names = NULL,
+};
+
 static const struct of_device_id dt_match[] = {
-	{ .compatible = "arm,mali-t604" },
-	{ .compatible = "arm,mali-t624" },
-	{ .compatible = "arm,mali-t628" },
-	{ .compatible = "arm,mali-t720" },
-	{ .compatible = "arm,mali-t760" },
-	{ .compatible = "arm,mali-t820" },
-	{ .compatible = "arm,mali-t830" },
-	{ .compatible = "arm,mali-t860" },
-	{ .compatible = "arm,mali-t880" },
+	{ .compatible = "arm,mali-t604", .data = &default_data, },
+	{ .compatible = "arm,mali-t624", .data = &default_data, },
+	{ .compatible = "arm,mali-t628", .data = &default_data, },
+	{ .compatible = "arm,mali-t720", .data = &default_data, },
+	{ .compatible = "arm,mali-t760", .data = &default_data, },
+	{ .compatible = "arm,mali-t820", .data = &default_data, },
+	{ .compatible = "arm,mali-t830", .data = &default_data, },
+	{ .compatible = "arm,mali-t860", .data = &default_data, },
+	{ .compatible = "arm,mali-t880", .data = &default_data, },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec1..f2c1ddc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,28 +308,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
 	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
 	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
 		val, val == pfdev->features.l2_present, 100, 1000);
-
-	gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
-	ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
-		val, val == pfdev->features.stack_present, 100, 1000);
+	if (ret)
+		dev_err(pfdev->dev, "error powering up gpu L2");
 
 	gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
-	ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
 		val, val == pfdev->features.shader_present, 100, 1000);
+	if (ret)
+		dev_err(pfdev->dev, "error powering up gpu shader");
 
 	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
-	ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
 		val, val == pfdev->features.tiler_present, 100, 1000);
-
 	if (ret)
-		dev_err(pfdev->dev, "error powering up gpu");
+		dev_err(pfdev->dev, "error powering up gpu tiler");
 }
 
 void panfrost_gpu_power_off(struct panfrost_device *pfdev)
 {
 	gpu_write(pfdev, TILER_PWROFF_LO, 0);
 	gpu_write(pfdev, SHADER_PWROFF_LO, 0);
-	gpu_write(pfdev, STACK_PWROFF_LO, 0);
 	gpu_write(pfdev, L2_PWROFF_LO, 0);
 }
 
@@ -351,7 +349,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
 		return -ENODEV;
 
 	err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
-			       IRQF_SHARED, "gpu", pfdev);
+			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
 	if (err) {
 		dev_err(pfdev->dev, "failed to request gpu irq");
 		return err;
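
readl_relaxed_poll_timeout() returns 0 once the condition holds and
-ETIMEDOUT otherwise, which is why each power-on block above can now report
its own failure instead of OR-ing results together. The pattern, reduced to
a sketch:

	static int power_on_and_wait(struct panfrost_device *pfdev,
				     u32 pwron_reg, u32 ready_reg, u32 present)
	{
		u32 val;
		int ret;

		gpu_write(pfdev, pwron_reg, present);
		/* poll every 100us, give up after 1000us */
		ret = readl_relaxed_poll_timeout(pfdev->iomem + ready_reg, val,
						 val == present, 100, 1000);
		if (ret)
			dev_err(pfdev->dev, "block at 0x%x not ready\n", ready_reg);
		return ret;
	}
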
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9a1a72a..7914b15 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -508,7 +508,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		return -ENODEV;
 
 	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
-			       IRQF_SHARED, "job", pfdev);
+			       IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
 	if (ret) {
 		dev_err(pfdev->dev, "failed to request job irq");
 		return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 5d75f8c..ed28aeb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -640,9 +640,11 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 	if (irq <= 0)
 		return -ENODEV;
 
-	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+	err = devm_request_threaded_irq(pfdev->dev, irq,
+					panfrost_mmu_irq_handler,
 					panfrost_mmu_irq_handler_thread,
-					IRQF_SHARED, "mmu", pfdev);
+					IRQF_SHARED, KBUILD_MODNAME "-mmu",
+					pfdev);
 
 	if (err) {
 		dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 09aeaff..4f325c4 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -19,6 +19,7 @@ static struct regmap *versatile_syscon_map;
  * We detect the different syscon types from the compatible strings.
  */
 enum versatile_clcd {
+	INTEGRATOR_IMPD1,
 	INTEGRATOR_CLCD_CM,
 	VERSATILE_CLCD,
 	REALVIEW_CLCD_EB,
@@ -65,6 +66,14 @@ static const struct of_device_id versatile_clcd_of_match[] = {
 	{},
 };
 
+static const struct of_device_id impd1_clcd_of_match[] = {
+	{
+		.compatible = "arm,im-pd1-syscon",
+		.data = (void *)INTEGRATOR_IMPD1,
+	},
+	{},
+};
+
 /*
  * Core module CLCD control on the Integrator/CP, bits
  * 8 thru 19 of the CM_CONTROL register controls a bunch
@@ -125,6 +134,36 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
 			   val);
 }
 
+#define IMPD1_CTRL_OFFSET	0x18
+#define IMPD1_CTRL_DISP_LCD	(0 << 0)
+#define IMPD1_CTRL_DISP_VGA	(1 << 0)
+#define IMPD1_CTRL_DISP_LCD1	(2 << 0)
+#define IMPD1_CTRL_DISP_ENABLE	(1 << 2)
+#define IMPD1_CTRL_DISP_MASK	(7 << 0)
+
+static void pl111_impd1_enable(struct drm_device *drm, u32 format)
+{
+	u32 val;
+
+	dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");
+	val = IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE;
+
+	regmap_update_bits(versatile_syscon_map,
+			   IMPD1_CTRL_OFFSET,
+			   IMPD1_CTRL_DISP_MASK,
+			   val);
+}
+
+static void pl111_impd1_disable(struct drm_device *drm)
+{
+	dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");
+
+	regmap_update_bits(versatile_syscon_map,
+			   IMPD1_CTRL_OFFSET,
+			   IMPD1_CTRL_DISP_MASK,
+			   0);
+}
+
 /*
  * This configuration register in the Versatile and RealView
  * family is uniformly present but appears more and more
@@ -271,6 +310,20 @@ static const struct pl111_variant_data pl110_integrator = {
 };
 
 /*
+ * The IM-PD1 variant is a PL110 with a bunch of broken, or not
+ * yet implemented, features.
+ */
+static const struct pl111_variant_data pl110_impd1 = {
+	.name = "PL110 IM-PD1",
+	.is_pl110 = true,
+	.broken_clockdivider = true,
+	.broken_vblank = true,
+	.formats = pl110_integrator_pixel_formats,
+	.nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+	.fb_bpp = 16,
+};
+
+/*
  * This is the in-between PL110 variant found in the ARM Versatile,
  * supporting RGB565/BGR565
  */
@@ -322,8 +375,21 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 		/* Non-ARM reference designs, just bail out */
 		return 0;
 	}
+
 	versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
 
+	/*
+	 * On the Integrator, check if we should use the IM-PD1 instead;
+	 * if we find it, it will take precedence. This is on the Integrator/AP
+	 * which only has this option for PL110 graphics.
+	 */
+	if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+		np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
+						     &clcd_id);
+		if (np)
+			versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+	}
+
 	/* Versatile Express special handling */
 	if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
 		struct platform_device *pdev;
@@ -367,6 +433,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
 		priv->variant_display_enable = pl111_integrator_enable;
 		dev_info(dev, "set up callbacks for Integrator PL110\n");
 		break;
+	case INTEGRATOR_IMPD1:
+		versatile_syscon_map = map;
+		priv->variant = &pl110_impd1;
+		priv->variant_display_enable = pl111_impd1_enable;
+		priv->variant_display_disable = pl111_impd1_disable;
+		dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
+		break;
 	case VERSATILE_CLCD:
 		versatile_syscon_map = map;
 		/* This can do RGB565 with external PLD */
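
regmap_update_bits() performs a read-modify-write confined to the mask, so
the IM-PD1 enable/disable paths only ever touch the three-bit display-control
field and leave the rest of the register alone. The same call as above, shown
with the mask spelled out:

	/* mask 0x7 covers the LCD/VGA/LCD1 select bits plus DISP_ENABLE */
	regmap_update_bits(versatile_syscon_map,
			   IMPD1_CTRL_OFFSET,		/* reg 0x18 */
			   IMPD1_CTRL_DISP_MASK,	/* bits 2:0 */
			   IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE);
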
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index ef09dc6..d1086b2 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -36,7 +36,7 @@ static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
 
 struct ring {
 	struct qxl_ring_header      header;
-	uint8_t                     elements[0];
+	uint8_t                     elements[];
 };
 
 struct qxl_ring {
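
The elements[0] to elements[] change is part of the tree-wide flexible-array
conversion: a true flexible array member lets the compiler catch misuse
(sizeof on the array, placement in the middle of a struct) that the GNU
zero-length idiom silently allowed. Allocation keeps the same shape;
struct_size() from <linux/overflow.h> is the overflow-safe way to size it,
as in this sketch:

	struct ring_example {
		struct qxl_ring_header header;
		uint8_t elements[];	/* flexible array member */
	};

	static struct ring_example *ring_example_alloc(unsigned int nelem)
	{
		struct ring_example *r;

		/* sizeof(*r) + nelem * sizeof(r->elements[0]), overflow-checked */
		r = kzalloc(struct_size(r, elements, nelem), GFP_KERNEL);
		return r;
	}
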
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 16d73b2..09583a0 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -31,7 +31,7 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
@@ -372,19 +372,6 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
 static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
 				  struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_pending_vblank_event *event;
-	unsigned long flags;
-
-	if (crtc->state && crtc->state->event) {
-		event = crtc->state->event;
-		crtc->state->event = NULL;
-
-		spin_lock_irqsave(&dev->event_lock, flags);
-		drm_crtc_send_vblank_event(crtc, event);
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	}
-
 	qxl_crtc_update_monitors_config(crtc, "flush");
 }
 
@@ -1021,9 +1008,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
 	return &qxl_output->enc;
 }
 
-static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-};
-
 static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
 	.get_modes = qxl_conn_get_modes,
 	.mode_valid = qxl_conn_mode_valid,
@@ -1073,15 +1057,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-static void qxl_enc_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs qxl_enc_funcs = {
-	.destroy = qxl_enc_destroy,
-};
-
 static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
 {
 	if (qdev->hotplug_mode_update_property)
@@ -1100,6 +1075,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 	struct qxl_output *qxl_output;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
+	int ret;
 
 	qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
 	if (!qxl_output)
@@ -1112,15 +1088,19 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 	drm_connector_init(dev, &qxl_output->base,
 			   &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
 
-	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
-			 DRM_MODE_ENCODER_VIRTUAL, NULL);
+	ret = drm_simple_encoder_init(dev, &qxl_output->enc,
+				      DRM_MODE_ENCODER_VIRTUAL);
+	if (ret) {
+		drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
+			ret);
+		goto err_drm_connector_cleanup;
+	}
 
 	/* we get HPD via client monitors config */
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	encoder->possible_crtcs = 1 << num_output;
 	drm_connector_attach_encoder(&qxl_output->base,
 					  &qxl_output->enc);
-	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
 	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
 
 	drm_object_attach_property(&connector->base,
@@ -1130,6 +1110,11 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.suggested_y_property, 0);
 	return 0;
+
+err_drm_connector_cleanup:
+	drm_connector_cleanup(&qxl_output->base);
+	kfree(qxl_output);
+	return ret;
 }
 
 static struct drm_framebuffer *
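
drm_simple_encoder_init() registers the encoder with a stock funcs table
whose .destroy is just drm_encoder_cleanup(), so it fits encoders with no
private state that are freed together with their containing object, as
qxl_output is here. Minimal shape of such an output (names hypothetical):

	struct example_output {
		struct drm_connector conn;
		struct drm_encoder enc;	/* no private state of its own */
	};

	static int example_output_init(struct drm_device *dev,
				       struct example_output *out, int idx)
	{
		int ret;

		ret = drm_simple_encoder_init(dev, &out->enc,
					      DRM_MODE_ENCODER_VIRTUAL);
		if (ret)
			return ret;

		out->enc.possible_crtcs = BIT(idx);
		return 0;
	}
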
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d601f5..4fda3f9 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 
 #include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 #include <drm/drm_modeset_helper.h>
@@ -132,21 +133,30 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return ret;
 }
 
+static void qxl_drm_release(struct drm_device *dev)
+{
+	struct qxl_device *qdev = dev->dev_private;
+
+	/*
+	 * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
+	 * reordering qxl_modeset_fini() + qxl_device_fini() calls is
+	 * non-trivial though.
+	 */
+	qxl_modeset_fini(qdev);
+	qxl_device_fini(qdev);
+	dev->dev_private = NULL;
+	kfree(qdev);
+}
+
 static void
 qxl_pci_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct qxl_device *qdev = dev->dev_private;
 
 	drm_dev_unregister(dev);
-
-	qxl_modeset_fini(qdev);
-	qxl_device_fini(qdev);
+	drm_atomic_helper_shutdown(dev);
 	if (is_vga(pdev))
 		vga_put(pdev, VGA_RSRC_LEGACY_IO);
-
-	dev->dev_private = NULL;
-	kfree(qdev);
 	drm_dev_put(dev);
 }
 
@@ -279,6 +289,8 @@ static struct drm_driver qxl_driver = {
 	.major = 0,
 	.minor = 1,
 	.patchlevel = 0,
+
+	.release = qxl_drm_release,
 };
 
 static int __init qxl_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631..70b20ee 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -299,12 +299,12 @@ void qxl_device_fini(struct qxl_device *qdev)
 {
 	qxl_bo_unref(&qdev->current_release_bo[0]);
 	qxl_bo_unref(&qdev->current_release_bo[1]);
+	qxl_gem_fini(qdev);
+	qxl_bo_fini(qdev);
 	flush_work(&qdev->gc_work);
 	qxl_ring_free(qdev->command_ring);
 	qxl_ring_free(qdev->cursor_ring);
 	qxl_ring_free(qdev->release_ring);
-	qxl_gem_fini(qdev);
-	qxl_bo_fini(qdev);
 	io_mapping_free(qdev->surface_mapping);
 	io_mapping_free(qdev->vram_mapping);
 	iounmap(qdev->ram_header);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 16a5e90..62a5e42 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,11 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
 	return qdev;
 }
 
-static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	return 0;
-}
-
 static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			     struct ttm_mem_type_manager *man)
 {
@@ -256,7 +251,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 
 static struct ttm_bo_driver qxl_bo_driver = {
 	.ttm_tt_create = &qxl_ttm_tt_create,
-	.invalidate_caches = &qxl_invalidate_caches,
 	.init_mem_type = &qxl_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &qxl_evict_flags,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index be58369..9181175 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2231,6 +2231,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
 	.prepare = atombios_crtc_prepare,
 	.commit = atombios_crtc_commit,
 	.disable = atombios_crtc_disable,
+	.get_scanout_position = radeon_get_crtc_scanout_position,
 };
 
 void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a522e09..266e3cb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1263,7 +1263,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
 	 */
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d07c7db..35db79a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -45,6 +45,10 @@
 #include "atom.h"
 #include "radeon.h"
 
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc);
+int radeon_enable_vblank_kms(struct drm_crtc *crtc);
+void radeon_disable_vblank_kms(struct drm_crtc *crtc);
+
 static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -460,7 +464,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 		(!ASIC_IS_AVIVO(rdev) ||
 		((int) (work->target_vblank -
-		dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+		crtc->funcs->get_vblank_counter(crtc)) > 0)))
 		usleep_range(1000, 2000);
 
 	/* We borrow the event spin lock for protecting flip_status */
@@ -576,7 +580,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
 	}
 	work->base = base;
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
-		dev->driver->get_vblank_counter(dev, work->crtc_id);
+		crtc->funcs->get_vblank_counter(crtc);
 
 	/* We borrow the event spin lock for protecting flip_work */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -668,6 +672,10 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
 	.page_flip_target = radeon_crtc_page_flip_target,
+	.get_vblank_counter = radeon_get_vblank_counter_kms,
+	.enable_vblank = radeon_enable_vblank_kms,
+	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1973,3 +1981,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
 	return ret;
 }
+
+bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc,
+				 bool in_vblank_irq, int *vpos, int *hpos,
+				 ktime_t *stime, ktime_t *etime,
+				 const struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+
+	return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+					  stime, etime, mode);
+}
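
This hunk is radeon's half of the per-CRTC vblank conversion: the hooks move
from drm_driver, where they took (dev, pipe), into drm_crtc_funcs, where they
take the crtc itself. The translation inside each hook is mechanical, as
sketched here (example_hw_frame_count is a hypothetical hardware accessor):

	/* old: u32 get_vblank_counter(struct drm_device *dev, unsigned int pipe) */
	static u32 example_get_vblank_counter(struct drm_crtc *crtc)
	{
		struct drm_device *dev = crtc->dev;	/* recover old arguments */
		unsigned int pipe = crtc->index;

		return example_hw_frame_count(dev, pipe);
	}
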
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 28eef92..0083087 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -301,35 +301,8 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 	return connector;
 }
 
-static void radeon_dp_register_mst_connector(struct drm_connector *connector)
-{
-	struct drm_device *dev = connector->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	radeon_fb_add_connector(rdev, connector);
-
-	drm_connector_register(connector);
-}
-
-static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
-					    struct drm_connector *connector)
-{
-	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
-	struct drm_device *dev = master->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	drm_connector_unregister(connector);
-	radeon_fb_remove_connector(rdev, connector);
-	drm_connector_cleanup(connector);
-
-	kfree(connector);
-	DRM_DEBUG_KMS("\n");
-}
-
 static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
-	.register_connector = radeon_dp_register_mst_connector,
-	.destroy_connector = radeon_dp_destroy_mst_connector,
 };
 
 static struct
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8696af1..59f8186 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -120,9 +120,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 		       bool fbcon, bool freeze);
 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -602,16 +599,6 @@ static const struct file_operations radeon_driver_kms_fops = {
 #endif
 };
 
-static bool
-radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
-				 bool in_vblank_irq, int *vpos, int *hpos,
-				 ktime_t *stime, ktime_t *etime,
-				 const struct drm_display_mode *mode)
-{
-	return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
-					  stime, etime, mode);
-}
-
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_GEM | DRIVER_RENDER,
@@ -620,11 +607,6 @@ static struct drm_driver kms_driver = {
 	.postclose = radeon_driver_postclose_kms,
 	.lastclose = radeon_driver_lastclose_kms,
 	.unload = radeon_driver_unload_kms,
-	.get_vblank_counter = radeon_get_vblank_counter_kms,
-	.enable_vblank = radeon_enable_vblank_kms,
-	.disable_vblank = radeon_disable_vblank_kms,
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-	.get_scanout_position = radeon_get_crtc_scanout_position,
 	.irq_preinstall = radeon_driver_irq_preinstall_kms,
 	.irq_postinstall = radeon_driver_irq_postinstall_kms,
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ec0b7d6..cf3156a 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -354,15 +354,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
 	drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
 			      &radeon_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
-				 RADEONFB_CONN_LIMIT);
+	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
 	if (ret)
 		goto free;
 
-	ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-	if (ret)
-		goto fini;
-
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(rdev->ddev);
 
@@ -404,15 +399,3 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 		return true;
 	return false;
 }
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
-	if (rdev->mode_info.rfbdev)
-		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
-	if (rdev->mode_info.rfbdev)
-		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
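
drm_fb_helper_init() lost its connector-count argument this cycle, and
drm_fb_helper_single_add_all_connectors() goes with it: the helper now tracks
connectors internally, which is also what lets the MST register/destroy
connector callbacks above disappear. The resulting init sequence, as a
sketch:

	static int example_fbdev_init(struct drm_device *dev,
				      struct drm_fb_helper *helper,
				      const struct drm_fb_helper_funcs *funcs)
	{
		int ret;

		drm_fb_helper_prepare(dev, helper, funcs);

		ret = drm_fb_helper_init(dev, helper);	/* no connector limit */
		if (ret)
			return ret;

		/* no add_all_connectors step anymore */
		ret = drm_fb_helper_initial_config(helper, 32);
		if (ret)
			drm_fb_helper_fini(helper);
		return ret;
	}
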
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dd2f19b..58176db 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -745,14 +745,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 /**
  * radeon_get_vblank_counter_kms - get frame count
  *
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
  *
  * Gets the frame count on the requested crtc (all asics).
  * Returns frame count on success, -EINVAL on failure.
  */
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	int vpos, hpos, stat;
 	u32 count;
 	struct radeon_device *rdev = dev->dev_private;
@@ -814,25 +815,26 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 /**
  * radeon_enable_vblank_kms - enable vblank interrupt
  *
- * @dev: drm dev pointer
  * @crtc: crtc to enable vblank interrupt for
  *
  * Enable the interrupt on the requested crtc (all asics).
  * Returns 0 on success, -EINVAL on failure.
  */
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned long irqflags;
 	int r;
 
-	if (crtc < 0 || crtc >= rdev->num_crtc) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return -EINVAL;
 	}
 
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
-	rdev->irq.crtc_vblank_int[crtc] = true;
+	rdev->irq.crtc_vblank_int[pipe] = true;
 	r = radeon_irq_set(rdev);
 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 	return r;
@@ -841,23 +843,24 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
 /**
  * radeon_disable_vblank_kms - disable vblank interrupt
  *
- * @dev: drm dev pointer
  * @crtc: crtc to disable vblank interrupt for
  *
  * Disable the interrupt on the requested crtc (all asics).
  */
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned long irqflags;
 
-	if (crtc < 0 || crtc >= rdev->num_crtc) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return;
 	}
 
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
-	rdev->irq.crtc_vblank_int[crtc] = false;
+	rdev->irq.crtc_vblank_int[pipe] = false;
 	radeon_irq_set(rdev);
 	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index a1985a5..8817fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1111,7 +1111,8 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
 	.mode_set_base_atomic = radeon_crtc_set_base_atomic,
 	.prepare = radeon_crtc_prepare,
 	.commit = radeon_crtc_commit,
-	.disable = radeon_crtc_disable
+	.disable = radeon_crtc_disable,
+	.get_scanout_position = radeon_get_crtc_scanout_position,
 };
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9656517..c7f2237 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -880,6 +880,12 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 				      ktime_t *stime, ktime_t *etime,
 				      const struct drm_display_mode *mode);
 
+extern bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+				 int *vpos, int *hpos,
+				 ktime_t *stime, ktime_t *etime,
+				 const struct drm_display_mode *mode);
+
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
 radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
@@ -980,9 +986,6 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 
 void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
 
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
 void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
 
 int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b3380ff..5d50c9e 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -66,11 +66,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
 	return rdev;
 }
 
-static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	return 0;
-}
-
 static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				struct ttm_mem_type_manager *man)
 {
@@ -774,7 +769,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,
 	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
-	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 05e8b4d..2cb85db 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,7 +2979,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 
 	if (rdev->family == CHIP_HAINAN) {
 		if ((rdev->pdev->revision == 0x81) ||
-		    (rdev->pdev->revision == 0x83) ||
 		    (rdev->pdev->revision == 0xC3) ||
 		    (rdev->pdev->device == 0x6664) ||
 		    (rdev->pdev->device == 0x6665) ||
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3cd83a0..c07c6a8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -121,7 +121,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
 	 * Attach the bridge to the encoder. The bridge will create the
 	 * connector.
 	 */
-	ret = drm_bridge_attach(encoder, bridge, NULL);
+	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (ret) {
 		drm_encoder_cleanup(encoder);
 		return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8ffa4fb..ab0d496 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -23,6 +23,7 @@
 #include <drm/drm_bridge.h>
 #include <drm/drm_of.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_print.h>
 #include <drm/drm_probe_helper.h>
 
 #include "rcar_lvds.h"
@@ -590,8 +591,9 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
 }
 
 static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
-				    struct drm_atomic_state *state)
+				    struct drm_bridge_state *old_bridge_state)
 {
+	struct drm_atomic_state *state = old_bridge_state->base.state;
 	struct drm_connector *connector;
 	struct drm_crtc *crtc;
 
@@ -603,7 +605,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
 }
 
 static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
-				     struct drm_atomic_state *state)
+				     struct drm_bridge_state *old_bridge_state)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 
@@ -618,7 +620,8 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
 
 	/* Disable the companion LVDS encoder in dual-link mode. */
 	if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
-		lvds->companion->funcs->atomic_disable(lvds->companion, state);
+		lvds->companion->funcs->atomic_disable(lvds->companion,
+						       old_bridge_state);
 
 	clk_disable_unprepare(lvds->clocks.mod);
 }
@@ -641,7 +644,8 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
 	return true;
 }
 
-static int rcar_lvds_attach(struct drm_bridge *bridge)
+static int rcar_lvds_attach(struct drm_bridge *bridge,
+			    enum drm_bridge_attach_flags flags)
 {
 	struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 	struct drm_connector *connector = &lvds->connector;
@@ -651,7 +655,12 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
 	/* If we have a next bridge just attach it. */
 	if (lvds->next_bridge)
 		return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
-					 bridge);
+					 bridge, flags);
+
+	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!");
+		return -EINVAL;
+	}
 
 	/* Otherwise if we have a panel, create a connector. */
 	if (!lvds->panel)
@@ -682,6 +691,9 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
 static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
 	.attach = rcar_lvds_attach,
 	.detach = rcar_lvds_detach,
+	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+	.atomic_reset = drm_atomic_helper_bridge_reset,
 	.atomic_enable = rcar_lvds_atomic_enable,
 	.atomic_disable = rcar_lvds_atomic_disable,
 	.mode_fixup = rcar_lvds_mode_fixup,
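
drm_bridge_attach() grew a flags argument, and DRM_BRIDGE_ATTACH_NO_CONNECTOR
asks the bridge not to create a connector itself (the display controller or a
helper creates it instead). rcar-lvds rejects that mode for now; a bridge
supporting both would look roughly like this (the helper names are
hypothetical):

	static int example_bridge_attach(struct drm_bridge *bridge,
					 enum drm_bridge_attach_flags flags)
	{
		struct example_bridge *ebr = to_example_bridge(bridge);

		if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
			return 0;	/* caller owns connector creation */

		return example_create_connector(ebr);	/* legacy path */
	}
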
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 521fe42..2fdc455 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -124,7 +124,7 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
 
 	drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
 
-	ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
+	ret = drm_fb_helper_init(dev, helper);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev->dev,
 			      "Failed to initialize drm fb helper - %d.\n",
@@ -132,13 +132,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
 		return ret;
 	}
 
-	ret = drm_fb_helper_single_add_all_connectors(helper);
-	if (ret < 0) {
-		DRM_DEV_ERROR(dev->dev,
-			      "Failed to add connectors - %d.\n", ret);
-		goto err_drm_fb_helper_fini;
-	}
-
 	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
 	if (ret < 0) {
 		DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7582d0e..0d18846 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -6,6 +6,7 @@
 
 #include <linux/dma-buf.h>
 #include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <drm/drm.h>
 #include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d04b349..cecb2cc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -724,7 +724,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
 					DRM_PLANE_HELPER_NO_SCALING;
 
-	if (!crtc || !fb)
+	if (!crtc || WARN_ON(!fb))
 		return 0;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0b3d18c..cc67262 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -328,7 +328,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 {
 	int act_height;
 
-	act_height = (src_h + vskiplines - 1) / vskiplines;
+	act_height = DIV_ROUND_UP(src_h, vskiplines);
 
 	if (act_height == dst_h)
 		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
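
DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), the same computation the
open-coded line performed, just self-describing. A worked instance of the
line above:

	/* src_h = 10, vskiplines = 4: (10 + 4 - 1) / 4 = 3 output lines,
	 * where plain integer division 10 / 4 would give 2 */
	act_height = DIV_ROUND_UP(src_h, vskiplines);
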
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f25a3674..449a629 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -646,7 +646,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
 			goto err_free_connector;
 		}
 	} else {
-		ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+		ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
 		if (ret) {
 			DRM_DEV_ERROR(drm_dev->dev,
 				      "failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ae73027..9078478 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -98,7 +98,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
 		if (of_property_read_u32(endpoint, "reg", &endpoint_id))
 			endpoint_id = 0;
 
-		if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+		/* if subdriver (> 0) or error case (< 0), ignore entry */
+		if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
 			continue;
 
 		child_count++;
@@ -144,7 +145,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
 
 	rgb->bridge = bridge;
 
-	ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+	ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
 	if (ret) {
 		DRM_DEV_ERROR(drm_dev->dev,
 			      "failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d7908649..877ce9b 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
 		      __entry->job_count, __entry->hw_job_count)
 );
 
+TRACE_EVENT(drm_run_job,
+	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+	    TP_ARGS(sched_job, entity),
+	    TP_STRUCT__entry(
+			     __field(struct drm_sched_entity *, entity)
+			     __field(struct dma_fence *, fence)
+			     __field(const char *, name)
+			     __field(uint64_t, id)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = entity;
+			   __entry->id = sched_job->id;
+			   __entry->fence = &sched_job->s_fence->finished;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = spsc_queue_count(&entity->job_queue);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->id,
+		      __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
+);
+
 TRACE_EVENT(drm_sched_process_job,
 	    TP_PROTO(struct drm_sched_fence *fence),
 	    TP_ARGS(fence),
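
Each TRACE_EVENT(drm_run_job, ...) definition generates a trace_drm_run_job()
emitter; the scheduler's main loop can then fire it at the moment a job is
handed to the hardware, mirroring the existing drm_sched_job event at
submission time. Roughly, at the call site inside drm_sched_main():

	struct dma_fence *fence;

	trace_drm_run_job(sched_job, entity);	/* from the event above */
	fence = sched->ops->run_job(sched_job);
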
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 63bccd2..c803e14e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
+ * drm_sched_entity_modify_sched - Modify sched of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: the list of new drm scheds which will replace
+ *		 existing entity->sched_list
+ * @num_sched_list: number of drm sched in sched_list
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+				    struct drm_gpu_scheduler **sched_list,
+				    unsigned int num_sched_list)
+{
+	WARN_ON(!num_sched_list || !sched_list);
+
+	entity->sched_list = sched_list;
+	entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
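
drm_sched_entity_modify_sched() lets a driver swap an entity's scheduler list
after init, for example to pin a context's jobs to a single engine instance
when its priority changes. A hypothetical caller, where example_ctx and its
sched_list array are placeholders and the list must stay valid for as long as
the entity uses it:

	static void example_pin_entity(struct example_ctx *ctx,
				       struct drm_sched_entity *entity,
				       struct drm_gpu_scheduler *sched)
	{
		/* ctx owns this one-element list for the entity's lifetime */
		ctx->sched_list[0] = sched;
		drm_sched_entity_modify_sched(entity, ctx->sched_list, 1);
	}
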
+
+/**
  * drm_sched_entity_is_idle - Check if entity is idle
  *
  * @entity: scheduler entity
@@ -120,38 +138,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 }
 
 /**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
-	struct drm_sched_rq *rq = NULL;
-	unsigned int min_score = UINT_MAX, num_score;
-	int i;
-
-	for (i = 0; i < entity->num_sched_list; ++i) {
-		struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
-		if (!entity->sched_list[i]->ready) {
-			DRM_WARN("sched%s is not ready, skipping", sched->name);
-			continue;
-		}
-
-		num_score = atomic_read(&sched->score);
-		if (num_score < min_score) {
-			min_score = num_score;
-			rq = &entity->sched_list[i]->sched_rq[entity->priority];
-		}
-	}
-
-	return rq;
-}
-
-/**
  * drm_sched_entity_flush - Flush a context entity
  *
  * @entity: scheduler entity
@@ -461,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 {
 	struct dma_fence *fence;
+	struct drm_gpu_scheduler *sched;
 	struct drm_sched_rq *rq;
 
 	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -471,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 		return;
 
 	spin_lock(&entity->rq_lock);
-	rq = drm_sched_entity_get_free_sched(entity);
+	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
 	if (rq != entity->rq) {
 		drm_sched_rq_remove_entity(entity->rq, entity);
 		entity->rq = rq;
@@ -498,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(&entity->rq->sched->score);
+	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 60c4c6a..8e731ed 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	if (!list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_inc(&rq->sched->score);
 	list_add_tail(&entity->list, &rq->entities);
 	spin_unlock(&rq->lock);
 }
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	if (list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_dec(&rq->sched->score);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
@@ -222,8 +220,7 @@ EXPORT_SYMBOL(drm_sched_fault);
  *
  * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
- * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
- * called from an IRQ context.
+ * MAX_SCHEDULE_TIMEOUT in this case.
  *
  * Returns the timeout remaining
  *
@@ -252,46 +249,41 @@ EXPORT_SYMBOL(drm_sched_suspend_timeout);
  * @sched: scheduler instance for which to resume the timeout
  * @remaining: remaining timeout
  *
- * Resume the delayed work timeout for the scheduler. Note that
- * this function can be called from an IRQ context.
+ * Resume the delayed work timeout for the scheduler.
  */
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 		unsigned long remaining)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	spin_lock(&sched->job_list_lock);
 
 	if (list_empty(&sched->ring_mirror_list))
 		cancel_delayed_work(&sched->work_tdr);
 	else
 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
 
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_resume_timeout);
 
 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
 	struct drm_gpu_scheduler *sched = s_job->sched;
-	unsigned long flags;
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	spin_unlock(&sched->job_list_lock);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
 {
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_job *job;
-	unsigned long flags;
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	spin_lock(&sched->job_list_lock);
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
 
@@ -302,7 +294,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		 * is parked at which point it's safe.
 		 */
 		list_del_init(&job->node);
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+		spin_unlock(&sched->job_list_lock);
 
 		job->sched->ops->timedout_job(job);
 
@@ -315,12 +307,12 @@ static void drm_sched_job_timedout(struct work_struct *work)
 			sched->free_guilty = false;
 		}
 	} else {
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+		spin_unlock(&sched->job_list_lock);
 	}
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	spin_lock(&sched->job_list_lock);
 	drm_sched_start_timeout(sched);
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	spin_unlock(&sched->job_list_lock);
 }
 
  /**
@@ -383,7 +375,6 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
 	struct drm_sched_job *s_job, *tmp;
-	unsigned long flags;
 
 	kthread_park(sched->thread);
 
@@ -417,9 +408,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 			 * remove job from ring_mirror_list.
 			 * Locking here is for concurrent resume timeout
 			 */
-			spin_lock_irqsave(&sched->job_list_lock, flags);
+			spin_lock(&sched->job_list_lock);
 			list_del_init(&s_job->node);
-			spin_unlock_irqrestore(&sched->job_list_lock, flags);
+			spin_unlock(&sched->job_list_lock);
 
 			/*
 			 * Wait for job's HW fence callback to finish using s_job
@@ -462,7 +453,6 @@ EXPORT_SYMBOL(drm_sched_stop);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 {
 	struct drm_sched_job *s_job, *tmp;
-	unsigned long flags;
 	int r;
 
 	/*
@@ -491,9 +481,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	}
 
 	if (full_recovery) {
-		spin_lock_irqsave(&sched->job_list_lock, flags);
+		spin_lock(&sched->job_list_lock);
 		drm_sched_start_timeout(sched);
-		spin_unlock_irqrestore(&sched->job_list_lock, flags);
+		spin_unlock(&sched->job_list_lock);
 	}
 
 	kthread_unpark(sched->thread);
@@ -657,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
+	atomic_dec(&sched->num_jobs);
 
 	trace_drm_sched_process_job(s_fence);
 
@@ -679,7 +669,6 @@ static struct drm_sched_job *
 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
 	struct drm_sched_job *job;
-	unsigned long flags;
 
 	/*
 	 * Don't destroy jobs while the timeout worker is running  OR thread
@@ -690,7 +679,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	    __kthread_should_park(sched->thread))
 		return NULL;
 
-	spin_lock_irqsave(&sched->job_list_lock, flags);
+	spin_lock(&sched->job_list_lock);
 
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
 				       struct drm_sched_job, node);
@@ -704,12 +693,48 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 		drm_sched_start_timeout(sched);
 	}
 
-	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+	spin_unlock(&sched->job_list_lock);
 
 	return job;
 }
 
 /**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of
+ * the drm_gpu_schedulers are ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+		     unsigned int num_sched_list)
+{
+	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+	int i;
+	unsigned int min_jobs = UINT_MAX, num_jobs;
+
+	for (i = 0; i < num_sched_list; ++i) {
+		sched = sched_list[i];
+
+		if (!sched->ready) {
+			DRM_WARN("scheduler %s is not ready, skipping",
+				 sched->name);
+			continue;
+		}
+
+		num_jobs = atomic_read(&sched->num_jobs);
+		if (num_jobs < min_jobs) {
+			min_jobs = num_jobs;
+			picked_sched = sched;
+		}
+	}
+
+	return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
+
+/**
  * drm_sched_blocked - check if the scheduler is blocked
  *
  * @sched: scheduler instance
@@ -775,6 +800,7 @@ static int drm_sched_main(void *param)
 		atomic_inc(&sched->hw_rq_count);
 		drm_sched_job_begin(sched_job);
 
+		trace_drm_run_job(sched_job, entity);
 		fence = sched->ops->run_job(sched_job);
 		drm_sched_fence_scheduled(s_fence);
 
@@ -834,7 +860,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-	atomic_set(&sched->score, 0);
+	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a separate kernel thread */
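
[ The scheduler hunks above rename the "score" counter back to num_jobs
  and export drm_sched_pick_best() for drivers that manage several
  scheduler instances. A minimal sketch of a caller, mirroring the
  drm_sched_entity_select_rq() hunk above (illustrative, not part of
  the diff):

	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq = NULL;

	/* Pick the ready instance with the fewest queued jobs... */
	sched = drm_sched_pick_best(entity->sched_list,
				    entity->num_sched_list);

	/* ...and bind the entity to its run queue for this priority. */
	if (sched)
		rq = &sched->sched_rq[entity->priority];
]
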
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index dc64fbf..49e6cb8 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -279,12 +279,13 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
 	return 0;
 }
 
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct sti_private *dev_priv = dev->dev_private;
 	struct sti_compositor *compo = dev_priv->compo;
 	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
-	struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
 	struct sti_vtg *vtg = compo->vtg[pipe];
 
 	DRM_DEBUG_DRIVER("\n");
@@ -297,8 +298,10 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 	return 0;
 }
 
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
+static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
 {
+	struct drm_device *drm_dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct sti_private *priv = drm_dev->dev_private;
 	struct sti_compositor *compo = priv->compo;
 	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
@@ -330,6 +333,8 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 	.late_register = sti_crtc_late_register,
+	.enable_vblank = sti_crtc_enable_vblank,
+	.disable_vblank = sti_crtc_disable_vblank,
 };
 
 bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index df489ab..1132b45 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -15,8 +15,6 @@ struct sti_mixer;
 
 int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
 		  struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int sti_crtc_vblank_cb(struct notifier_block *nb,
 		       unsigned long event, void *data);
 bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a39fc36..50870d8 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -21,7 +21,6 @@
 #include <drm/drm_of.h>
 #include <drm/drm_probe_helper.h>
 
-#include "sti_crtc.h"
 #include "sti_drv.h"
 #include "sti_plane.h"
 
@@ -146,9 +145,6 @@ static struct drm_driver sti_driver = {
 	.dumb_create = drm_gem_cma_dumb_create,
 	.fops = &sti_driver_fops,
 
-	.enable_vblank = sti_crtc_enable_vblank,
-	.disable_vblank = sti_crtc_disable_vblank,
-
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
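
[ The sti changes follow the core's move of vblank control from
  drm_driver to per-CRTC callbacks. The general shape of such a
  conversion, with "foo" as a placeholder driver name (sketch):

	static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
	{
		/* unmask the vblank interrupt for crtc->index's pipe */
		return 0;
	}

	static void foo_crtc_disable_vblank(struct drm_crtc *crtc)
	{
		/* mask the vblank interrupt again */
	}

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* ... */
		.enable_vblank = foo_crtc_enable_vblank,
		.disable_vblank = foo_crtc_disable_vblank,
	};
]
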
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b2778ec..3d04bfca 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -467,7 +467,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
 	bridge->of_node = dvo->dev.of_node;
 	drm_bridge_add(bridge);
 
-	err = drm_bridge_attach(encoder, bridge, NULL);
+	err = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (err) {
 		DRM_ERROR("Failed to attach bridge\n");
 		return err;
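
[ This and the following drm_bridge_attach() fixups adapt callers to
  the new four-argument signature; a flags value of 0 keeps the old
  behaviour where the bridge creates its own connector. For reference,
  the alternative introduced this cycle, not used by these drivers
  (sketch):

	/* Ask the bridge chain not to create a connector; the display
	 * driver then constructs one itself. */
	err = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (err)
		return err;
]
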
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2bb3200..f3f28d7 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -701,7 +701,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
 
 	bridge->driver_private = hda;
 	bridge->funcs = &sti_hda_bridge_funcs;
-	drm_bridge_attach(encoder, bridge, NULL);
+	drm_bridge_attach(encoder, bridge, NULL, 0);
 
 	connector->encoder = encoder;
 
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 64ed102..18eaf78 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1281,7 +1281,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 
 	bridge->driver_private = hdmi;
 	bridge->funcs = &sti_hdmi_bridge_funcs;
-	drm_bridge_attach(encoder, bridge, NULL);
+	drm_bridge_attach(encoder, bridge, NULL, 0);
 
 	connector->encoder = encoder;
 
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5a9f9ac..ea9fcbd 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
 	.gem_prime_vmap = drm_gem_cma_prime_vmap,
 	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
 	.gem_prime_mmap = drm_gem_cma_prime_mmap,
-	.get_scanout_position = ltdc_crtc_scanoutpos,
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
 };
 
 static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 4b16563..2e1f266 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -377,7 +377,9 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
 	dsi->pllref_clk = devm_clk_get(dev, "ref");
 	if (IS_ERR(dsi->pllref_clk)) {
 		ret = PTR_ERR(dsi->pllref_clk);
-		DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			DRM_ERROR("Unable to get pll reference clock: %d\n",
+				  ret);
 		goto err_clk_get;
 	}
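
[ The hunk above applies the usual probe-deferral logging pattern:
  -EPROBE_DEFER is routine and will be retried, so only real failures
  deserve an error message. Generic form (sketch):

	clk = devm_clk_get(dev, "ref");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "unable to get clock: %d\n", ret);
		return ret;
	}
]
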
 
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index c2815e8..df585fe 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -636,38 +636,13 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
 	}
 }
 
-static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
-	.mode_valid = ltdc_crtc_mode_valid,
-	.mode_fixup = ltdc_crtc_mode_fixup,
-	.mode_set_nofb = ltdc_crtc_mode_set_nofb,
-	.atomic_flush = ltdc_crtc_atomic_flush,
-	.atomic_enable = ltdc_crtc_atomic_enable,
-	.atomic_disable = ltdc_crtc_atomic_disable,
-};
-
-static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+					   bool in_vblank_irq,
+					   int *vpos, int *hpos,
+					   ktime_t *stime, ktime_t *etime,
+					   const struct drm_display_mode *mode)
 {
-	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
-	DRM_DEBUG_DRIVER("\n");
-	reg_set(ldev->regs, LTDC_IER, IER_LIE);
-
-	return 0;
-}
-
-static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
-{
-	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
-	DRM_DEBUG_DRIVER("\n");
-	reg_clear(ldev->regs, LTDC_IER, IER_LIE);
-}
-
-bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
-			  bool in_vblank_irq, int *vpos, int *hpos,
-			  ktime_t *stime, ktime_t *etime,
-			  const struct drm_display_mode *mode)
-{
+	struct drm_device *ddev = crtc->dev;
 	struct ltdc_device *ldev = ddev->dev_private;
 	int line, vactive_start, vactive_end, vtotal;
 
@@ -710,6 +685,39 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
 	return true;
 }
 
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+	.mode_valid = ltdc_crtc_mode_valid,
+	.mode_fixup = ltdc_crtc_mode_fixup,
+	.mode_set_nofb = ltdc_crtc_mode_set_nofb,
+	.atomic_flush = ltdc_crtc_atomic_flush,
+	.atomic_enable = ltdc_crtc_atomic_enable,
+	.atomic_disable = ltdc_crtc_atomic_disable,
+	.get_scanout_position = ltdc_crtc_get_scanout_position,
+};
+
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+	struct drm_crtc_state *state = crtc->state;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (state->enable)
+		reg_set(ldev->regs, LTDC_IER, IER_LIE);
+	else
+		return -EPERM;
+
+	return 0;
+}
+
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+	DRM_DEBUG_DRIVER("\n");
+	reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+}
+
 static const struct drm_crtc_funcs ltdc_crtc_funcs = {
 	.destroy = drm_crtc_cleanup,
 	.set_config = drm_atomic_helper_set_config,
@@ -719,6 +727,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 	.enable_vblank = ltdc_crtc_enable_vblank,
 	.disable_vblank = ltdc_crtc_disable_vblank,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 };
 
@@ -1100,7 +1109,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
 
 	drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
 
-	ret = drm_bridge_attach(encoder, bridge, NULL);
+	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 	if (ret) {
 		drm_encoder_cleanup(encoder);
 		return -EINVAL;
@@ -1146,12 +1155,14 @@ static int ltdc_get_caps(struct drm_device *ddev)
 		ldev->caps.pad_max_freq_hz = 90000000;
 		if (ldev->caps.hw_version == HWVER_10200)
 			ldev->caps.pad_max_freq_hz = 65000000;
+		ldev->caps.nb_irq = 2;
 		break;
 	case HWVER_20101:
 		ldev->caps.reg_ofs = REG_OFS_4;
 		ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
 		ldev->caps.non_alpha_only_l1 = false;
 		ldev->caps.pad_max_freq_hz = 150000000;
+		ldev->caps.nb_irq = 4;
 		break;
 	default:
 		return -ENODEV;
@@ -1251,13 +1262,21 @@ int ltdc_load(struct drm_device *ddev)
 	reg_clear(ldev->regs, LTDC_IER,
 		  IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
 
-	for (i = 0; i < MAX_IRQ; i++) {
-		irq = platform_get_irq(pdev, i);
-		if (irq == -EPROBE_DEFER)
-			goto err;
+	ret = ltdc_get_caps(ddev);
+	if (ret) {
+		DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
+			  ldev->caps.hw_version);
+		goto err;
+	}
 
-		if (irq < 0)
-			continue;
+	DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+
+	for (i = 0; i < ldev->caps.nb_irq; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			ret = irq;
+			goto err;
+		}
 
 		ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
 						ltdc_irq_thread, IRQF_ONESHOT,
@@ -1268,16 +1287,6 @@ int ltdc_load(struct drm_device *ddev)
 		}
 	}
 
-
-	ret = ltdc_get_caps(ddev);
-	if (ret) {
-		DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
-			  ldev->caps.hw_version);
-		goto err;
-	}
-
-	DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
-
 	/* Add endpoints panels or bridges if any */
 	for (i = 0; i < MAX_ENDPOINTS; i++) {
 		if (panel[i]) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index a1ad0ae..f153b90 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -19,6 +19,7 @@ struct ltdc_caps {
 	const u32 *pix_fmt_hw;	/* supported pixel formats */
 	bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
 	int pad_max_freq_hz;	/* max frequency supported by pad */
+	int nb_irq;		/* number of hardware interrupts */
 };
 
 #define LTDC_MAX_LAYER	4
@@ -39,11 +40,6 @@ struct ltdc_device {
 	struct drm_atomic_state *suspend_state;
 };
 
-bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-			  bool in_vblank_irq, int *vpos, int *hpos,
-			  ktime_t *stime, ktime_t *etime,
-			  const struct drm_display_mode *mode);
-
 int ltdc_load(struct drm_device *ddev);
 void ltdc_unload(struct drm_device *ddev);
 void ltdc_suspend(struct drm_device *ddev);
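
[ The ltdc conversion is the scanout-position counterpart of the sti
  vblank change: get_scanout_position moves into drm_crtc_helper_funcs
  and the timestamp is computed by the new core helper. The resulting
  wiring, in outline ("foo" is a placeholder):

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		/* ... */
		.get_scanout_position = foo_crtc_get_scanout_position,
	};

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* ... */
		.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	};
]
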
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 65b7a87..26e5c7c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -156,7 +156,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 	}
 
 	if (bridge) {
-		ret = drm_bridge_attach(encoder, bridge, NULL);
+		ret = drm_bridge_attach(encoder, bridge, NULL, 0);
 		if (ret) {
 			dev_err(drm->dev, "Couldn't attach our bridge\n");
 			goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index b27f16a..3b23d5b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -253,7 +253,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
 	}
 
 	if (rgb->bridge) {
-		ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+		ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
 		if (ret) {
 			dev_err(drm->dev, "Couldn't attach our bridge\n");
 			goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c81cdce..624437b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -114,46 +114,71 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 	}
 }
 
+static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+				      const struct drm_encoder *encoder)
+{
+	regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+		     SUN4I_TCON0_LVDS_ANA0_CK_EN |
+		     SUN4I_TCON0_LVDS_ANA0_REG_V |
+		     SUN4I_TCON0_LVDS_ANA0_REG_C |
+		     SUN4I_TCON0_LVDS_ANA0_EN_MB |
+		     SUN4I_TCON0_LVDS_ANA0_PD |
+		     SUN4I_TCON0_LVDS_ANA0_DCHS);
+
+	udelay(2); /* delay at least 1200 ns */
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+			   SUN4I_TCON0_LVDS_ANA1_INIT,
+			   SUN4I_TCON0_LVDS_ANA1_INIT);
+	udelay(1); /* delay at least 120 ns */
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+			   SUN4I_TCON0_LVDS_ANA1_UPDATE,
+			   SUN4I_TCON0_LVDS_ANA1_UPDATE);
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+			   SUN4I_TCON0_LVDS_ANA0_EN_MB,
+			   SUN4I_TCON0_LVDS_ANA0_EN_MB);
+}
+
+static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+				      const struct drm_encoder *encoder)
+{
+	u8 val;
+
+	regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+		     SUN6I_TCON0_LVDS_ANA0_C(2) |
+		     SUN6I_TCON0_LVDS_ANA0_V(3) |
+		     SUN6I_TCON0_LVDS_ANA0_PD(2) |
+		     SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+	udelay(2);
+
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+			   SUN6I_TCON0_LVDS_ANA0_EN_MB,
+			   SUN6I_TCON0_LVDS_ANA0_EN_MB);
+	udelay(2);
+
+	regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+			   SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+			   SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+	if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+		val = 7;
+	else
+		val = 0xf;
+
+	regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+			  SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+			  SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+}
+
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
 				       const struct drm_encoder *encoder,
 				       bool enabled)
 {
 	if (enabled) {
-		u8 val;
-
 		regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
 				   SUN4I_TCON0_LVDS_IF_EN,
 				   SUN4I_TCON0_LVDS_IF_EN);
-
-		/*
-		 * As their name suggest, these values only apply to the A31
-		 * and later SoCs. We'll have to rework this when merging
-		 * support for the older SoCs.
-		 */
-		regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
-			     SUN6I_TCON0_LVDS_ANA0_C(2) |
-			     SUN6I_TCON0_LVDS_ANA0_V(3) |
-			     SUN6I_TCON0_LVDS_ANA0_PD(2) |
-			     SUN6I_TCON0_LVDS_ANA0_EN_LDO);
-		udelay(2);
-
-		regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
-				   SUN6I_TCON0_LVDS_ANA0_EN_MB,
-				   SUN6I_TCON0_LVDS_ANA0_EN_MB);
-		udelay(2);
-
-		regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
-				   SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
-				   SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
-
-		if (sun4i_tcon_get_pixel_depth(encoder) == 18)
-			val = 7;
-		else
-			val = 0xf;
-
-		regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
-				  SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
-				  SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+		if (tcon->quirks->setup_lvds_phy)
+			tcon->quirks->setup_lvds_phy(tcon, encoder);
 	} else {
 		regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
 				   SUN4I_TCON0_LVDS_IF_EN, 0);
@@ -1453,6 +1478,16 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
 	.dclk_min_div		= 1,
 };
 
+static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
+	.supports_lvds		= true,
+	.has_channel_0		= true,
+	.has_channel_1		= true,
+	.dclk_min_div		= 4,
+	/* Same display pipeline structure as A10 */
+	.set_mux		= sun4i_a10_tcon_set_mux,
+	.setup_lvds_phy		= sun4i_tcon_setup_lvds_phy,
+};
+
 static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
 	.has_channel_0		= true,
 	.has_channel_1		= true,
@@ -1465,12 +1500,15 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 	.has_channel_0		= true,
 	.has_lvds_alt		= true,
 	.dclk_min_div		= 1,
+	.setup_lvds_phy		= sun6i_tcon_setup_lvds_phy,
+	.supports_lvds		= true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
 	.supports_lvds		= true,
 	.has_channel_0		= true,
 	.dclk_min_div		= 1,
+	.setup_lvds_phy		= sun6i_tcon_setup_lvds_phy,
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1505,6 +1543,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
 	{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
 	{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
 	{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+	{ .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
+	{ .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
 	{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
 	{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
 	{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec82..cfbf4e6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -193,6 +193,13 @@
 #define SUN4I_TCON_MUX_CTRL_REG			0x200
 
 #define SUN4I_TCON0_LVDS_ANA0_REG		0x220
+#define SUN4I_TCON0_LVDS_ANA0_DCHS			BIT(16)
+#define SUN4I_TCON0_LVDS_ANA0_PD			(BIT(20) | BIT(21))
+#define SUN4I_TCON0_LVDS_ANA0_EN_MB			BIT(22)
+#define SUN4I_TCON0_LVDS_ANA0_REG_C			(BIT(24) | BIT(25))
+#define SUN4I_TCON0_LVDS_ANA0_REG_V			(BIT(26) | BIT(27))
+#define SUN4I_TCON0_LVDS_ANA0_CK_EN			(BIT(29) | BIT(28))
+
 #define SUN6I_TCON0_LVDS_ANA0_EN_MB			BIT(31)
 #define SUN6I_TCON0_LVDS_ANA0_EN_LDO			BIT(30)
 #define SUN6I_TCON0_LVDS_ANA0_EN_DRVC			BIT(24)
@@ -201,6 +208,10 @@
 #define SUN6I_TCON0_LVDS_ANA0_V(x)			(((x) & 3) << 8)
 #define SUN6I_TCON0_LVDS_ANA0_PD(x)			(((x) & 3) << 4)
 
+#define SUN4I_TCON0_LVDS_ANA1_REG		0x224
+#define SUN4I_TCON0_LVDS_ANA1_INIT			(0x1f << 26 | 0x1f << 10)
+#define SUN4I_TCON0_LVDS_ANA1_UPDATE			(0x1f << 16 | 0x1f << 00)
+
 #define SUN4I_TCON1_FILL_CTL_REG		0x300
 #define SUN4I_TCON1_FILL_BEG0_REG		0x304
 #define SUN4I_TCON1_FILL_END0_REG		0x308
@@ -228,6 +239,9 @@ struct sun4i_tcon_quirks {
 
 	/* callback to handle tcon muxing options */
 	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
+	/* handler for LVDS setup routine */
+	void	(*setup_lvds_phy)(struct sun4i_tcon *tcon,
+				  const struct drm_encoder *encoder);
 };
 
 struct sun4i_tcon {
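
[ Moving the LVDS PHY bring-up behind a quirks callback lets each SoC
  generation select its own sequence at of_match time instead of
  hard-coding the A31 values. The dispatch reduces to (sketch, using
  the driver's own names):

	static const struct sun4i_tcon_quirks example_quirks = {
		.supports_lvds	= true,
		.has_channel_0	= true,
		.setup_lvds_phy	= sun6i_tcon_setup_lvds_phy,
	};

	/* matched via sun4i_tcon_of_table, then called only when the
	 * SoC provides a sequence: */
	if (tcon->quirks->setup_lvds_phy)
		tcon->quirks->setup_lvds_phy(tcon, encoder);
]
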
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a75fcb1..0599397 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -14,7 +14,6 @@
 #include <linux/phy/phy-mipi-dphy.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
@@ -27,7 +26,6 @@
 #include <drm/drm_probe_helper.h>
 
 #include "sun4i_crtc.h"
-#include "sun4i_drv.h"
 #include "sun4i_tcon.h"
 #include "sun6i_mipi_dsi.h"
 
@@ -722,10 +720,31 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
 	union phy_configure_opts opts = { 0 };
 	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
 	u16 delay;
+	int err;
 
 	DRM_DEBUG_DRIVER("Enabling DSI output\n");
 
-	pm_runtime_get_sync(dsi->dev);
+	err = regulator_enable(dsi->regulator);
+	if (err)
+		dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+
+	reset_control_deassert(dsi->reset);
+	clk_prepare_enable(dsi->mod_clk);
+
+	/*
+	 * Enable the DSI block.
+	 */
+	regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+		     SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+	regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+	regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+	sun6i_dsi_inst_init(dsi, dsi->device);
+
+	regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
 
 	delay = sun6i_dsi_get_video_start_delay(dsi, mode);
 	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
@@ -749,7 +768,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
 	phy_configure(dsi->dphy, &opts);
 	phy_power_on(dsi->dphy);
 
-	if (!IS_ERR(dsi->panel))
+	if (dsi->panel)
 		drm_panel_prepare(dsi->panel);
 
 	/*
@@ -764,7 +783,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
 	 * ordering on the panels I've tested it with, so I guess this
 	 * will do for now, until that IP is better understood.
 	 */
-	if (!IS_ERR(dsi->panel))
+	if (dsi->panel)
 		drm_panel_enable(dsi->panel);
 
 	sun6i_dsi_start(dsi, DSI_START_HSC);
@@ -780,7 +799,7 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
 
 	DRM_DEBUG_DRIVER("Disabling DSI output\n");
 
-	if (!IS_ERR(dsi->panel)) {
+	if (dsi->panel) {
 		drm_panel_disable(dsi->panel);
 		drm_panel_unprepare(dsi->panel);
 	}
@@ -788,7 +807,9 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
 	phy_power_off(dsi->dphy);
 	phy_exit(dsi->dphy);
 
-	pm_runtime_put(dsi->dev);
+	clk_disable_unprepare(dsi->mod_clk);
+	reset_control_assert(dsi->reset);
+	regulator_disable(dsi->regulator);
 }
 
 static int sun6i_dsi_get_modes(struct drm_connector *connector)
@@ -805,7 +826,10 @@ static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
 static enum drm_connector_status
 sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
 {
-	return connector_status_connected;
+	struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+	return dsi->panel ? connector_status_connected :
+			    connector_status_disconnected;
 }
 
 static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
@@ -942,11 +966,18 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
 			    struct mipi_dsi_device *device)
 {
 	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+	struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
 
+	if (IS_ERR(panel))
+		return PTR_ERR(panel);
+	if (!dsi->drm || !dsi->drm->registered)
+		return -EPROBE_DEFER;
+
+	dsi->panel = panel;
 	dsi->device = device;
-	dsi->panel = of_drm_find_panel(device->dev.of_node);
-	if (IS_ERR(dsi->panel))
-		return PTR_ERR(dsi->panel);
+
+	drm_panel_attach(dsi->panel, &dsi->connector);
+	drm_kms_helper_hotplug_event(dsi->drm);
 
 	dev_info(host->dev, "Attached device %s\n", device->name);
 
@@ -957,10 +988,14 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
 			    struct mipi_dsi_device *device)
 {
 	struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+	struct drm_panel *panel = dsi->panel;
 
 	dsi->panel = NULL;
 	dsi->device = NULL;
 
+	drm_panel_detach(panel);
+	drm_kms_helper_hotplug_event(dsi->drm);
+
 	return 0;
 }
 
@@ -1022,15 +1057,9 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
 			 void *data)
 {
 	struct drm_device *drm = data;
-	struct sun4i_drv *drv = drm->dev_private;
 	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
 	int ret;
 
-	if (!dsi->panel)
-		return -EPROBE_DEFER;
-
-	dsi->drv = drv;
-
 	drm_encoder_helper_add(&dsi->encoder,
 			       &sun6i_dsi_enc_helper_funcs);
 	ret = drm_encoder_init(drm,
@@ -1056,7 +1085,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
 	}
 
 	drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
-	drm_panel_attach(dsi->panel, &dsi->connector);
+
+	dsi->drm = drm;
 
 	return 0;
 
@@ -1070,7 +1100,7 @@ static void sun6i_dsi_unbind(struct device *dev, struct device *master,
 {
 	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
 
-	drm_panel_detach(dsi->panel);
+	dsi->drm = NULL;
 }
 
 static const struct component_ops sun6i_dsi_ops = {
@@ -1157,12 +1187,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
 		goto err_unprotect_clk;
 	}
 
-	pm_runtime_enable(dev);
-
 	ret = mipi_dsi_host_register(&dsi->host);
 	if (ret) {
 		dev_err(dev, "Couldn't register MIPI-DSI host\n");
-		goto err_pm_disable;
+		goto err_unprotect_clk;
 	}
 
 	ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@@ -1175,8 +1203,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
 
 err_remove_dsi_host:
 	mipi_dsi_host_unregister(&dsi->host);
-err_pm_disable:
-	pm_runtime_disable(dev);
 err_unprotect_clk:
 	clk_rate_exclusive_put(dsi->mod_clk);
 err_attach_clk:
@@ -1192,7 +1218,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
 
 	component_del(&pdev->dev, &sun6i_dsi_ops);
 	mipi_dsi_host_unregister(&dsi->host);
-	pm_runtime_disable(dev);
 	clk_rate_exclusive_put(dsi->mod_clk);
 
 	if (!IS_ERR(dsi->bus_clk))
@@ -1201,59 +1226,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
-{
-	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-	int err;
-
-	err = regulator_enable(dsi->regulator);
-	if (err) {
-		dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
-		return err;
-	}
-
-	reset_control_deassert(dsi->reset);
-	clk_prepare_enable(dsi->mod_clk);
-
-	/*
-	 * Enable the DSI block.
-	 *
-	 * Some part of it can only be done once we get a number of
-	 * lanes, see sun6i_dsi_inst_init
-	 */
-	regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
-
-	regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
-		     SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
-
-	regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
-	regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
-
-	if (dsi->device)
-		sun6i_dsi_inst_init(dsi, dsi->device);
-
-	regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
-
-	return 0;
-}
-
-static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
-{
-	struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-
-	clk_disable_unprepare(dsi->mod_clk);
-	reset_control_assert(dsi->reset);
-	regulator_disable(dsi->regulator);
-
-	return 0;
-}
-
-static const struct dev_pm_ops sun6i_dsi_pm_ops = {
-	SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
-			   sun6i_dsi_runtime_resume,
-			   NULL)
-};
-
 static const struct of_device_id sun6i_dsi_of_table[] = {
 	{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
 	{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
@@ -1267,7 +1239,6 @@ static struct platform_driver sun6i_dsi_platform_driver = {
 	.driver		= {
 		.name		= "sun6i-mipi-dsi",
 		.of_match_table	= sun6i_dsi_of_table,
-		.pm		= &sun6i_dsi_pm_ops,
 	},
 };
 module_platform_driver(sun6i_dsi_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 3f4846f..c863900 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -28,8 +28,8 @@ struct sun6i_dsi {
 	struct phy		*dphy;
 
 	struct device		*dev;
-	struct sun4i_drv	*drv;
 	struct mipi_dsi_device	*device;
+	struct drm_device	*drm;
 	struct drm_panel	*panel;
 };
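
[ Storing the drm_device pointer at bind time lets the DSI host treat
  panel attach as a hotplug event instead of deferring component bind
  on a missing panel. The skeleton of the new flow ("example" names
  are placeholders; sketch):

	static int example_dsi_attach(struct mipi_dsi_host *host,
				      struct mipi_dsi_device *device)
	{
		struct example_dsi *dsi = host_to_example_dsi(host);
		struct drm_panel *panel =
			of_drm_find_panel(device->dev.of_node);

		if (IS_ERR(panel))
			return PTR_ERR(panel);
		if (!dsi->drm || !dsi->drm->registered)
			return -EPROBE_DEFER;	/* DRM device not up yet */

		dsi->panel = panel;
		drm_panel_attach(panel, &dsi->connector);
		drm_kms_helper_hotplug_event(dsi->drm);
		return 0;
	}
]
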
 
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 7c70fd3..1a7b08f 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2503,7 +2503,6 @@ static int tegra_dc_couple(struct tegra_dc *dc)
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-	struct resource *regs;
 	struct tegra_dc *dc;
 	int err;
 
@@ -2560,8 +2559,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
 		tegra_powergate_power_off(dc->powergate);
 	}
 
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+	dc->regs = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(dc->regs))
 		return PTR_ERR(dc->regs);
 
@@ -2573,7 +2571,13 @@ static int tegra_dc_probe(struct platform_device *pdev)
 
 	err = tegra_dc_rgb_probe(dc);
 	if (err < 0 && err != -ENODEV) {
-		dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+		const char *level = KERN_ERR;
+
+		if (err == -EPROBE_DEFER)
+			level = KERN_DEBUG;
+
+		dev_printk(level, dc->dev, "failed to probe RGB output: %d\n",
+			   err);
 		return err;
 	}
 
@@ -2588,10 +2592,16 @@ static int tegra_dc_probe(struct platform_device *pdev)
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		return err;
+		goto disable_pm;
 	}
 
 	return 0;
+
+disable_pm:
+	pm_runtime_disable(&pdev->dev);
+	tegra_dc_rgb_remove(dc);
+
+	return err;
 }
 
 static int tegra_dc_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 84f0e01..b8a328f 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -314,19 +314,13 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
 	struct drm_device *drm = fbdev->base.dev;
 	int err;
 
-	err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
+	err = drm_fb_helper_init(drm, &fbdev->base);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
 			err);
 		return err;
 	}
 
-	err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
-	if (err < 0) {
-		dev_err(drm->dev, "failed to add connectors: %d\n", err);
-		goto fini;
-	}
-
 	err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
 	if (err < 0) {
 		dev_err(drm->dev, "failed to set initial configuration: %d\n",
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6f11762..38252c0 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1648,6 +1648,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
+	const char *level = KERN_ERR;
 	struct tegra_hdmi *hdmi;
 	struct resource *regs;
 	int err;
@@ -1686,21 +1687,36 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 	}
 
 	hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
-	if (IS_ERR(hdmi->hdmi)) {
-		dev_err(&pdev->dev, "failed to get HDMI regulator\n");
-		return PTR_ERR(hdmi->hdmi);
+	err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+	if (err) {
+		if (err == -EPROBE_DEFER)
+			level = KERN_DEBUG;
+
+		dev_printk(level, &pdev->dev,
+			   "failed to get HDMI regulator: %d\n", err);
+		return err;
 	}
 
 	hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
-	if (IS_ERR(hdmi->pll)) {
-		dev_err(&pdev->dev, "failed to get PLL regulator\n");
-		return PTR_ERR(hdmi->pll);
+	err = PTR_ERR_OR_ZERO(hdmi->pll);
+	if (err) {
+		if (err == -EPROBE_DEFER)
+			level = KERN_DEBUG;
+
+		dev_printk(level, &pdev->dev,
+			   "failed to get PLL regulator: %d\n", err);
+		return err;
 	}
 
 	hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(hdmi->vdd)) {
-		dev_err(&pdev->dev, "failed to get VDD regulator\n");
-		return PTR_ERR(hdmi->vdd);
+	err = PTR_ERR_OR_ZERO(hdmi->vdd);
+	if (err) {
+		if (err == -EPROBE_DEFER)
+			level = KERN_DEBUG;
+
+		dev_printk(level, &pdev->dev,
+			   "failed to get VDD regulator: %d\n", err);
+		return err;
 	}
 
 	hdmi->output.dev = &pdev->dev;
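
[ tegra_hdmi_probe() now folds each regulator lookup into one error
  leg: PTR_ERR_OR_ZERO() collapses the IS_ERR()/PTR_ERR() pair, and
  dev_printk() demotes expected probe deferrals to debug level.
  Condensed (sketch):

	regulator = devm_regulator_get(&pdev->dev, "vdd");
	err = PTR_ERR_OR_ZERO(regulator);
	if (err) {
		dev_printk(err == -EPROBE_DEFER ? KERN_DEBUG : KERN_ERR,
			   &pdev->dev,
			   "failed to get VDD regulator: %d\n", err);
		return err;
	}
]
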
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
new file mode 100644
index 0000000..f790a52
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -0,0 +1,14 @@
+config DRM_TIDSS
+	tristate "DRM Support for TI Keystone"
+	depends on DRM && OF
+	depends on ARM || ARM64 || COMPILE_TEST
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  The TI Keystone family of SoCs introduced a new generation of
+	  the Display SubSystem. Three Keystone family SoCs with DSS
+	  have been released so far, each with a somewhat different
+	  version of it: 66AK2Gx, AM65x, and J721E. Set this to Y or M
+	  to add display support for TI Keystone family
+	  platforms.
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
new file mode 100644
index 0000000..3126452
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tidss-y := tidss_crtc.o \
+	tidss_drv.o \
+	tidss_encoder.o \
+	tidss_kms.o \
+	tidss_irq.o \
+	tidss_plane.o \
+	tidss_scale_coefs.o \
+	tidss_dispc.o
+
+obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
new file mode 100644
index 0000000..d4ce9ba
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* Page flip and frame done IRQs */
+
+static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
+{
+	struct drm_device *ddev = tcrtc->crtc.dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+	bool busy;
+
+	spin_lock_irqsave(&ddev->event_lock, flags);
+
+	/*
+	 * New settings are taken into use at VFP, and the GO bit is cleared
+	 * at the same time. This happens before the vertical blank interrupt,
+	 * so there is a small chance that the driver sets the GO bit after
+	 * VFP but before vblank, and we have to check for that case here.
+	 */
+	busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport);
+	if (busy) {
+		spin_unlock_irqrestore(&ddev->event_lock, flags);
+		return;
+	}
+
+	event = tcrtc->event;
+	tcrtc->event = NULL;
+
+	if (!event) {
+		spin_unlock_irqrestore(&ddev->event_lock, flags);
+		return;
+	}
+
+	drm_crtc_send_vblank_event(&tcrtc->crtc, event);
+
+	spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+	drm_crtc_vblank_put(&tcrtc->crtc);
+}
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+	drm_crtc_handle_vblank(crtc);
+
+	tidss_crtc_finish_page_flip(tcrtc);
+}
+
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+	complete(&tcrtc->framedone_completion);
+}
+
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+	dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n",
+			    tcrtc->hw_videoport, irqstatus);
+}
+
+/* drm_crtc_helper_funcs */
+
+static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *state)
+{
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct dispc_device *dispc = tidss->dispc;
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	u32 hw_videoport = tcrtc->hw_videoport;
+	const struct drm_display_mode *mode;
+	enum drm_mode_status ok;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	if (!state->enable)
+		return 0;
+
+	mode = &state->adjusted_mode;
+
+	ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
+	if (ok != MODE_OK) {
+		dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+			__func__, mode->hdisplay, mode->vdisplay, mode->clock);
+		return -EINVAL;
+	}
+
+	return dispc_vp_bus_check(dispc, hw_videoport, state);
+}
+
+/*
+ * This needs all affected planes to be present in the atomic
+ * state. The untouched planes are added to the state in
+ * tidss_atomic_check().
+ */
+static void tidss_crtc_position_planes(struct tidss_device *tidss,
+				       struct drm_crtc *crtc,
+				       struct drm_crtc_state *old_state,
+				       bool newmodeset)
+{
+	struct drm_atomic_state *ostate = old_state->state;
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	struct drm_crtc_state *cstate = crtc->state;
+	int layer;
+
+	if (!newmodeset && !cstate->zpos_changed &&
+	    !to_tidss_crtc_state(cstate)->plane_pos_changed)
+		return;
+
+	for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+		struct drm_plane_state *pstate;
+		struct drm_plane *plane;
+		bool layer_active = false;
+		int i;
+
+		for_each_new_plane_in_state(ostate, plane, pstate, i) {
+			if (pstate->crtc != crtc || !pstate->visible)
+				continue;
+
+			if (pstate->normalized_zpos == layer) {
+				layer_active = true;
+				break;
+			}
+		}
+
+		if (layer_active) {
+			struct tidss_plane *tplane = to_tidss_plane(plane);
+
+			dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id,
+					    tcrtc->hw_videoport,
+					    pstate->crtc_x, pstate->crtc_y,
+					    layer);
+		}
+		dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+				       layer_active);
+	}
+}
+
+static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	unsigned long flags;
+
+	dev_dbg(ddev->dev,
+		"%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+		crtc->name, crtc->state->enable,
+		drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->event);
+
+	/* There is nothing to do if CRTC is not going to be enabled. */
+	if (!crtc->state->enable)
+		return;
+
+	/*
+	 * Flush CRTC changes with the GO bit only if a new modeset is not
+	 * coming, so the CRTC stays enabled throughout the commit.
+	 */
+	if (drm_atomic_crtc_needs_modeset(crtc->state))
+		return;
+
+	/* If the GO bit is stuck we better quit here. */
+	if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport)))
+		return;
+
+	/* We should have an event if the CRTC is enabled throughout this commit. */
+	if (WARN_ON(!crtc->state->event))
+		return;
+
+	/* Write vp properties to HW if needed. */
+	dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false);
+
+	/* Update plane positions if needed. */
+	tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false);
+
+	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+	spin_lock_irqsave(&ddev->event_lock, flags);
+	dispc_vp_go(tidss->dispc, tcrtc->hw_videoport);
+
+	WARN_ON(tcrtc->event);
+
+	tcrtc->event = crtc->state->event;
+	crtc->state->event = NULL;
+
+	spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	unsigned long flags;
+	int r;
+
+	dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+	tidss_runtime_get(tidss);
+
+	r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
+				  mode->clock * 1000);
+	if (r != 0)
+		return;
+
+	r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport);
+	if (r != 0)
+		return;
+
+	dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true);
+	tidss_crtc_position_planes(tidss, crtc, old_state, true);
+
+	/* Turn vertical blanking interrupt reporting on. */
+	drm_crtc_vblank_on(crtc);
+
+	dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+	dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+	spin_lock_irqsave(&ddev->event_lock, flags);
+
+	if (crtc->state->event) {
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+
+	spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_state)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	unsigned long flags;
+
+	dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+	reinit_completion(&tcrtc->framedone_completion);
+
+	dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+	if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+					 msecs_to_jiffies(500)))
+		dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d",
+			tcrtc->hw_videoport);
+
+	dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport);
+
+	spin_lock_irqsave(&ddev->event_lock, flags);
+	if (crtc->state->event) {
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+	drm_crtc_vblank_off(crtc);
+
+	dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport);
+
+	tidss_runtime_put(tidss);
+}
+
+static
+enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
+					   const struct drm_display_mode *mode)
+{
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+
+	return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
+}
+
+static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
+	.atomic_check = tidss_crtc_atomic_check,
+	.atomic_flush = tidss_crtc_atomic_flush,
+	.atomic_enable = tidss_crtc_atomic_enable,
+	.atomic_disable = tidss_crtc_atomic_disable,
+
+	.mode_valid = tidss_crtc_mode_valid,
+};
+
+/* drm_crtc_funcs */
+
+static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	tidss_runtime_get(tidss);
+
+	tidss_irq_enable_vblank(crtc);
+
+	return 0;
+}
+
+static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	tidss_irq_disable_vblank(crtc);
+
+	tidss_runtime_put(tidss);
+}
+
+static void tidss_crtc_reset(struct drm_crtc *crtc)
+{
+	struct tidss_crtc_state *tcrtc;
+
+	if (crtc->state)
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+	kfree(crtc->state);
+
+	tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+	if (!tcrtc) {
+		crtc->state = NULL;
+		return;
+	}
+
+	crtc->state = &tcrtc->base;
+	crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct tidss_crtc_state *state, *current_state;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	current_state = to_tidss_crtc_state(crtc->state);
+
+	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+	state->plane_pos_changed = false;
+
+	state->bus_format = current_state->bus_format;
+	state->bus_flags = current_state->bus_flags;
+
+	return &state->base;
+}
+
+static const struct drm_crtc_funcs tidss_crtc_funcs = {
+	.reset = tidss_crtc_reset,
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_duplicate_state = tidss_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = tidss_crtc_enable_vblank,
+	.disable_vblank = tidss_crtc_disable_vblank,
+};
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+				     u32 hw_videoport,
+				     struct drm_plane *primary)
+{
+	struct tidss_crtc *tcrtc;
+	struct drm_crtc *crtc;
+	unsigned int gamma_lut_size = 0;
+	bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
+	int ret;
+
+	tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL);
+	if (!tcrtc)
+		return ERR_PTR(-ENOMEM);
+
+	tcrtc->hw_videoport = hw_videoport;
+	init_completion(&tcrtc->framedone_completion);
+
+	crtc = &tcrtc->crtc;
+
+	ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
+					NULL, &tidss_crtc_funcs, NULL);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
+
+	/*
+	 * The dispc gamma functions adapt to whatever size we ask for,
+	 * no matter what the HW supports. The X server assumes
+	 * 256-element gamma tables, so let's use that.
+	 */
+	if (tidss->feat->vp_feat.color.gamma_size)
+		gamma_lut_size = 256;
+
+	drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
+	if (gamma_lut_size)
+		drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+
+	return tcrtc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.h b/drivers/gpu/drm/tidss/tidss_crtc.h
new file mode 100644
index 0000000..09e7736
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_CRTC_H__
+#define __TIDSS_CRTC_H__
+
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+
+#define to_tidss_crtc(c) container_of((c), struct tidss_crtc, crtc)
+
+struct tidss_device;
+
+struct tidss_crtc {
+	struct drm_crtc crtc;
+
+	u32 hw_videoport;
+
+	struct drm_pending_vblank_event *event;
+
+	struct completion framedone_completion;
+};
+
+#define to_tidss_crtc_state(x) container_of(x, struct tidss_crtc_state, base)
+
+struct tidss_crtc_state {
+	/* Must be first. */
+	struct drm_crtc_state base;
+
+	bool plane_pos_changed;
+
+	u32 bus_format;
+	u32 bus_flags;
+};
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc);
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc);
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus);
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+				     u32 hw_videoport,
+				     struct drm_plane *primary);
+#endif
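
[ tidss subclasses drm_crtc_state; keeping "base" as the first member
  is what lets to_tidss_crtc_state() recover the wrapper via
  container_of() from any state pointer the core hands back. Typical
  use, mirroring tidss_crtc_position_planes() (sketch):

	struct tidss_crtc_state *tstate = to_tidss_crtc_state(crtc->state);

	/* driver-private dirty flags ride along with the core state */
	if (tstate->plane_pos_changed)
		tidss_crtc_position_planes(tidss, crtc, old_state, false);
]
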
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
new file mode 100644
index 0000000..29f4276
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+#include "tidss_dispc_regs.h"
+#include "tidss_scale_coefs.h"
+
+static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+	[DSS_REVISION_OFF] =                    0x00,
+	[DSS_SYSCONFIG_OFF] =                   0x04,
+	[DSS_SYSSTATUS_OFF] =                   0x08,
+	[DISPC_IRQ_EOI_OFF] =                   0x20,
+	[DISPC_IRQSTATUS_RAW_OFF] =             0x24,
+	[DISPC_IRQSTATUS_OFF] =                 0x28,
+	[DISPC_IRQENABLE_SET_OFF] =             0x2c,
+	[DISPC_IRQENABLE_CLR_OFF] =             0x30,
+
+	[DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] =    0x40,
+	[DISPC_GLOBAL_BUFFER_OFF] =             0x44,
+
+	[DISPC_DBG_CONTROL_OFF] =               0x4c,
+	[DISPC_DBG_STATUS_OFF] =                0x50,
+
+	[DISPC_CLKGATING_DISABLE_OFF] =         0x54,
+};
+
+const struct dispc_features dispc_k2g_feats = {
+	.min_pclk_khz = 4375,
+
+	.max_pclk_khz = {
+		[DISPC_VP_DPI] = 150000,
+	},
+
+	/*
+	 * XXX According to the TRM, the RGB input buffer width up to 2560
+	 *     should work with 3 taps, but in practice it only works up to 1280.
+	 */
+	.scaling = {
+		.in_width_max_5tap_rgb = 1280,
+		.in_width_max_3tap_rgb = 1280,
+		.in_width_max_5tap_yuv = 2560,
+		.in_width_max_3tap_yuv = 2560,
+		.upscale_limit = 16,
+		.downscale_limit_5tap = 4,
+		.downscale_limit_3tap = 2,
+		/*
+		 * The max supported pixel inc value is 255. The value
+		 * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+		 * The maximum bpp of all formats supported by the HW
+		 * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+		 */
+		.xinc_max = 32,
+	},
+
+	.subrev = DISPC_K2G,
+
+	.common = "common",
+
+	.common_regs = tidss_k2g_common_regs,
+
+	.num_vps = 1,
+	.vp_name = { "vp1" },
+	.ovr_name = { "ovr1" },
+	.vpclk_name =  { "vp1" },
+	.vp_bus_type = { DISPC_VP_DPI },
+
+	.vp_feat = { .color = {
+			.has_ctm = true,
+			.gamma_size = 256,
+			.gamma_type = TIDSS_GAMMA_8BIT,
+		},
+	},
+
+	.num_planes = 1,
+	.vid_name = { "vid1" },
+	.vid_lite = { false },
+	.vid_order = { 0 },
+};
+
+static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+	[DSS_REVISION_OFF] =			0x4,
+	[DSS_SYSCONFIG_OFF] =			0x8,
+	[DSS_SYSSTATUS_OFF] =			0x20,
+	[DISPC_IRQ_EOI_OFF] =			0x24,
+	[DISPC_IRQSTATUS_RAW_OFF] =		0x28,
+	[DISPC_IRQSTATUS_OFF] =			0x2c,
+	[DISPC_IRQENABLE_SET_OFF] =		0x30,
+	[DISPC_IRQENABLE_CLR_OFF] =		0x40,
+	[DISPC_VID_IRQENABLE_OFF] =		0x44,
+	[DISPC_VID_IRQSTATUS_OFF] =		0x58,
+	[DISPC_VP_IRQENABLE_OFF] =		0x70,
+	[DISPC_VP_IRQSTATUS_OFF] =		0x7c,
+
+	[WB_IRQENABLE_OFF] =			0x88,
+	[WB_IRQSTATUS_OFF] =			0x8c,
+
+	[DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] =	0x90,
+	[DISPC_GLOBAL_OUTPUT_ENABLE_OFF] =	0x94,
+	[DISPC_GLOBAL_BUFFER_OFF] =		0x98,
+	[DSS_CBA_CFG_OFF] =			0x9c,
+	[DISPC_DBG_CONTROL_OFF] =		0xa0,
+	[DISPC_DBG_STATUS_OFF] =		0xa4,
+	[DISPC_CLKGATING_DISABLE_OFF] =		0xa8,
+	[DISPC_SECURE_DISABLE_OFF] =		0xac,
+};
+
+const struct dispc_features dispc_am65x_feats = {
+	.max_pclk_khz = {
+		[DISPC_VP_DPI] = 165000,
+		[DISPC_VP_OLDI] = 165000,
+	},
+
+	.scaling = {
+		.in_width_max_5tap_rgb = 1280,
+		.in_width_max_3tap_rgb = 2560,
+		.in_width_max_5tap_yuv = 2560,
+		.in_width_max_3tap_yuv = 4096,
+		.upscale_limit = 16,
+		.downscale_limit_5tap = 4,
+		.downscale_limit_3tap = 2,
+		/*
+		 * The max supported pixel inc value is 255. The value
+		 * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+		 * The maximum bpp of all formats supported by the HW
+		 * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+		 */
+		.xinc_max = 32,
+	},
+
+	.subrev = DISPC_AM65X,
+
+	.common = "common",
+	.common_regs = tidss_am65x_common_regs,
+
+	.num_vps = 2,
+	.vp_name = { "vp1", "vp2" },
+	.ovr_name = { "ovr1", "ovr2" },
+	.vpclk_name =  { "vp1", "vp2" },
+	.vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+	.vp_feat = { .color = {
+			.has_ctm = true,
+			.gamma_size = 256,
+			.gamma_type = TIDSS_GAMMA_8BIT,
+		},
+	},
+
+	.num_planes = 2,
+	/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+	.vid_name = { "vid", "vidl1" },
+	.vid_lite = { false, true, },
+	.vid_order = { 1, 0 },
+
+	.errata = {
+		.i2000 = true,
+	},
+};
+
+static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+	[DSS_REVISION_OFF] =			0x4,
+	[DSS_SYSCONFIG_OFF] =			0x8,
+	[DSS_SYSSTATUS_OFF] =			0x20,
+	[DISPC_IRQ_EOI_OFF] =			0x80,
+	[DISPC_IRQSTATUS_RAW_OFF] =		0x28,
+	[DISPC_IRQSTATUS_OFF] =			0x2c,
+	[DISPC_IRQENABLE_SET_OFF] =		0x30,
+	[DISPC_IRQENABLE_CLR_OFF] =		0x34,
+	[DISPC_VID_IRQENABLE_OFF] =		0x38,
+	[DISPC_VID_IRQSTATUS_OFF] =		0x48,
+	[DISPC_VP_IRQENABLE_OFF] =		0x58,
+	[DISPC_VP_IRQSTATUS_OFF] =		0x68,
+
+	[WB_IRQENABLE_OFF] =			0x78,
+	[WB_IRQSTATUS_OFF] =			0x7c,
+
+	[DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] =	0x98,
+	[DISPC_GLOBAL_OUTPUT_ENABLE_OFF] =	0x9c,
+	[DISPC_GLOBAL_BUFFER_OFF] =		0xa0,
+	[DSS_CBA_CFG_OFF] =			0xa4,
+	[DISPC_DBG_CONTROL_OFF] =		0xa8,
+	[DISPC_DBG_STATUS_OFF] =		0xac,
+	[DISPC_CLKGATING_DISABLE_OFF] =		0xb0,
+	[DISPC_SECURE_DISABLE_OFF] =		0x90,
+
+	[FBDC_REVISION_1_OFF] =			0xb8,
+	[FBDC_REVISION_2_OFF] =			0xbc,
+	[FBDC_REVISION_3_OFF] =			0xc0,
+	[FBDC_REVISION_4_OFF] =			0xc4,
+	[FBDC_REVISION_5_OFF] =			0xc8,
+	[FBDC_REVISION_6_OFF] =			0xcc,
+	[FBDC_COMMON_CONTROL_OFF] =		0xd0,
+	[FBDC_CONSTANT_COLOR_0_OFF] =		0xd4,
+	[FBDC_CONSTANT_COLOR_1_OFF] =		0xd8,
+	[DISPC_CONNECTIONS_OFF] =		0xe4,
+	[DISPC_MSS_VP1_OFF] =			0xe8,
+	[DISPC_MSS_VP3_OFF] =			0xec,
+};
+
+const struct dispc_features dispc_j721e_feats = {
+	.max_pclk_khz = {
+		[DISPC_VP_DPI] = 170000,
+		[DISPC_VP_INTERNAL] = 600000,
+	},
+
+	.scaling = {
+		.in_width_max_5tap_rgb = 2048,
+		.in_width_max_3tap_rgb = 4096,
+		.in_width_max_5tap_yuv = 4096,
+		.in_width_max_3tap_yuv = 4096,
+		.upscale_limit = 16,
+		.downscale_limit_5tap = 4,
+		.downscale_limit_3tap = 2,
+		/*
+		 * The max supported pixel inc value is 255. The value
+		 * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+		 * The maximum bpp of all formats supported by the HW
+		 * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+		 */
+		.xinc_max = 32,
+	},
+
+	.subrev = DISPC_J721E,
+
+	.common = "common_m",
+	.common_regs = tidss_j721e_common_regs,
+
+	.num_vps = 4,
+	.vp_name = { "vp1", "vp2", "vp3", "vp4" },
+	.ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
+	.vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
+	/* Currently hard-coded VP routing (see dispc_initial_config()) */
+	.vp_bus_type =	{ DISPC_VP_INTERNAL, DISPC_VP_DPI,
+			  DISPC_VP_INTERNAL, DISPC_VP_DPI, },
+	.vp_feat = { .color = {
+			.has_ctm = true,
+			.gamma_size = 1024,
+			.gamma_type = TIDSS_GAMMA_10BIT,
+		},
+	},
+	.num_planes = 4,
+	.vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
+	.vid_lite = { 0, 1, 0, 1, },
+	.vid_order = { 1, 3, 0, 2 },
+};
+
+static const u16 *dispc_common_regmap;
+
+struct dss_vp_data {
+	u32 *gamma_table;
+};
+
+struct dispc_device {
+	struct tidss_device *tidss;
+	struct device *dev;
+
+	void __iomem *base_common;
+	void __iomem *base_vid[TIDSS_MAX_PLANES];
+	void __iomem *base_ovr[TIDSS_MAX_PORTS];
+	void __iomem *base_vp[TIDSS_MAX_PORTS];
+
+	struct regmap *oldi_io_ctrl;
+
+	struct clk *vp_clk[TIDSS_MAX_PORTS];
+
+	const struct dispc_features *feat;
+
+	struct clk *fclk;
+
+	bool is_enabled;
+
+	struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+
+	u32 *fourccs;
+	u32 num_fourccs;
+
+	u32 memory_bandwidth_limit;
+};
+
+static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+	iowrite32(val, dispc->base_common + reg);
+}
+
+static u32 dispc_read(struct dispc_device *dispc, u16 reg)
+{
+	return ioread32(dispc->base_common + reg);
+}
+
+static
+void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
+{
+	void __iomem *base = dispc->base_vid[hw_plane];
+
+	iowrite32(val, base + reg);
+}
+
+static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg)
+{
+	void __iomem *base = dispc->base_vid[hw_plane];
+
+	return ioread32(base + reg);
+}
+
+static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport,
+			    u16 reg, u32 val)
+{
+	void __iomem *base = dispc->base_ovr[hw_videoport];
+
+	iowrite32(val, base + reg);
+}
+
+static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+	void __iomem *base = dispc->base_ovr[hw_videoport];
+
+	return ioread32(base + reg);
+}
+
+static void dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport,
+			   u16 reg, u32 val)
+{
+	void __iomem *base = dispc->base_vp[hw_videoport];
+
+	iowrite32(val, base + reg);
+}
+
+static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+	void __iomem *base = dispc->base_vp[hw_videoport];
+
+	return ioread32(base + reg);
+}
+
+/*
+ * The TRM gives bitfields as start:end, where start is the higher bit
+ * number. For example, 7:0.
+ */
+
+static u32 FLD_MASK(u32 start, u32 end)
+{
+	return ((1 << (start - end + 1)) - 1) << end;
+}
+
+static u32 FLD_VAL(u32 val, u32 start, u32 end)
+{
+	return (val << end) & FLD_MASK(start, end);
+}
+
+static u32 FLD_GET(u32 val, u32 start, u32 end)
+{
+	return (val & FLD_MASK(start, end)) >> end;
+}
+
+static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
+{
+	return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
+}
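+
+/*
+ * For example, for the bitfield 7:4: FLD_MASK(7, 4) == 0xf0,
+ * FLD_VAL(0x5, 7, 4) == 0x50, FLD_GET(0xa5, 7, 4) == 0xa and
+ * FLD_MOD(0xa5, 0x3, 7, 4) == 0x35.
+ */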
+
+static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
+{
+	return FLD_GET(dispc_read(dispc, idx), start, end);
+}
+
+static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
+			u32 start, u32 end)
+{
+	dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
+					start, end));
+}
+
+static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+		       u32 start, u32 end)
+{
+	return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
+}
+
+static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+			    u32 val, u32 start, u32 end)
+{
+	dispc_vid_write(dispc, hw_plane, idx,
+			FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
+				val, start, end));
+}
+
+static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
+		      u32 start, u32 end)
+{
+	return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
+}
+
+static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
+			   u32 start, u32 end)
+{
+	dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
+					       val, start, end));
+}
+
+__maybe_unused
+static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
+		       u32 start, u32 end)
+{
+	return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
+}
+
+static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
+			    u32 val, u32 start, u32 end)
+{
+	dispc_ovr_write(dispc, ovr, idx,
+			FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
+				val, start, end));
+}
+
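+/*
+ * The helpers below translate between the raw per-VP/per-VID register
+ * bit layout and the unified dispc_irq_t encoding used by the rest of
+ * the driver (the DSS_IRQ_VP_* and DSS_IRQ_PLANE_* masks).
+ */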
+static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
+{
+	dispc_irq_t vp_stat = 0;
+
+	if (stat & BIT(0))
+		vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
+	if (stat & BIT(1))
+		vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport);
+	if (stat & BIT(2))
+		vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+	if (stat & BIT(4))
+		vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport);
+
+	return vp_stat;
+}
+
+static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport)
+{
+	u32 stat = 0;
+
+	if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport))
+		stat |= BIT(0);
+	if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport))
+		stat |= BIT(1);
+	if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport))
+		stat |= BIT(2);
+	if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+		stat |= BIT(4);
+
+	return stat;
+}
+
+static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane)
+{
+	dispc_irq_t vid_stat = 0;
+
+	if (stat & BIT(0))
+		vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane);
+
+	return vid_stat;
+}
+
+static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane)
+{
+	u32 stat = 0;
+
+	if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane))
+		stat |= BIT(0);
+
+	return stat;
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc,
+					       u32 hw_videoport)
+{
+	u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS);
+
+	return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc,
+					 u32 hw_videoport, dispc_irq_t vpstat)
+{
+	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc,
+						u32 hw_plane)
+{
+	u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS);
+
+	return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc,
+					  u32 hw_plane, dispc_irq_t vidstat)
+{
+	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc,
+					       u32 hw_videoport)
+{
+	u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE);
+
+	return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc,
+				       u32 hw_videoport, dispc_irq_t vpstat)
+{
+	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc,
+						u32 hw_plane)
+{
+	u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE);
+
+	return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc,
+					u32 hw_plane, dispc_irq_t vidstat)
+{
+	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat);
+}
+
+static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc,
+				      dispc_irq_t mask)
+{
+	dispc_k2g_vp_write_irqstatus(dispc, 0, mask);
+	dispc_k2g_vid_write_irqstatus(dispc, 0, mask);
+}
+
+static
+dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+	dispc_irq_t stat = 0;
+
+	/* always clear the top-level irqstatus */
+	dispc_write(dispc, DISPC_IRQSTATUS,
+		    dispc_read(dispc, DISPC_IRQSTATUS));
+
+	stat |= dispc_k2g_vp_read_irqstatus(dispc, 0);
+	stat |= dispc_k2g_vid_read_irqstatus(dispc, 0);
+
+	dispc_k2g_clear_irqstatus(dispc, stat);
+
+	return stat;
+}
+
+static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc)
+{
+	dispc_irq_t stat = 0;
+
+	stat |= dispc_k2g_vp_read_irqenable(dispc, 0);
+	stat |= dispc_k2g_vid_read_irqenable(dispc, 0);
+
+	return stat;
+}
+
+static
+void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+	dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+
+	/* clear the irqstatus for newly enabled irqs */
+	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+
+	dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+	dispc_k2g_vid_set_irqenable(dispc, 0, mask);
+
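+	/*
+	 * Bits 0 and 7 appear to be the top-level VP and VID enables in
+	 * the K2G interrupt mux; K2G has only one VP and one plane, so
+	 * both are enabled unconditionally here.
+	 */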
+	dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+
+	/* flush posted write */
+	dispc_k2g_read_irqenable(dispc);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
+					      u32 hw_videoport)
+{
+	u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+
+	return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
+					u32 hw_videoport, dispc_irq_t vpstat)
+{
+	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+	dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
+					       u32 hw_plane)
+{
+	u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+
+	return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
+					 u32 hw_plane, dispc_irq_t vidstat)
+{
+	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+	dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
+					      u32 hw_videoport)
+{
+	u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+
+	return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
+				      u32 hw_videoport, dispc_irq_t vpstat)
+{
+	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+	dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
+					       u32 hw_plane)
+{
+	u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+
+	return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
+				       u32 hw_plane, dispc_irq_t vidstat)
+{
+	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+	dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+}
+
+static
+void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+{
+	unsigned int i;
+	u32 top_clear = 0;
+
+	for (i = 0; i < dispc->feat->num_vps; ++i) {
+		if (clearmask & DSS_IRQ_VP_MASK(i)) {
+			dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+			top_clear |= BIT(i);
+		}
+	}
+	for (i = 0; i < dispc->feat->num_planes; ++i) {
+		if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+			dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+			top_clear |= BIT(4 + i);
+		}
+	}
+	if (dispc->feat->subrev == DISPC_K2G)
+		return;
+
+	dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+
+	/* Flush posted writes */
+	dispc_read(dispc, DISPC_IRQSTATUS);
+}
+
+static
+dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+	dispc_irq_t status = 0;
+	unsigned int i;
+
+	for (i = 0; i < dispc->feat->num_vps; ++i)
+		status |= dispc_k3_vp_read_irqstatus(dispc, i);
+
+	for (i = 0; i < dispc->feat->num_planes; ++i)
+		status |= dispc_k3_vid_read_irqstatus(dispc, i);
+
+	dispc_k3_clear_irqstatus(dispc, status);
+
+	return status;
+}
+
+static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
+{
+	dispc_irq_t enable = 0;
+	unsigned int i;
+
+	for (i = 0; i < dispc->feat->num_vps; ++i)
+		enable |= dispc_k3_vp_read_irqenable(dispc, i);
+
+	for (i = 0; i < dispc->feat->num_planes; ++i)
+		enable |= dispc_k3_vid_read_irqenable(dispc, i);
+
+	return enable;
+}
+
+static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+				   dispc_irq_t mask)
+{
+	unsigned int i;
+	u32 main_enable = 0, main_disable = 0;
+	dispc_irq_t old_mask;
+
+	old_mask = dispc_k3_read_irqenable(dispc);
+
+	/* clear the irqstatus for newly enabled irqs */
+	dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+
+	for (i = 0; i < dispc->feat->num_vps; ++i) {
+		dispc_k3_vp_set_irqenable(dispc, i, mask);
+		if (mask & DSS_IRQ_VP_MASK(i))
+			main_enable |= BIT(i);		/* VP IRQ */
+		else
+			main_disable |= BIT(i);		/* VP IRQ */
+	}
+
+	for (i = 0; i < dispc->feat->num_planes; ++i) {
+		dispc_k3_vid_set_irqenable(dispc, i, mask);
+		if (mask & DSS_IRQ_PLANE_MASK(i))
+			main_enable |= BIT(i + 4);	/* VID IRQ */
+		else
+			main_disable |= BIT(i + 4);	/* VID IRQ */
+	}
+
+	if (main_enable)
+		dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+
+	if (main_disable)
+		dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+
+	/* Flush posted writes */
+	dispc_read(dispc, DISPC_IRQENABLE_SET);
+}
+
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+	switch (dispc->feat->subrev) {
+	case DISPC_K2G:
+		return dispc_k2g_read_and_clear_irqstatus(dispc);
+	case DISPC_AM65X:
+	case DISPC_J721E:
+		return dispc_k3_read_and_clear_irqstatus(dispc);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+	switch (dispc->feat->subrev) {
+	case DISPC_K2G:
+		dispc_k2g_set_irqenable(dispc, mask);
+		break;
+	case DISPC_AM65X:
+	case DISPC_J721E:
+		dispc_k3_set_irqenable(dispc, mask);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
+struct dispc_bus_format {
+	u32 bus_fmt;
+	u32 data_width;
+	bool is_oldi_fmt;
+	enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+};
+
+static const struct dispc_bus_format dispc_bus_formats[] = {
+	{ MEDIA_BUS_FMT_RGB444_1X12,		12, false, 0 },
+	{ MEDIA_BUS_FMT_RGB565_1X16,		16, false, 0 },
+	{ MEDIA_BUS_FMT_RGB666_1X18,		18, false, 0 },
+	{ MEDIA_BUS_FMT_RGB888_1X24,		24, false, 0 },
+	{ MEDIA_BUS_FMT_RGB101010_1X30,		30, false, 0 },
+	{ MEDIA_BUS_FMT_RGB121212_1X36,		36, false, 0 },
+	{ MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,	18, true, SPWG_18 },
+	{ MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,	24, true, SPWG_24 },
+	{ MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,	24, true, JEIDA_24 },
+};
+
+static const
+struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
+					       u32 hw_videoport,
+					       u32 bus_fmt, u32 bus_flags)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) {
+		if (dispc_bus_formats[i].bus_fmt == bus_fmt)
+			return &dispc_bus_formats[i];
+	}
+
+	return NULL;
+}
+
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+		       const struct drm_crtc_state *state)
+{
+	const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+	const struct dispc_bus_format *fmt;
+
+	fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+				    tstate->bus_flags);
+	if (!fmt) {
+		dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+			__func__, tstate->bus_format);
+		return -EINVAL;
+	}
+
+	if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+	    fmt->is_oldi_fmt) {
+		dev_dbg(dispc->dev, "%s: %s is not an OLDI port\n",
+			__func__, dispc->feat->vp_name[hw_videoport]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+{
+	u32 val = power ? 0 : OLDI_PWRDN_TX;
+
+	if (WARN_ON(!dispc->oldi_io_ctrl))
+		return;
+
+	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+			   OLDI_PWRDN_TX, val);
+	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+			   OLDI_PWRDN_TX, val);
+	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+			   OLDI_PWRDN_TX, val);
+	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+			   OLDI_PWRDN_TX, val);
+	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+			   OLDI_PWRDN_TX, val);
+}
+
+static void dispc_set_num_datalines(struct dispc_device *dispc,
+				    u32 hw_videoport, int num_lines)
+{
+	int v;
+
+	switch (num_lines) {
+	case 12:
+		v = 0; break;
+	case 16:
+		v = 1; break;
+	case 18:
+		v = 2; break;
+	case 24:
+		v = 3; break;
+	case 30:
+		v = 4; break;
+	case 36:
+		v = 5; break;
+	default:
+		WARN_ON(1);
+		v = 3;
+	}
+
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+}
+
+static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
+			      const struct dispc_bus_format *fmt)
+{
+	u32 oldi_cfg = 0;
+	u32 oldi_reset_bit = BIT(5 + hw_videoport);
+	int count = 0;
+
+	/*
+	 * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
+	 * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+	 */
+
+	if (fmt->data_width == 24)
+		oldi_cfg |= BIT(8); /* MSB */
+	else if (fmt->data_width != 18)
+		dev_warn(dispc->dev, "%s: port width %d not supported\n",
+			 __func__, fmt->data_width);
+
+	oldi_cfg |= BIT(7); /* DEPOL */
+
+	oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+
+	oldi_cfg |= BIT(12); /* SOFTRST */
+
+	oldi_cfg |= BIT(0); /* ENABLE */
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
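+	/*
+	 * Busy-poll DSS_SYSSTATUS for the per-VP OLDI reset-done bit;
+	 * the loop is bounded by a plain iteration count rather than a
+	 * time-based timeout.
+	 */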
+	while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
+	       count < 10000)
+		count++;
+
+	if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)))
+		dev_warn(dispc->dev, "%s: timeout waiting for OLDI reset to complete\n",
+			 __func__);
+}
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+		      const struct drm_crtc_state *state)
+{
+	const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+	const struct dispc_bus_format *fmt;
+
+	fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+				    tstate->bus_flags);
+
+	if (WARN_ON(!fmt))
+		return;
+
+	if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+		dispc_oldi_tx_power(dispc, true);
+
+		dispc_enable_oldi(dispc, hw_videoport, fmt);
+	}
+}
+
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+		     const struct drm_crtc_state *state)
+{
+	const struct drm_display_mode *mode = &state->adjusted_mode;
+	const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+	bool align, onoff, rf, ieo, ipc, ihs, ivs;
+	const struct dispc_bus_format *fmt;
+	u32 hsw, hfp, hbp, vsw, vfp, vbp;
+
+	fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+				    tstate->bus_flags);
+
+	if (WARN_ON(!fmt))
+		return;
+
+	dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
+
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
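+
+	/*
+	 * For example, for the standard CEA 1920x1080@60 mode (hsync_start
+	 * 2008, hsync_end 2052, htotal 2200, vsync_start 1084, vsync_end
+	 * 1089, vtotal 1125) this gives hfp/hsw/hbp = 88/44/148 and
+	 * vfp/vsw/vbp = 4/5/36.
+	 */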
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
+		       FLD_VAL(hsw - 1, 7, 0) |
+		       FLD_VAL(hfp - 1, 19, 8) |
+		       FLD_VAL(hbp - 1, 31, 20));
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
+		       FLD_VAL(vsw - 1, 7, 0) |
+		       FLD_VAL(vfp, 19, 8) |
+		       FLD_VAL(vbp, 31, 20));
+
+	ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+	ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+	ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW);
+
+	ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE);
+
+	/* always use the 'rf' setting */
+	onoff = true;
+
+	rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE);
+
+	/* always use aligned syncs */
+	align = true;
+
+	/* always use DE_HIGH for OLDI */
+	if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+		ieo = false;
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
+		       FLD_VAL(align, 18, 18) |
+		       FLD_VAL(onoff, 17, 17) |
+		       FLD_VAL(rf, 16, 16) |
+		       FLD_VAL(ieo, 15, 15) |
+		       FLD_VAL(ipc, 14, 14) |
+		       FLD_VAL(ihs, 13, 13) |
+		       FLD_VAL(ivs, 12, 12));
+
+	dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
+		       FLD_VAL(mode->hdisplay - 1, 11, 0) |
+		       FLD_VAL(mode->vdisplay - 1, 27, 16));
+
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+}
+
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
+{
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
+{
+	if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+		dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+
+		dispc_oldi_tx_power(dispc, false);
+	}
+}
+
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
+{
+	return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+}
+
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
+{
+	WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+}
+
+enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
+
+static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode)
+{
+	u16 c12;
+
+	c12 = c8 << 4;
+
+	switch (mode) {
+	case C8_TO_C12_REPLICATE:
+		/* Copy the 4 MSBs of c8 into the 4 LSBs for a full-scale c12 */
+		c12 |= c8 >> 4;
+		break;
+	case C8_TO_C12_MAX:
+		c12 |= 0xF;
+		break;
+	default:
+	case C8_TO_C12_MIN:
+		break;
+	}
+
+	return c12;
+}
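+
+/*
+ * For example, c8_to_c12(0xab, ...) yields 0xaba for C8_TO_C12_REPLICATE,
+ * 0xabf for C8_TO_C12_MAX and 0xab0 for C8_TO_C12_MIN.
+ */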
+
+static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
+{
+	u8 a, r, g, b;
+	u64 v;
+
+	a = (argb8888 >> 24) & 0xff;
+	r = (argb8888 >> 16) & 0xff;
+	g = (argb8888 >> 8) & 0xff;
+	b = (argb8888 >> 0) & 0xff;
+
+	v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
+		((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);
+
+	return v;
+}
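+
+/*
+ * For example, argb8888_to_argb12121212(0x80ff0000, C8_TO_C12_REPLICATE)
+ * returns 0x808fff000000.
+ */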
+
+static void dispc_vp_set_default_color(struct dispc_device *dispc,
+				       u32 hw_videoport, u32 default_color)
+{
+	u64 v;
+
+	v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);
+
+	dispc_ovr_write(dispc, hw_videoport,
+			DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
+	dispc_ovr_write(dispc, hw_videoport,
+			DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
+}
+
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+					 u32 hw_videoport,
+					 const struct drm_display_mode *mode)
+{
+	u32 hsw, hfp, hbp, vsw, vfp, vbp;
+	enum dispc_vp_bus_type bus_type;
+	int max_pclk;
+
+	bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+	max_pclk = dispc->feat->max_pclk_khz[bus_type];
+
+	if (WARN_ON(max_pclk == 0))
+		return MODE_BAD;
+
+	if (mode->clock < dispc->feat->min_pclk_khz)
+		return MODE_CLOCK_LOW;
+
+	if (mode->clock > max_pclk)
+		return MODE_CLOCK_HIGH;
+
+	if (mode->hdisplay > 4096)
+		return MODE_BAD;
+
+	if (mode->vdisplay > 4096)
+		return MODE_BAD;
+
+	/* TODO: add interlace support */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	/*
+	 * Enforce that the output width is divisible by 2. Strictly this
+	 * is only needed in the following cases:
+	 * - YUV output selected (BT656, BT1120)
+	 * - Dithering enabled
+	 * - TDM with TDMCycleFormat == 3
+	 * but for simplicity we enforce it always.
+	 */
+	if ((mode->hdisplay % 2) != 0)
+		return MODE_BAD_HVALUE;
+
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	if (hsw < 1 || hsw > 256 ||
+	    hfp < 1 || hfp > 4096 ||
+	    hbp < 1 || hbp > 4096)
+		return MODE_BAD_HVALUE;
+
+	if (vsw < 1 || vsw > 256 ||
+	    vfp > 4095 || vbp > 4095)
+		return MODE_BAD_VVALUE;
+
+	if (dispc->memory_bandwidth_limit) {
+		const unsigned int bpp = 4;
+		u64 bandwidth;
+
+		bandwidth = 1000 * mode->clock;
+		bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+		bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
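+
+		/*
+		 * This is the average bandwidth in bytes per second. For
+		 * example, 1920x1080@60 (pixel clock 148500 kHz, 2200x1125
+		 * total) with bpp 4 gives 497664000, i.e. roughly 498 MB/s.
+		 */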
+
+		if (dispc->memory_bandwidth_limit < bandwidth)
+			return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+	int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]);
+
+	if (ret)
+		dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__,
+			ret);
+
+	return ret;
+}
+
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+	clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
+}
+
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
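+ * For example, dispc_pclk_diff(74250000, 72000000) returns 3 (i.e. ~3%).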
+ */
+static
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+	int r = rate / 100, rr = real_rate / 100;
+
+	return (unsigned int)(abs(((rr - r) * 100) / r));
+}
+
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+			  unsigned long rate)
+{
+	int r;
+	unsigned long new_rate;
+
+	r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
+	if (r) {
+		dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
+			hw_videoport, rate);
+		return r;
+	}
+
+	new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]);
+
+	if (dispc_pclk_diff(rate, new_rate) > 5)
+		dev_warn(dispc->dev,
+			 "vp%d: Clock rate %lu differs over 5%% from requested %lu\n",
+			 hw_videoport, new_rate, rate);
+
+	dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n",
+		hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate);
+
+	return 0;
+}
+
+/* OVR */
+static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc,
+				    u32 hw_plane, u32 hw_videoport,
+				    u32 x, u32 y, u32 layer)
+{
+	/* On K2G there is only one plane, so there is no need for an OVR */
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION,
+			x | (y << 16));
+}
+
+static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
+				      u32 hw_plane, u32 hw_videoport,
+				      u32 x, u32 y, u32 layer)
+{
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+			hw_plane, 4, 1);
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+			x, 17, 6);
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+			y, 30, 19);
+}
+
+static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
+				      u32 hw_plane, u32 hw_videoport,
+				      u32 x, u32 y, u32 layer)
+{
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+			hw_plane, 4, 1);
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+			x, 13, 0);
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+			y, 29, 16);
+}
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+			 u32 hw_videoport, u32 x, u32 y, u32 layer)
+{
+	switch (dispc->feat->subrev) {
+	case DISPC_K2G:
+		dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
+					x, y, layer);
+		break;
+	case DISPC_AM65X:
+		dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
+					  x, y, layer);
+		break;
+	case DISPC_J721E:
+		dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport,
+					  x, y, layer);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+			    u32 hw_videoport, u32 layer, bool enable)
+{
+	if (dispc->feat->subrev == DISPC_K2G)
+		return;
+
+	OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+			!!enable, 0, 0);
+}
+
+/* CSC */
+enum csc_ctm {
+	CSC_RR, CSC_RG, CSC_RB,
+	CSC_GR, CSC_GG, CSC_GB,
+	CSC_BR, CSC_BG, CSC_BB,
+};
+
+enum csc_yuv2rgb {
+	CSC_RY, CSC_RCB, CSC_RCR,
+	CSC_GY, CSC_GCB, CSC_GCR,
+	CSC_BY, CSC_BCB, CSC_BCR,
+};
+
+enum csc_rgb2yuv {
+	CSC_YR,  CSC_YG,  CSC_YB,
+	CSC_CBR, CSC_CBG, CSC_CBB,
+	CSC_CRR, CSC_CRG, CSC_CRB,
+};
+
+struct dispc_csc_coef {
+	void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval);
+	int m[9];
+	int preoffset[3];
+	int postoffset[3];
+	enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } clipping;
+	const char *name;
+};
+
+#define DISPC_CSC_REGVAL_LEN 8
+
+static
+void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+	regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
+	regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
+	regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
+#undef OVAL
+}
+
+#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+static
+void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+	regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]);
+	regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]);
+	regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]);
+	regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]);
+	regval[4] = CVAL(csc->m[CSC_BCB], 0);
+
+	dispc_csc_offset_regval(csc, regval);
+}
+
+__maybe_unused static
+void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+	regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]);
+	regval[1] = CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]);
+	regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]);
+	regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]);
+	regval[4] = CVAL(csc->m[CSC_CBB], 0);
+
+	dispc_csc_offset_regval(csc, regval);
+}
+
+static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc,
+				 u32 *regval)
+{
+	regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]);
+	regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]);
+	regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]);
+	regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]);
+	regval[4] = CVAL(csc->m[CSC_BB], 0);
+
+	dispc_csc_offset_regval(csc, regval);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+				    const struct dispc_csc_coef *csc)
+{
+	static const u16 dispc_vid_csc_coef_reg[] = {
+		DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+		DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+		DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+		DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */
+	};
+	u32 regval[DISPC_CSC_REGVAL_LEN];
+	unsigned int i;
+
+	csc->to_regval(csc, regval);
+
+	if (regval[7] != 0)
+		dev_warn(dispc->dev, "%s: No post offset support for %s\n",
+			 __func__, csc->name);
+
+	for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+		dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+				regval[i]);
+}
+
+static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+				   const struct dispc_csc_coef *csc)
+{
+	static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+		DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+		DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+		DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+		DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7,
+	};
+	u32 regval[DISPC_CSC_REGVAL_LEN];
+	unsigned int i;
+
+	csc->to_regval(csc, regval);
+
+	for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+		dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+				regval[i]);
+}
+
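+/*
+ * The CSC matrix coefficients below are in s2.8 fixed point, i.e.
+ * 256 == 1.0 (e.g. 358/256 ~= 1.402). The -2048 chroma pre-offsets
+ * recenter the 12-bit Cb/Cr components, and the -256 luma pre-offset
+ * corresponds to the 16/255 black level scaled to 12 bits (16 << 4).
+ */
+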
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = {
+	dispc_csc_yuv2rgb_regval,
+	{ 256,   0,  358,	/* ry, rcb, rcr |1.000  0.000  1.402|*/
+	  256, -88, -182,	/* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+	  256, 452,    0, },	/* by, bcb, bcr |1.000  1.772  0.000|*/
+	{    0, -2048, -2048, },	/* full range */
+	{    0,     0,     0, },
+	CLIP_FULL_RANGE,
+	"BT.601 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = {
+	dispc_csc_yuv2rgb_regval,
+	{ 298,    0,  409,	/* ry, rcb, rcr |1.164  0.000  1.596|*/
+	  298, -100, -208,	/* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+	  298,  516,    0, },	/* by, bcb, bcr |1.164  2.017  0.000|*/
+	{ -256, -2048, -2048, },	/* limited range */
+	{    0,     0,     0, },
+	CLIP_FULL_RANGE,
+	"BT.601 Limited",
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = {
+	dispc_csc_yuv2rgb_regval,
+	{ 256,	  0,  402,	/* ry, rcb, rcr |1.000	0.000  1.570|*/
+	  256,  -48, -120,	/* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+	  256,  475,    0, },	/* by, bcb, bcr |1.000	1.856  0.000|*/
+	{    0, -2048, -2048, },	/* full range */
+	{    0,     0,     0, },
+	CLIP_FULL_RANGE,
+	"BT.709 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = {
+	dispc_csc_yuv2rgb_regval,
+	{ 298,    0,  459,	/* ry, rcb, rcr |1.164  0.000  1.793|*/
+	  298,  -55, -136,	/* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+	  298,  541,    0, },	/* by, bcb, bcr |1.164  2.112  0.000|*/
+	{ -256, -2048, -2048, },	/* limited range */
+	{    0,     0,     0, },
+	CLIP_FULL_RANGE,
+	"BT.709 Limited",
+};
+
+static const struct {
+	enum drm_color_encoding encoding;
+	enum drm_color_range range;
+	const struct dispc_csc_coef *csc;
+} dispc_csc_table[] = {
+	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
+	  &csc_yuv2rgb_bt601_full, },
+	{ DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
+	  &csc_yuv2rgb_bt601_lim, },
+	{ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE,
+	  &csc_yuv2rgb_bt709_full, },
+	{ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE,
+	  &csc_yuv2rgb_bt709_lim, },
+};
+
+static const
+struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding,
+				      enum drm_color_range range)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
+		if (dispc_csc_table[i].encoding == encoding &&
+		    dispc_csc_table[i].range == range) {
+			return dispc_csc_table[i].csc;
+		}
+	}
+	return NULL;
+}
+
+static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
+				const struct drm_plane_state *state)
+{
+	const struct dispc_csc_coef *coef;
+
+	coef = dispc_find_csc(state->color_encoding, state->color_range);
+	if (!coef) {
+		dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
+			__func__, state->color_encoding, state->color_range);
+		return;
+	}
+
+	if (dispc->feat->subrev == DISPC_K2G)
+		dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
+	else
+		dispc_k3_vid_write_csc(dispc, hw_plane, coef);
+}
+
+static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
+				 bool enable)
+{
+	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+}
+
+/* SCALER */
+
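+/*
+ * The FIR increment is the input/output size ratio in fixed point, with
+ * 0x200000 representing 1.0: e.g. 0x400000 for 2:1 downscaling and
+ * 0x100000 for 1:2 upscaling.
+ */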
+static u32 dispc_calc_fir_inc(u32 in, u32 out)
+{
+	return (u32)div_u64(0x200000ull * in, out);
+}
+
+enum dispc_vid_fir_coef_set {
+	DISPC_VID_FIR_COEF_HORIZ,
+	DISPC_VID_FIR_COEF_HORIZ_UV,
+	DISPC_VID_FIR_COEF_VERT,
+	DISPC_VID_FIR_COEF_VERT_UV,
+};
+
+static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
+				      u32 hw_plane,
+				      enum dispc_vid_fir_coef_set coef_set,
+				      const struct tidss_scale_coefs *coefs)
+{
+	static const u16 c0_regs[] = {
+		[DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
+		[DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
+		[DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
+		[DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
+	};
+
+	static const u16 c12_regs[] = {
+		[DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
+		[DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
+		[DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
+		[DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
+	};
+
+	const u16 c0_base = c0_regs[coef_set];
+	const u16 c12_base = c12_regs[coef_set];
+	int phase;
+
+	if (!coefs) {
+		dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
+		return;
+	}
+
+	for (phase = 0; phase <= 8; ++phase) {
+		u16 reg = c0_base + phase * 4;
+		u16 c0 = coefs->c0[phase];
+
+		dispc_vid_write(dispc, hw_plane, reg, c0);
+	}
+
+	for (phase = 0; phase <= 15; ++phase) {
+		u16 reg = c12_base + phase * 4;
+		s16 c1, c2;
+		u32 c12;
+
+		c1 = coefs->c1[phase];
+		c2 = coefs->c2[phase];
+		c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+
+		dispc_vid_write(dispc, hw_plane, reg, c12);
+	}
+}
+
+static bool dispc_fourcc_is_yuv(u32 fourcc)
+{
+	switch (fourcc) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_NV12:
+		return true;
+	default:
+		return false;
+	}
+}
+
+struct dispc_scaling_params {
+	int xinc, yinc;
+	u32 in_w, in_h, in_w_uv, in_h_uv;
+	u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv;
+	bool scale_x, scale_y;
+	const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv;
+	bool five_taps;
+};
+
+static int dispc_vid_calc_scaling(struct dispc_device *dispc,
+				  const struct drm_plane_state *state,
+				  struct dispc_scaling_params *sp,
+				  bool lite_plane)
+{
+	const struct dispc_features_scaling *f = &dispc->feat->scaling;
+	u32 fourcc = state->fb->format->format;
+	u32 in_width_max_5tap = f->in_width_max_5tap_rgb;
+	u32 in_width_max_3tap = f->in_width_max_3tap_rgb;
+	u32 downscale_limit;
+	u32 in_width_max;
+
+	memset(sp, 0, sizeof(*sp));
+	sp->xinc = 1;
+	sp->yinc = 1;
+	sp->in_w = state->src_w >> 16;
+	sp->in_w_uv = sp->in_w;
+	sp->in_h = state->src_h >> 16;
+	sp->in_h_uv = sp->in_h;
+
+	sp->scale_x = sp->in_w != state->crtc_w;
+	sp->scale_y = sp->in_h != state->crtc_h;
+
+	if (dispc_fourcc_is_yuv(fourcc)) {
+		in_width_max_5tap = f->in_width_max_5tap_yuv;
+		in_width_max_3tap = f->in_width_max_3tap_yuv;
+
+		sp->in_w_uv >>= 1;
+		sp->scale_x = true;
+
+		if (fourcc == DRM_FORMAT_NV12) {
+			sp->in_h_uv >>= 1;
+			sp->scale_y = true;
+		}
+	}
+
+	/* Skip the rest if no scaling is used */
+	if ((!sp->scale_x && !sp->scale_y) || lite_plane)
+		return 0;
+
+	if (sp->in_w > in_width_max_5tap) {
+		sp->five_taps = false;
+		in_width_max = in_width_max_3tap;
+		downscale_limit = f->downscale_limit_3tap;
+	} else {
+		sp->five_taps = true;
+		in_width_max = in_width_max_5tap;
+		downscale_limit = f->downscale_limit_5tap;
+	}
+
+	if (sp->scale_x) {
+		sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+
+		if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+			dev_dbg(dispc->dev,
+				"%s: X-scaling factor %u/%u > %u\n",
+				__func__, state->crtc_w, state->src_w >> 16,
+				f->upscale_limit);
+			return -EINVAL;
+		}
+
+		if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+			sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w,
+							     state->crtc_w),
+						downscale_limit);
+
+			if (sp->xinc > f->xinc_max) {
+				dev_dbg(dispc->dev,
+					"%s: X-scaling factor %u/%u < 1/%u\n",
+					__func__, state->crtc_w,
+					state->src_w >> 16,
+					downscale_limit * f->xinc_max);
+				return -EINVAL;
+			}
+
+			sp->in_w = (state->src_w >> 16) / sp->xinc;
+		}
+
+		while (sp->in_w > in_width_max) {
+			sp->xinc++;
+			sp->in_w = (state->src_w >> 16) / sp->xinc;
+		}
+
+		if (sp->xinc > f->xinc_max) {
+			dev_dbg(dispc->dev,
+				"%s: Too wide input buffer %u > %u\n", __func__,
+				state->src_w >> 16, in_width_max * f->xinc_max);
+			return -EINVAL;
+		}
+
+		/*
+		 * We need an even line length for YUV formats. Decimation
+		 * can lead to an odd length, so we need to make it even
+		 * again.
+		 */
+		if (dispc_fourcc_is_yuv(fourcc))
+			sp->in_w &= ~1;
+
+		sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+	}
+
+	if (sp->scale_y) {
+		sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h);
+
+		if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+			dev_dbg(dispc->dev,
+				"%s: Y-scaling factor %u/%u > %u\n",
+				__func__, state->crtc_h, state->src_h >> 16,
+				f->upscale_limit);
+			return -EINVAL;
+		}
+
+		if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+			sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h,
+							     state->crtc_h),
+						downscale_limit);
+
+			sp->in_h /= sp->yinc;
+			sp->fir_yinc = dispc_calc_fir_inc(sp->in_h,
+							  state->crtc_h);
+		}
+	}
+
+	dev_dbg(dispc->dev,
+		"%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n",
+		__func__, state->src_w >> 16, state->src_h >> 16,
+		sp->xinc, sp->yinc, sp->in_w, sp->in_h,
+		sp->fir_xinc / 0x200000u,
+		((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+		sp->fir_yinc / 0x200000u,
+		((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+		sp->five_taps ? 5 : 3,
+		state->crtc_w, state->crtc_h);
+
+	if (dispc_fourcc_is_yuv(fourcc)) {
+		if (sp->scale_x) {
+			sp->in_w_uv /= sp->xinc;
+			sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv,
+							     state->crtc_w);
+			sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev,
+							     sp->fir_xinc_uv,
+							     true);
+		}
+		if (sp->scale_y) {
+			sp->in_h_uv /= sp->yinc;
+			sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv,
+							     state->crtc_h);
+			sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev,
+							     sp->fir_yinc_uv,
+							     sp->five_taps);
+		}
+	}
+
+	if (sp->scale_x)
+		sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc,
+						  true);
+
+	if (sp->scale_y)
+		sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc,
+						  sp->five_taps);
+
+	return 0;
+}
+
+static void dispc_vid_set_scaling(struct dispc_device *dispc,
+				  u32 hw_plane,
+				  struct dispc_scaling_params *sp,
+				  u32 fourcc)
+{
+	/* HORIZONTAL RESIZE ENABLE */
+	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+			sp->scale_x, 7, 7);
+
+	/* VERTICAL RESIZE ENABLE */
+	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+			sp->scale_y, 8, 8);
+
+	/* Skip the rest if no scaling is used */
+	if (!sp->scale_x && !sp->scale_y)
+		return;
+
+	/* VERTICAL 5-TAPS  */
+	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+			sp->five_taps, 21, 21);
+
+	if (dispc_fourcc_is_yuv(fourcc)) {
+		if (sp->scale_x) {
+			dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2,
+					sp->fir_xinc_uv);
+			dispc_vid_write_fir_coefs(dispc, hw_plane,
+						  DISPC_VID_FIR_COEF_HORIZ_UV,
+						  sp->xcoef_uv);
+		}
+		if (sp->scale_y) {
+			dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2,
+					sp->fir_yinc_uv);
+			dispc_vid_write_fir_coefs(dispc, hw_plane,
+						  DISPC_VID_FIR_COEF_VERT_UV,
+						  sp->ycoef_uv);
+		}
+	}
+
+	if (sp->scale_x) {
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc);
+		dispc_vid_write_fir_coefs(dispc, hw_plane,
+					  DISPC_VID_FIR_COEF_HORIZ,
+					  sp->xcoef);
+	}
+
+	if (sp->scale_y) {
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc);
+		dispc_vid_write_fir_coefs(dispc, hw_plane,
+					  DISPC_VID_FIR_COEF_VERT, sp->ycoef);
+	}
+}
+
+/* OTHER */
+
+static const struct {
+	u32 fourcc;
+	u8 dss_code;
+} dispc_color_formats[] = {
+	{ DRM_FORMAT_ARGB4444, 0x0, },
+	{ DRM_FORMAT_ABGR4444, 0x1, },
+	{ DRM_FORMAT_RGBA4444, 0x2, },
+
+	{ DRM_FORMAT_RGB565, 0x3, },
+	{ DRM_FORMAT_BGR565, 0x4, },
+
+	{ DRM_FORMAT_ARGB1555, 0x5, },
+	{ DRM_FORMAT_ABGR1555, 0x6, },
+
+	{ DRM_FORMAT_ARGB8888, 0x7, },
+	{ DRM_FORMAT_ABGR8888, 0x8, },
+	{ DRM_FORMAT_RGBA8888, 0x9, },
+	{ DRM_FORMAT_BGRA8888, 0xa, },
+
+	{ DRM_FORMAT_RGB888, 0xb, },
+	{ DRM_FORMAT_BGR888, 0xc, },
+
+	{ DRM_FORMAT_ARGB2101010, 0xe, },
+	{ DRM_FORMAT_ABGR2101010, 0xf, },
+
+	{ DRM_FORMAT_XRGB4444, 0x20, },
+	{ DRM_FORMAT_XBGR4444, 0x21, },
+	{ DRM_FORMAT_RGBX4444, 0x22, },
+
+	{ DRM_FORMAT_ARGB1555, 0x25, },
+	{ DRM_FORMAT_ABGR1555, 0x26, },
+
+	{ DRM_FORMAT_XRGB8888, 0x27, },
+	{ DRM_FORMAT_XBGR8888, 0x28, },
+	{ DRM_FORMAT_RGBX8888, 0x29, },
+	{ DRM_FORMAT_BGRX8888, 0x2a, },
+
+	{ DRM_FORMAT_XRGB2101010, 0x2e, },
+	{ DRM_FORMAT_XBGR2101010, 0x2f, },
+
+	{ DRM_FORMAT_YUYV, 0x3e, },
+	{ DRM_FORMAT_UYVY, 0x3f, },
+
+	{ DRM_FORMAT_NV12, 0x3d, },
+};
+
+static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
+					 u32 hw_plane, u32 fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+		if (dispc_color_formats[i].fourcc == fourcc) {
+			VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+					dispc_color_formats[i].dss_code,
+					6, 1);
+			return;
+		}
+	}
+
+	WARN_ON(1);
+}
+
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len)
+{
+	WARN_ON(!dispc->fourccs);
+
+	*len = dispc->num_fourccs;
+
+	return dispc->fourccs;
+}
+
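+/*
+ * Convert an increment in pixels to an increment in bytes, as programmed
+ * into the PIXEL_INC/ROW_INC registers: e.g. pixinc(1, 4) == 1 (contiguous)
+ * and pixinc(2, 4) == 5, which skips one 4-byte pixel per step.
+ */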
+static s32 pixinc(int pixels, u8 ps)
+{
+	if (pixels == 1)
+		return 1;
+	else if (pixels > 1)
+		return 1 + (pixels - 1) * ps;
+	else if (pixels < 0)
+		return 1 - (-pixels + 1) * ps;
+
+	WARN_ON(1);
+	return 0;
+}
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+		      const struct drm_plane_state *state,
+		      u32 hw_videoport)
+{
+	bool lite = dispc->feat->vid_lite[hw_plane];
+	u32 fourcc = state->fb->format->format;
+	bool need_scaling = state->src_w >> 16 != state->crtc_w ||
+		state->src_h >> 16 != state->crtc_h;
+	struct dispc_scaling_params scaling;
+	int ret;
+
+	if (dispc_fourcc_is_yuv(fourcc)) {
+		if (!dispc_find_csc(state->color_encoding,
+				    state->color_range)) {
+			dev_dbg(dispc->dev,
+				"%s: Unsupported CSC (%u,%u) for HW plane %u\n",
+				__func__, state->color_encoding,
+				state->color_range, hw_plane);
+			return -EINVAL;
+		}
+	}
+
+	if (need_scaling) {
+		if (lite) {
+			dev_dbg(dispc->dev,
+				"%s: Lite plane %u can't scale %ux%u!=%ux%u\n",
+				__func__, hw_plane,
+				state->src_w >> 16, state->src_h >> 16,
+				state->crtc_w, state->crtc_h);
+			return -EINVAL;
+		}
+		ret = dispc_vid_calc_scaling(dispc, state, &scaling, false);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static
+dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_gem_cma_object *gem;
+	u32 x = state->src_x >> 16;
+	u32 y = state->src_y >> 16;
+
+	gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+
+	return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+		y * fb->pitches[0];
+}
+
+static
+dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_gem_cma_object *gem;
+	u32 x = state->src_x >> 16;
+	u32 y = state->src_y >> 16;
+
+	if (WARN_ON(state->fb->format->num_planes != 2))
+		return 0;
+
+	gem = drm_fb_cma_get_gem_obj(fb, 1);
+
+	return gem->paddr + fb->offsets[1] +
+		(x * fb->format->cpp[1] / fb->format->hsub) +
+		(y * fb->pitches[1] / fb->format->vsub);
+}
+
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+		      const struct drm_plane_state *state,
+		      u32 hw_videoport)
+{
+	bool lite = dispc->feat->vid_lite[hw_plane];
+	u32 fourcc = state->fb->format->format;
+	u16 cpp = state->fb->format->cpp[0];
+	u32 fb_width = state->fb->pitches[0] / cpp;
+	dma_addr_t paddr = dispc_plane_state_paddr(state);
+	struct dispc_scaling_params scale;
+
+	dispc_vid_calc_scaling(dispc, state, &scale, lite);
+
+	dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
+
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
+			(scale.in_w - 1) | ((scale.in_h - 1) << 16));
+
+	/* For YUV422 formats we use the macropixel size for the pixel inc */
+	if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+				pixinc(scale.xinc, cpp * 2));
+	else
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+				pixinc(scale.xinc, cpp));
+
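+	/*
+	 * ROW_INC jumps from the end of one fetched line to the start of
+	 * the next: e.g. with fb_width 1920, in_w 1920, xinc 1 and cpp 4
+	 * it is pixinc(1, 4) == 1 for yinc 1 (contiguous lines), and
+	 * pixinc(1921, 4) == 7681 for yinc 2 (skip every other line).
+	 */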
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC,
+			pixinc(1 + (scale.yinc * fb_width -
+				    scale.xinc * scale.in_w),
+			       cpp));
+
+	if (state->fb->format->num_planes == 2) {
+		u16 cpp_uv = state->fb->format->cpp[1];
+		u32 fb_width_uv = state->fb->pitches[1] / cpp_uv;
+		dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state);
+
+		dispc_vid_write(dispc, hw_plane,
+				DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff);
+		dispc_vid_write(dispc, hw_plane,
+				DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32);
+		dispc_vid_write(dispc, hw_plane,
+				DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff);
+		dispc_vid_write(dispc, hw_plane,
+				DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32);
+
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV,
+				pixinc(1 + (scale.yinc * fb_width_uv -
+					    scale.xinc * scale.in_w_uv),
+				       cpp_uv));
+	}
+
+	if (!lite) {
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
+				(state->crtc_w - 1) |
+				((state->crtc_h - 1) << 16));
+
+		dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
+	}
+
+	/* enable YUV->RGB color conversion */
+	if (dispc_fourcc_is_yuv(fourcc)) {
+		dispc_vid_csc_setup(dispc, hw_plane, state);
+		dispc_vid_csc_enable(dispc, hw_plane, true);
+	} else {
+		dispc_vid_csc_enable(dispc, hw_plane, false);
+	}
+
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
+			0xFF & (state->alpha >> 8));
+
+	if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+		VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+				28, 28);
+	else
+		VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+				28, 28);
+
+	return 0;
+}
+
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+{
+	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+
+	return 0;
+}
+
+static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
+{
+	return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+}
+
+static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
+					  u32 hw_plane, u32 low, u32 high)
+{
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
+			FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
+					u32 hw_plane, u32 low, u32 high)
+{
+	dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+			FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_k2g_plane_init(struct dispc_device *dispc)
+{
+	unsigned int hw_plane;
+
+	dev_dbg(dispc->dev, "%s()\n", __func__);
+
+	/* MFLAG_CTRL = ENABLED */
+	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+	/* MFLAG_START = MFLAGNORMALSTARTMODE */
+	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+	for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+		u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+		u32 thr_low, thr_high;
+		u32 mflag_low, mflag_high;
+		u32 preload;
+
+		thr_high = size - 1;
+		thr_low = size / 2;
+
+		mflag_high = size * 2 / 3;
+		mflag_low = size / 3;
+
+		preload = thr_low;
+
+		dev_dbg(dispc->dev,
+			"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+			dispc->feat->vid_name[hw_plane],
+			size,
+			thr_high, thr_low,
+			mflag_high, mflag_low,
+			preload);
+
+		dispc_vid_set_buf_threshold(dispc, hw_plane,
+					    thr_low, thr_high);
+		dispc_vid_set_mflag_threshold(dispc, hw_plane,
+					      mflag_low, mflag_high);
+
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+		/*
+		 * Prefetch up to fifo high-threshold value to minimize the
+		 * possibility of underflows. Note that this means the PRELOAD
+		 * register is ignored.
+		 */
+		VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+				19, 19);
+	}
+}
+
+static void dispc_k3_plane_init(struct dispc_device *dispc)
+{
+	unsigned int hw_plane;
+	u32 cba_lo_pri = 1;
+	u32 cba_hi_pri = 0;
+
+	dev_dbg(dispc->dev, "%s()\n", __func__);
+
+	REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+	REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+
+	/* MFLAG_CTRL = ENABLED */
+	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+	/* MFLAG_START = MFLAGNORMALSTARTMODE */
+	REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+	for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+		u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+		u32 thr_low, thr_high;
+		u32 mflag_low, mflag_high;
+		u32 preload;
+
+		thr_high = size - 1;
+		thr_low = size / 2;
+
+		mflag_high = size * 2 / 3;
+		mflag_low = size / 3;
+
+		preload = thr_low;
+
+		dev_dbg(dispc->dev,
+			"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+			dispc->feat->vid_name[hw_plane],
+			size,
+			thr_high, thr_low,
+			mflag_high, mflag_low,
+			preload);
+
+		dispc_vid_set_buf_threshold(dispc, hw_plane,
+					    thr_low, thr_high);
+		dispc_vid_set_mflag_threshold(dispc, hw_plane,
+					      mflag_low, mflag_high);
+
+		dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+		/* Prefetch up to the PRELOAD value */
+		VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+				19, 19);
+	}
+}
+
+static void dispc_plane_init(struct dispc_device *dispc)
+{
+	switch (dispc->feat->subrev) {
+	case DISPC_K2G:
+		dispc_k2g_plane_init(dispc);
+		break;
+	case DISPC_AM65X:
+	case DISPC_J721E:
+		dispc_k3_plane_init(dispc);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void dispc_vp_init(struct dispc_device *dispc)
+{
+	unsigned int i;
+
+	dev_dbg(dispc->dev, "%s()\n", __func__);
+
+	/* Enable the gamma shadow bit-field for all VPs */
+	for (i = 0; i < dispc->feat->num_vps; i++)
+		VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+}
+
+static void dispc_initial_config(struct dispc_device *dispc)
+{
+	dispc_plane_init(dispc);
+	dispc_vp_init(dispc);
+
+	/* Note: Hardcoded DPI routing on J721E for now */
+	if (dispc->feat->subrev == DISPC_J721E) {
+		dispc_write(dispc, DISPC_CONNECTIONS,
+			    FLD_VAL(2, 3, 0) |		/* VP1 to DPI0 */
+			    FLD_VAL(8, 7, 4)		/* VP3 to DPI1 */
+			);
+	}
+}
+
+static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc,
+					   u32 hw_videoport)
+{
+	u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+	u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+	unsigned int i;
+
+	dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+	if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+		return;
+
+	for (i = 0; i < hwlen; ++i) {
+		u32 v = table[i];
+
+		v |= i << 24;
+
+		dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE,
+			       v);
+	}
+}
+
+static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc,
+					     u32 hw_videoport)
+{
+	u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+	u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+	unsigned int i;
+
+	dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+	if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+		return;
+
+	for (i = 0; i < hwlen; ++i) {
+		u32 v = table[i];
+
+		v |= i << 24;
+
+		dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+	}
+}
+
+static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc,
+					     u32 hw_videoport)
+{
+	u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+	u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+	unsigned int i;
+
+	dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+	if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT))
+		return;
+
+	for (i = 0; i < hwlen; ++i) {
+		u32 v = table[i];
+
+		if (i == 0)
+			v |= 1 << 31;
+
+		dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+	}
+}
+
+static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
+				       u32 hw_videoport)
+{
+	switch (dispc->feat->subrev) {
+	case DISPC_K2G:
+		dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
+		break;
+	case DISPC_AM65X:
+		dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
+		break;
+	case DISPC_J721E:
+		dispc_j721e_vp_write_gamma_table(dispc, hw_videoport);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
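+
+/*
+ * Summary of the table word layouts used by the writers above (derived
+ * from the code, not from a TRM):
+ *
+ *	K2G:	8-bit RGB data in bits 23:0, table index in bits 31:24,
+ *		written to DISPC_VP_K2G_GAMMA_TABLE
+ *	AM65x:	same packing, written to DISPC_VP_GAMMA_TABLE
+ *	J721E:	10-bit RGB data in bits 29:0 with no index field; the
+ *		first entry is written with bit 31 set
+ *
+ * e.g. on AM65x a mid-gray entry (0x808080) at index 128 goes out as
+ * 0x80808080.
+ */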
+
+static const struct drm_color_lut dispc_vp_gamma_default_lut[] = {
+	{ .red = 0, .green = 0, .blue = 0, },
+	{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+static void dispc_vp_set_gamma(struct dispc_device *dispc,
+			       u32 hw_videoport,
+			       const struct drm_color_lut *lut,
+			       unsigned int length)
+{
+	u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+	u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+	u32 hwbits;
+	unsigned int i;
+
+	dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n",
+		__func__, hw_videoport, length, hwlen);
+
+	if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT)
+		hwbits = 10;
+	else
+		hwbits = 8;
+
+	if (!lut || length < 2) {
+		lut = dispc_vp_gamma_default_lut;
+		length = ARRAY_SIZE(dispc_vp_gamma_default_lut);
+	}
+
+	for (i = 0; i < length - 1; ++i) {
+		unsigned int first = i * (hwlen - 1) / (length - 1);
+		unsigned int last = (i + 1) * (hwlen - 1) / (length - 1);
+		unsigned int w = last - first;
+		u16 r, g, b;
+		unsigned int j;
+
+		if (w == 0)
+			continue;
+
+		for (j = 0; j <= w; j++) {
+			r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
+			g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
+			b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;
+
+			r >>= 16 - hwbits;
+			g >>= 16 - hwbits;
+			b >>= 16 - hwbits;
+
+			table[first + j] = (r << (hwbits * 2)) |
+				(g << hwbits) | b;
+		}
+	}
+
+	dispc_vp_write_gamma_table(dispc, hw_videoport);
+}
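+
+/*
+ * Worked example of the interpolation above: with the 2-entry default
+ * LUT and an 8-bit, 256-entry hardware table (hwlen = 256, hwbits = 8)
+ * there is a single segment with w = 255, and entry j becomes
+ *
+ *	r = g = b = (0xffff * j / 255) >> 8 = j
+ *
+ * so table[j] = (j << 16) | (j << 8) | j, a linear gray ramp, e.g.
+ * table[128] = 0x808080.
+ */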
+
+static s16 dispc_S31_32_to_s2_8(s64 coef)
+{
+	u64 sign_bit = 1ULL << 63;
+	u64 cbits = (u64)coef;
+	s16 ret;
+
+	if (cbits & sign_bit)
+		ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200);
+	else
+		ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+	return ret;
+}
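+
+/*
+ * Illustration of the conversion above: drm_color_ctm coefficients are
+ * S31.32 sign-magnitude fixed point, so shifting the magnitude right
+ * by 24 leaves 8 fractional bits. E.g. 1.0 (0x1_00000000) maps to
+ * 0x100 (1.0 in s2.8), and the clamps bound the result to roughly
+ * [-2.0, +1.996].
+ */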
+
+static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
+				   struct dispc_csc_coef *cpr)
+{
+	memset(cpr, 0, sizeof(*cpr));
+
+	cpr->to_regval = dispc_csc_cpr_regval;
+	cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]);
+	cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]);
+	cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]);
+	cpr->m[CSC_GR] = dispc_S31_32_to_s2_8(ctm->matrix[3]);
+	cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]);
+	cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]);
+	cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]);
+	cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]);
+	cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
+#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) |	\
+			  FLD_VAL(xB, 31, 22))
+
+static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
+					u32 *regval)
+{
+	regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]);
+	regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]);
+	regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]);
+}
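+
+/*
+ * Each register written above packs one row of the 3x3 matrix into
+ * three 10-bit fields (bits 9:0, 20:11 and 31:22), just wide enough
+ * for the s2.8 coefficients produced by dispc_S31_32_to_s2_8().
+ */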
+
+#undef CVAL
+
+static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+				   const struct dispc_csc_coef *csc)
+{
+	static const u16 dispc_vp_cpr_coef_reg[] = {
+		DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+		/* K2G CPR is packed into three registers. */
+	};
+	u32 regval[DISPC_CSC_REGVAL_LEN];
+	unsigned int i;
+
+	dispc_k2g_vp_csc_cpr_regval(csc, regval);
+
+	for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++)
+		dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i],
+			       regval[i]);
+}
+
+static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+				 struct drm_color_ctm *ctm)
+{
+	u32 cprenable = 0;
+
+	if (ctm) {
+		struct dispc_csc_coef cpr;
+
+		dispc_k2g_cpr_from_ctm(ctm, &cpr);
+		dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr);
+		cprenable = 1;
+	}
+
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+		       cprenable, 15, 15);
+}
+
+static s16 dispc_S31_32_to_s3_8(s64 coef)
+{
+	u64 sign_bit = 1ULL << 63;
+	u64 cbits = (u64)coef;
+	s16 ret;
+
+	if (cbits & sign_bit)
+		ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400);
+	else
+		ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF);
+
+	return ret;
+}
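+
+/*
+ * This differs from dispc_S31_32_to_s2_8() only in the clamp range:
+ * the extra integer bit allows roughly [-4.0, +3.996] in s3.8 instead
+ * of [-2.0, +1.996] in s2.8.
+ */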
+
+static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm,
+			       struct dispc_csc_coef *cpr)
+{
+	memset(cpr, 0, sizeof(*cpr));
+
+	cpr->to_regval = dispc_csc_cpr_regval;
+	cpr->m[CSC_RR] = dispc_S31_32_to_s3_8(ctm->matrix[0]);
+	cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]);
+	cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]);
+	cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]);
+	cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]);
+	cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]);
+	cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]);
+	cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]);
+	cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]);
+}
+
+static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+				  const struct dispc_csc_coef *csc)
+{
+	static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+		DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+		DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5,
+		DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7,
+	};
+	u32 regval[DISPC_CSC_REGVAL_LEN];
+	unsigned int i;
+
+	csc->to_regval(csc, regval);
+
+	for (i = 0; i < ARRAY_SIZE(regval); i++)
+		dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i],
+			       regval[i]);
+}
+
+static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+				struct drm_color_ctm *ctm)
+{
+	u32 colorconvenable = 0;
+
+	if (ctm) {
+		struct dispc_csc_coef csc;
+
+		dispc_csc_from_ctm(ctm, &csc);
+		dispc_k3_vp_write_csc(dispc, hw_videoport, &csc);
+		colorconvenable = 1;
+	}
+
+	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+		       colorconvenable, 24, 24);
+}
+
+static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
+				    u32 hw_videoport,
+				    const struct drm_crtc_state *state,
+				    bool newmodeset)
+{
+	struct drm_color_lut *lut = NULL;
+	struct drm_color_ctm *ctm = NULL;
+	unsigned int length = 0;
+
+	if (!(state->color_mgmt_changed || newmodeset))
+		return;
+
+	if (state->gamma_lut) {
+		lut = (struct drm_color_lut *)state->gamma_lut->data;
+		length = state->gamma_lut->length / sizeof(*lut);
+	}
+
+	dispc_vp_set_gamma(dispc, hw_videoport, lut, length);
+
+	if (state->ctm)
+		ctm = (struct drm_color_ctm *)state->ctm->data;
+
+	if (dispc->feat->subrev == DISPC_K2G)
+		dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm);
+	else
+		dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm);
+}
+
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+		    const struct drm_crtc_state *state, bool newmodeset)
+{
+	dispc_vp_set_default_color(dispc, hw_videoport, 0);
+	dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset);
+}
+
+int dispc_runtime_suspend(struct dispc_device *dispc)
+{
+	dev_dbg(dispc->dev, "suspend\n");
+
+	dispc->is_enabled = false;
+
+	clk_disable_unprepare(dispc->fclk);
+
+	return 0;
+}
+
+int dispc_runtime_resume(struct dispc_device *dispc)
+{
+	dev_dbg(dispc->dev, "resume\n");
+
+	clk_prepare_enable(dispc->fclk);
+
+	if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+		dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+
+	dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
+		dispc_read(dispc, DSS_REVISION));
+
+	dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
+		REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+		REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+		REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+
+	if (dispc->feat->subrev == DISPC_AM65X)
+		dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
+			REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+			REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+			REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+
+	dev_dbg(dispc->dev, "DISPC IDLE %d\n",
+		REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+
+	dispc_initial_config(dispc);
+
+	dispc->is_enabled = true;
+
+	tidss_irq_resume(dispc->tidss);
+
+	return 0;
+}
+
+void dispc_remove(struct tidss_device *tidss)
+{
+	dev_dbg(tidss->dev, "%s\n", __func__);
+
+	tidss->dispc = NULL;
+}
+
+static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
+				void __iomem **base)
+{
+	struct resource *res;
+	void __iomem *b;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name);
+		return -EINVAL;
+	}
+
+	b = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(b)) {
+		dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name);
+		return PTR_ERR(b);
+	}
+
+	*base = b;
+
+	return 0;
+}
+
+static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
+					 struct dispc_device *dispc)
+{
+	dispc->oldi_io_ctrl =
+		syscon_regmap_lookup_by_phandle(dev->of_node,
+						"ti,am65x-oldi-io-ctrl");
+	if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
+		dispc->oldi_io_ctrl = NULL;
+	} else if (IS_ERR(dispc->oldi_io_ctrl)) {
+		dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
+			__func__, PTR_ERR(dispc->oldi_io_ctrl));
+		return PTR_ERR(dispc->oldi_io_ctrl);
+	}
+	return 0;
+}
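+
+/*
+ * Illustrative device-tree fragment for the lookup above (node and
+ * label names are made up, not taken from a real dts):
+ *
+ *	dss: dss@4a00000 {
+ *		compatible = "ti,am65x-dss";
+ *		...
+ *		ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ *	};
+ *
+ * The property is optional: an -ENODEV result from the lookup is
+ * treated as "no OLDI IO control" rather than as an error.
+ */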
+
+int dispc_init(struct tidss_device *tidss)
+{
+	struct device *dev = tidss->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dispc_device *dispc;
+	const struct dispc_features *feat;
+	unsigned int i, num_fourccs;
+	int r = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	feat = tidss->feat;
+
+	if (feat->subrev != DISPC_K2G) {
+		r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+		if (r)
+			dev_warn(dev, "cannot set DMA masks to 48-bit\n");
+	}
+
+	dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
+	if (!dispc)
+		return -ENOMEM;
+
+	dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
+				      sizeof(*dispc->fourccs), GFP_KERNEL);
+	if (!dispc->fourccs)
+		return -ENOMEM;
+
+	num_fourccs = 0;
+	for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+		if (feat->errata.i2000 &&
+		    dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
+			continue;
+		dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+	}
+	dispc->num_fourccs = num_fourccs;
+	dispc->tidss = tidss;
+	dispc->dev = dev;
+	dispc->feat = feat;
+
+	dispc_common_regmap = dispc->feat->common_regs;
+
+	r = dispc_iomap_resource(pdev, dispc->feat->common,
+				 &dispc->base_common);
+	if (r)
+		return r;
+
+	for (i = 0; i < dispc->feat->num_planes; i++) {
+		r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+					 &dispc->base_vid[i]);
+		if (r)
+			return r;
+	}
+
+	for (i = 0; i < dispc->feat->num_vps; i++) {
+		u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+		u32 *gamma_table;
+		struct clk *clk;
+
+		r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i],
+					 &dispc->base_ovr[i]);
+		if (r)
+			return r;
+
+		r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i],
+					 &dispc->base_vp[i]);
+		if (r)
+			return r;
+
+		clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]);
+		if (IS_ERR(clk)) {
+			dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__,
+				dispc->feat->vpclk_name[i], PTR_ERR(clk));
+			return PTR_ERR(clk);
+		}
+		dispc->vp_clk[i] = clk;
+
+		gamma_table = devm_kmalloc_array(dev, gamma_size,
+						 sizeof(*gamma_table),
+						 GFP_KERNEL);
+		if (!gamma_table)
+			return -ENOMEM;
+		dispc->vp_data[i].gamma_table = gamma_table;
+	}
+
+	if (feat->subrev == DISPC_AM65X) {
+		r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+		if (r)
+			return r;
+	}
+
+	dispc->fclk = devm_clk_get(dev, "fck");
+	if (IS_ERR(dispc->fclk)) {
+		dev_err(dev, "%s: Failed to get fclk: %ld\n",
+			__func__, PTR_ERR(dispc->fclk));
+		return PTR_ERR(dispc->fclk);
+	}
+	dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk));
+
+	of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+			     &dispc->memory_bandwidth_limit);
+
+	tidss->dispc = dispc;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
new file mode 100644
index 0000000..a4a6824
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_H__
+#define __TIDSS_DISPC_H__
+
+#include "tidss_drv.h"
+
+struct dispc_device;
+
+struct drm_crtc_state;
+
+enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
+
+struct tidss_vp_feat {
+	struct tidss_vp_color_feat {
+		u32 gamma_size;
+		enum tidss_gamma_type gamma_type;
+		bool has_ctm;
+	} color;
+};
+
+struct tidss_plane_feat {
+	struct tidss_plane_color_feat {
+		u32 encodings;
+		u32 ranges;
+		enum drm_color_encoding default_encoding;
+		enum drm_color_range default_range;
+	} color;
+	struct tidss_plane_blend_feat {
+		bool global_alpha;
+	} blend;
+};
+
+struct dispc_features_scaling {
+	u32 in_width_max_5tap_rgb;
+	u32 in_width_max_3tap_rgb;
+	u32 in_width_max_5tap_yuv;
+	u32 in_width_max_3tap_yuv;
+	u32 upscale_limit;
+	u32 downscale_limit_5tap;
+	u32 downscale_limit_3tap;
+	u32 xinc_max;
+};
+
+struct dispc_errata {
+	bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
+enum dispc_vp_bus_type {
+	DISPC_VP_DPI,		/* DPI output */
+	DISPC_VP_OLDI,		/* OLDI (LVDS) output */
+	DISPC_VP_INTERNAL,	/* SoC internal routing */
+	DISPC_VP_MAX_BUS_TYPE,
+};
+
+enum dispc_dss_subrevision {
+	DISPC_K2G,
+	DISPC_AM65X,
+	DISPC_J721E,
+};
+
+struct dispc_features {
+	int min_pclk_khz;
+	int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
+
+	struct dispc_features_scaling scaling;
+
+	enum dispc_dss_subrevision subrev;
+
+	const char *common;
+	const u16 *common_regs;
+	u32 num_vps;
+	const char *vp_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+	const char *ovr_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+	const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
+	const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
+	struct tidss_vp_feat vp_feat;
+	u32 num_planes;
+	const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
+	bool vid_lite[TIDSS_MAX_PLANES];
+	u32 vid_order[TIDSS_MAX_PLANES];
+
+	struct dispc_errata errata;
+};
+
+extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am65x_feats;
+extern const struct dispc_features dispc_j721e_feats;
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+			 u32 hw_videoport, u32 x, u32 y, u32 layer);
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+			    u32 hw_videoport, u32 layer, bool enable);
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+		      const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+		     const struct drm_crtc_state *state);
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+		       const struct drm_crtc_state *state);
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+					 u32 hw_videoport,
+					 const struct drm_display_mode *mode);
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+			  unsigned long rate);
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+		    const struct drm_crtc_state *state, bool newmodeset);
+
+int dispc_runtime_suspend(struct dispc_device *dispc);
+int dispc_runtime_resume(struct dispc_device *dispc);
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+		      const struct drm_plane_state *state,
+		      u32 hw_videoport);
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+		      const struct drm_plane_state *state,
+		      u32 hw_videoport);
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
+
+int dispc_init(struct tidss_device *tidss);
+void dispc_remove(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
new file mode 100644
index 0000000..88a83a4
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_REGS_H
+#define __TIDSS_DISPC_REGS_H
+
+enum dispc_common_regs {
+	NOT_APPLICABLE_OFF = 0,
+	DSS_REVISION_OFF,
+	DSS_SYSCONFIG_OFF,
+	DSS_SYSSTATUS_OFF,
+	DISPC_IRQ_EOI_OFF,
+	DISPC_IRQSTATUS_RAW_OFF,
+	DISPC_IRQSTATUS_OFF,
+	DISPC_IRQENABLE_SET_OFF,
+	DISPC_IRQENABLE_CLR_OFF,
+	DISPC_VID_IRQENABLE_OFF,
+	DISPC_VID_IRQSTATUS_OFF,
+	DISPC_VP_IRQENABLE_OFF,
+	DISPC_VP_IRQSTATUS_OFF,
+	WB_IRQENABLE_OFF,
+	WB_IRQSTATUS_OFF,
+	DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF,
+	DISPC_GLOBAL_OUTPUT_ENABLE_OFF,
+	DISPC_GLOBAL_BUFFER_OFF,
+	DSS_CBA_CFG_OFF,
+	DISPC_DBG_CONTROL_OFF,
+	DISPC_DBG_STATUS_OFF,
+	DISPC_CLKGATING_DISABLE_OFF,
+	DISPC_SECURE_DISABLE_OFF,
+	FBDC_REVISION_1_OFF,
+	FBDC_REVISION_2_OFF,
+	FBDC_REVISION_3_OFF,
+	FBDC_REVISION_4_OFF,
+	FBDC_REVISION_5_OFF,
+	FBDC_REVISION_6_OFF,
+	FBDC_COMMON_CONTROL_OFF,
+	FBDC_CONSTANT_COLOR_0_OFF,
+	FBDC_CONSTANT_COLOR_1_OFF,
+	DISPC_CONNECTIONS_OFF,
+	DISPC_MSS_VP1_OFF,
+	DISPC_MSS_VP3_OFF,
+	DISPC_COMMON_REG_TABLE_LEN,
+};
+
+/*
+ * dispc_common_regmap should be defined as a const u16 * pointing to a
+ * valid DSS common register map for the platform before the macros
+ * below can be used.
+ */
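+
+/*
+ * A minimal usage sketch, mirroring tidss_dispc.c (illustrative only):
+ *
+ *	dispc_common_regmap = dispc->feat->common_regs;
+ *	...
+ *	if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ *		dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+ *
+ * where DSS_SYSSTATUS expands to dispc_common_regmap[DSS_SYSSTATUS_OFF].
+ */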
+
+#define REG(r) (dispc_common_regmap[r ## _OFF])
+
+#define DSS_REVISION			REG(DSS_REVISION)
+#define DSS_SYSCONFIG			REG(DSS_SYSCONFIG)
+#define DSS_SYSSTATUS			REG(DSS_SYSSTATUS)
+#define DISPC_IRQ_EOI			REG(DISPC_IRQ_EOI)
+#define DISPC_IRQSTATUS_RAW		REG(DISPC_IRQSTATUS_RAW)
+#define DISPC_IRQSTATUS			REG(DISPC_IRQSTATUS)
+#define DISPC_IRQENABLE_SET		REG(DISPC_IRQENABLE_SET)
+#define DISPC_IRQENABLE_CLR		REG(DISPC_IRQENABLE_CLR)
+#define DISPC_VID_IRQENABLE(n)		(REG(DISPC_VID_IRQENABLE) + (n) * 4)
+#define DISPC_VID_IRQSTATUS(n)		(REG(DISPC_VID_IRQSTATUS) + (n) * 4)
+#define DISPC_VP_IRQENABLE(n)		(REG(DISPC_VP_IRQENABLE) + (n) * 4)
+#define DISPC_VP_IRQSTATUS(n)		(REG(DISPC_VP_IRQSTATUS) + (n) * 4)
+#define WB_IRQENABLE			REG(WB_IRQENABLE)
+#define WB_IRQSTATUS			REG(WB_IRQSTATUS)
+
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE	REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_OUTPUT_ENABLE	REG(DISPC_GLOBAL_OUTPUT_ENABLE)
+#define DISPC_GLOBAL_BUFFER		REG(DISPC_GLOBAL_BUFFER)
+#define DSS_CBA_CFG			REG(DSS_CBA_CFG)
+#define DISPC_DBG_CONTROL		REG(DISPC_DBG_CONTROL)
+#define DISPC_DBG_STATUS		REG(DISPC_DBG_STATUS)
+#define DISPC_CLKGATING_DISABLE		REG(DISPC_CLKGATING_DISABLE)
+#define DISPC_SECURE_DISABLE		REG(DISPC_SECURE_DISABLE)
+
+#define FBDC_REVISION_1			REG(FBDC_REVISION_1)
+#define FBDC_REVISION_2			REG(FBDC_REVISION_2)
+#define FBDC_REVISION_3			REG(FBDC_REVISION_3)
+#define FBDC_REVISION_4			REG(FBDC_REVISION_4)
+#define FBDC_REVISION_5			REG(FBDC_REVISION_5)
+#define FBDC_REVISION_6			REG(FBDC_REVISION_6)
+#define FBDC_COMMON_CONTROL		REG(FBDC_COMMON_CONTROL)
+#define FBDC_CONSTANT_COLOR_0		REG(FBDC_CONSTANT_COLOR_0)
+#define FBDC_CONSTANT_COLOR_1		REG(FBDC_CONSTANT_COLOR_1)
+#define DISPC_CONNECTIONS		REG(DISPC_CONNECTIONS)
+#define DISPC_MSS_VP1			REG(DISPC_MSS_VP1)
+#define DISPC_MSS_VP3			REG(DISPC_MSS_VP3)
+
+/* VID */
+
+#define DISPC_VID_ACCUH_0		0x0
+#define DISPC_VID_ACCUH_1		0x4
+#define DISPC_VID_ACCUH2_0		0x8
+#define DISPC_VID_ACCUH2_1		0xc
+#define DISPC_VID_ACCUV_0		0x10
+#define DISPC_VID_ACCUV_1		0x14
+#define DISPC_VID_ACCUV2_0		0x18
+#define DISPC_VID_ACCUV2_1		0x1c
+#define DISPC_VID_ATTRIBUTES		0x20
+#define DISPC_VID_ATTRIBUTES2		0x24
+#define DISPC_VID_BA_0			0x28
+#define DISPC_VID_BA_1			0x2c
+#define DISPC_VID_BA_UV_0		0x30
+#define DISPC_VID_BA_UV_1		0x34
+#define DISPC_VID_BUF_SIZE_STATUS	0x38
+#define DISPC_VID_BUF_THRESHOLD		0x3c
+#define DISPC_VID_CSC_COEF(n)		(0x40 + (n) * 4)
+
+#define DISPC_VID_FIRH			0x5c
+#define DISPC_VID_FIRH2			0x60
+#define DISPC_VID_FIRV			0x64
+#define DISPC_VID_FIRV2			0x68
+
+#define DISPC_VID_FIR_COEFS_H0		0x6c
+#define DISPC_VID_FIR_COEF_H0(phase)	(0x6c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H0_C	0x90
+#define DISPC_VID_FIR_COEF_H0_C(phase)	(0x90 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_H12		0xb4
+#define DISPC_VID_FIR_COEF_H12(phase)	(0xb4 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H12_C	0xf4
+#define DISPC_VID_FIR_COEF_H12_C(phase)	(0xf4 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V0		0x134
+#define DISPC_VID_FIR_COEF_V0(phase)	(0x134 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V0_C	0x158
+#define DISPC_VID_FIR_COEF_V0_C(phase)	(0x158 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V12		0x17c
+#define DISPC_VID_FIR_COEF_V12(phase)	(0x17c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V12_C	0x1bc
+#define DISPC_VID_FIR_COEF_V12_C(phase)	(0x1bc + (phase) * 4)
+
+#define DISPC_VID_GLOBAL_ALPHA		0x1fc
+#define DISPC_VID_K2G_IRQENABLE		0x200 /* K2G */
+#define DISPC_VID_K2G_IRQSTATUS		0x204 /* K2G */
+#define DISPC_VID_MFLAG_THRESHOLD	0x208
+#define DISPC_VID_PICTURE_SIZE		0x20c
+#define DISPC_VID_PIXEL_INC		0x210
+#define DISPC_VID_K2G_POSITION		0x214 /* K2G */
+#define DISPC_VID_PRELOAD		0x218
+#define DISPC_VID_ROW_INC		0x21c
+#define DISPC_VID_SIZE			0x220
+#define DISPC_VID_BA_EXT_0		0x22c
+#define DISPC_VID_BA_EXT_1		0x230
+#define DISPC_VID_BA_UV_EXT_0		0x234
+#define DISPC_VID_BA_UV_EXT_1		0x238
+#define DISPC_VID_CSC_COEF7		0x23c
+#define DISPC_VID_ROW_INC_UV		0x248
+#define DISPC_VID_CLUT			0x260
+#define DISPC_VID_SAFETY_ATTRIBUTES	0x2a0
+#define DISPC_VID_SAFETY_CAPT_SIGNATURE	0x2a4
+#define DISPC_VID_SAFETY_POSITION	0x2a8
+#define DISPC_VID_SAFETY_REF_SIGNATURE	0x2ac
+#define DISPC_VID_SAFETY_SIZE		0x2b0
+#define DISPC_VID_SAFETY_LFSR_SEED	0x2b4
+#define DISPC_VID_LUMAKEY		0x2b8
+#define DISPC_VID_DMA_BUFSIZE		0x2bc /* J721E */
+
+/* OVR */
+
+#define DISPC_OVR_CONFIG		0x0
+#define DISPC_OVR_VIRTVP		0x4 /* J721E */
+#define DISPC_OVR_DEFAULT_COLOR		0x8
+#define DISPC_OVR_DEFAULT_COLOR2	0xc
+#define DISPC_OVR_TRANS_COLOR_MAX	0x10
+#define DISPC_OVR_TRANS_COLOR_MAX2	0x14
+#define DISPC_OVR_TRANS_COLOR_MIN	0x18
+#define DISPC_OVR_TRANS_COLOR_MIN2	0x1c
+#define DISPC_OVR_ATTRIBUTES(n)		(0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES2(n)	(0x34 + (n) * 4) /* J721E */
+
+/* VP */
+
+#define DISPC_VP_CONFIG				0x0
+#define DISPC_VP_CONTROL			0x4
+#define DISPC_VP_CSC_COEF0			0x8
+#define DISPC_VP_CSC_COEF1			0xc
+#define DISPC_VP_CSC_COEF2			0x10
+#define DISPC_VP_DATA_CYCLE_0			0x14
+#define DISPC_VP_DATA_CYCLE_1			0x18
+#define DISPC_VP_K2G_GAMMA_TABLE		0x20 /* K2G */
+#define DISPC_VP_K2G_IRQENABLE			0x3c /* K2G */
+#define DISPC_VP_K2G_IRQSTATUS			0x40 /* K2G */
+#define DISPC_VP_DATA_CYCLE_2			0x1c
+#define DISPC_VP_LINE_NUMBER			0x44
+#define DISPC_VP_POL_FREQ			0x4c
+#define DISPC_VP_SIZE_SCREEN			0x50
+#define DISPC_VP_TIMING_H			0x54
+#define DISPC_VP_TIMING_V			0x58
+#define DISPC_VP_CSC_COEF3			0x5c
+#define DISPC_VP_CSC_COEF4			0x60
+#define DISPC_VP_CSC_COEF5			0x64
+#define DISPC_VP_CSC_COEF6			0x68
+#define DISPC_VP_CSC_COEF7			0x6c
+#define DISPC_VP_SAFETY_ATTRIBUTES_0		0x70
+#define DISPC_VP_SAFETY_ATTRIBUTES_1		0x74
+#define DISPC_VP_SAFETY_ATTRIBUTES_2		0x78
+#define DISPC_VP_SAFETY_ATTRIBUTES_3		0x7c
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_0	0x90
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_1	0x94
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_2	0x98
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_3	0x9c
+#define DISPC_VP_SAFETY_POSITION_0		0xb0
+#define DISPC_VP_SAFETY_POSITION_1		0xb4
+#define DISPC_VP_SAFETY_POSITION_2		0xb8
+#define DISPC_VP_SAFETY_POSITION_3		0xbc
+#define DISPC_VP_SAFETY_REF_SIGNATURE_0		0xd0
+#define DISPC_VP_SAFETY_REF_SIGNATURE_1		0xd4
+#define DISPC_VP_SAFETY_REF_SIGNATURE_2		0xd8
+#define DISPC_VP_SAFETY_REF_SIGNATURE_3		0xdc
+#define DISPC_VP_SAFETY_SIZE_0			0xf0
+#define DISPC_VP_SAFETY_SIZE_1			0xf4
+#define DISPC_VP_SAFETY_SIZE_2			0xf8
+#define DISPC_VP_SAFETY_SIZE_3			0xfc
+#define DISPC_VP_SAFETY_LFSR_SEED		0x110
+#define DISPC_VP_GAMMA_TABLE			0x120
+#define DISPC_VP_DSS_OLDI_CFG			0x160
+#define DISPC_VP_DSS_OLDI_STATUS		0x164
+#define DISPC_VP_DSS_OLDI_LB			0x168
+#define DISPC_VP_DSS_MERGE_SPLIT		0x16c /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE		0x170 /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS	0x174 /* J721E */
+
+/*
+ * OLDI IO_CTRL register offsets. On AM654 the registers live in
+ * CTRL_MMR0, where the syscon regmap should cover the 0x14-byte range
+ * from CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL.
+ */
+#define OLDI_DAT0_IO_CTRL			0x00
+#define OLDI_DAT1_IO_CTRL			0x04
+#define OLDI_DAT2_IO_CTRL			0x08
+#define OLDI_DAT3_IO_CTRL			0x0C
+#define OLDI_CLK_IO_CTRL			0x10
+
+#define OLDI_PWRDN_TX				BIT(8)
+
+#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
new file mode 100644
index 0000000..d95e4be
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/console.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_kms.h"
+#include "tidss_irq.h"
+
+/* Power management */
+
+int tidss_runtime_get(struct tidss_device *tidss)
+{
+	int r;
+
+	dev_dbg(tidss->dev, "%s\n", __func__);
+
+	r = pm_runtime_get_sync(tidss->dev);
+	WARN_ON(r < 0);
+	return r < 0 ? r : 0;
+}
+
+void tidss_runtime_put(struct tidss_device *tidss)
+{
+	int r;
+
+	dev_dbg(tidss->dev, "%s\n", __func__);
+
+	r = pm_runtime_put_sync(tidss->dev);
+	WARN_ON(r < 0);
+}
+
+static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
+{
+	struct tidss_device *tidss = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	return dispc_runtime_suspend(tidss->dispc);
+}
+
+static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
+{
+	struct tidss_device *tidss = dev_get_drvdata(dev);
+	int r;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	r = dispc_runtime_resume(tidss->dispc);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int __maybe_unused tidss_suspend(struct device *dev)
+{
+	struct tidss_device *tidss = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	return drm_mode_config_helper_suspend(&tidss->ddev);
+}
+
+static int __maybe_unused tidss_resume(struct device *dev)
+{
+	struct tidss_device *tidss = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	return drm_mode_config_helper_resume(&tidss->ddev);
+}
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops tidss_pm_ops = {
+	.runtime_suspend = tidss_pm_runtime_suspend,
+	.runtime_resume = tidss_pm_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+};
+
+#endif /* CONFIG_PM */
+
+/* DRM device Information */
+
+static void tidss_release(struct drm_device *ddev)
+{
+	struct tidss_device *tidss = ddev->dev_private;
+
+	drm_kms_helper_poll_fini(ddev);
+
+	tidss_modeset_cleanup(tidss);
+
+	drm_dev_fini(ddev);
+
+	kfree(tidss);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+
+static struct drm_driver tidss_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+	.fops			= &tidss_fops,
+	.release		= tidss_release,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
+	.name			= "tidss",
+	.desc			= "TI Keystone DSS",
+	.date			= "20180215",
+	.major			= 1,
+	.minor			= 0,
+
+	.irq_preinstall		= tidss_irq_preinstall,
+	.irq_postinstall	= tidss_irq_postinstall,
+	.irq_handler		= tidss_irq_handler,
+	.irq_uninstall		= tidss_irq_uninstall,
+};
+
+static int tidss_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tidss_device *tidss;
+	struct drm_device *ddev;
+	int ret;
+	int irq;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/* Can't use devm_* since drm_device's lifetime may exceed dev's */
+	tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
+	if (!tidss)
+		return -ENOMEM;
+
+	ddev = &tidss->ddev;
+
+	ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
+	if (ret) {
+		kfree(tidss);
+		return ret;
+	}
+
+	tidss->dev = dev;
+	tidss->feat = of_device_get_match_data(dev);
+
+	platform_set_drvdata(pdev, tidss);
+
+	ddev->dev_private = tidss;
+
+	ret = dispc_init(tidss);
+	if (ret) {
+		dev_err(dev, "failed to initialize dispc: %d\n", ret);
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
+
+#ifndef CONFIG_PM
+	/* If we don't have PM, we need to call resume manually */
+	dispc_runtime_resume(tidss->dispc);
+#endif
+
+	ret = tidss_modeset_init(tidss);
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
+		goto err_runtime_suspend;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto err_runtime_suspend;
+	}
+
+	ret = drm_irq_install(ddev, irq);
+	if (ret) {
+		dev_err(dev, "drm_irq_install failed: %d\n", ret);
+		goto err_runtime_suspend;
+	}
+
+	drm_kms_helper_poll_init(ddev);
+
+	drm_mode_config_reset(ddev);
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret) {
+		dev_err(dev, "failed to register DRM device\n");
+		goto err_irq_uninstall;
+	}
+
+	drm_fbdev_generic_setup(ddev, 32);
+
+	dev_dbg(dev, "%s done\n", __func__);
+
+	return 0;
+
+err_irq_uninstall:
+	drm_irq_uninstall(ddev);
+
+err_runtime_suspend:
+#ifndef CONFIG_PM
+	dispc_runtime_suspend(tidss->dispc);
+#endif
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static int tidss_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tidss_device *tidss = platform_get_drvdata(pdev);
+	struct drm_device *ddev = &tidss->ddev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	drm_dev_unregister(ddev);
+
+	drm_atomic_helper_shutdown(ddev);
+
+	drm_irq_uninstall(ddev);
+
+#ifndef CONFIG_PM
+	/* If we don't have PM, we need to call suspend manually */
+	dispc_runtime_suspend(tidss->dispc);
+#endif
+	pm_runtime_disable(dev);
+
+	/* devm allocated dispc goes away with the dev so mark it NULL */
+	dispc_remove(tidss);
+
+	dev_dbg(dev, "%s done\n", __func__);
+
+	return 0;
+}
+
+static void tidss_shutdown(struct platform_device *pdev)
+{
+	struct tidss_device *tidss = platform_get_drvdata(pdev);
+
+	drm_atomic_helper_shutdown(&tidss->ddev);
+}
+
+static const struct of_device_id tidss_of_table[] = {
+	{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+	{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
+	{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, tidss_of_table);
+
+static struct platform_driver tidss_platform_driver = {
+	.probe		= tidss_probe,
+	.remove		= tidss_remove,
+	.shutdown	= tidss_shutdown,
+	.driver		= {
+		.name	= "tidss",
+#ifdef CONFIG_PM
+		.pm	= &tidss_pm_ops,
+#endif
+		.of_match_table = tidss_of_table,
+		.suppress_bind_attrs = true,
+	},
+};
+
+module_platform_driver(tidss_platform_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("TI Keystone DSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
new file mode 100644
index 0000000..e2aa643
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_DRV_H__
+#define __TIDSS_DRV_H__
+
+#include <linux/spinlock.h>
+
+#define TIDSS_MAX_PORTS 4
+#define TIDSS_MAX_PLANES 4
+
+typedef u32 dispc_irq_t;
+
+struct tidss_device {
+	struct drm_device ddev;		/* DRM device for DSS */
+	struct device *dev;		/* Underlying DSS device */
+
+	const struct dispc_features *feat;
+	struct dispc_device *dispc;
+
+	unsigned int num_crtcs;
+	struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
+
+	unsigned int num_planes;
+	struct drm_plane *planes[TIDSS_MAX_PLANES];
+
+	spinlock_t wait_lock;	/* protects the irq masks */
+	dispc_irq_t irq_mask;	/* enabled irqs in addition to wait_list */
+
+	struct drm_atomic_state *saved_state;
+};
+
+int tidss_runtime_get(struct tidss_device *tidss);
+void tidss_runtime_put(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
new file mode 100644
index 0000000..83785b0
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include "tidss_crtc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+
+static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
+				      struct drm_crtc_state *crtc_state,
+				      struct drm_connector_state *conn_state)
+{
+	struct drm_device *ddev = encoder->dev;
+	struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
+	struct drm_display_info *di = &conn_state->connector->display_info;
+	struct drm_bridge *bridge;
+	bool bus_flags_set = false;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	/*
+	 * Take the bus_flags from the first bridge that defines
+	 * bridge timings, or from the connector's display_info if no
+	 * bridge defines the timings.
+	 */
+	drm_for_each_bridge_in_chain(encoder, bridge) {
+		if (!bridge->timings)
+			continue;
+
+		tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
+		bus_flags_set = true;
+		break;
+	}
+
+	if (!di->bus_formats || di->num_bus_formats == 0)  {
+		dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	// XXX any cleaner way to set bus format and flags?
+	tcrtc_state->bus_format = di->bus_formats[0];
+	if (!bus_flags_set)
+		tcrtc_state->bus_flags = di->bus_flags;
+
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.atomic_check = tidss_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+					 u32 encoder_type, u32 possible_crtcs)
+{
+	struct drm_encoder *enc;
+	int ret;
+
+	enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL);
+	if (!enc)
+		return ERR_PTR(-ENOMEM);
+
+	enc->possible_crtcs = possible_crtcs;
+
+	ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
+			       encoder_type, NULL);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	drm_encoder_helper_add(enc, &encoder_helper_funcs);
+
+	dev_dbg(tidss->dev, "Encoder create done\n");
+
+	return enc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
new file mode 100644
index 0000000..06854d6
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_ENCODER_H__
+#define __TIDSS_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+
+struct tidss_device;
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+					 u32 encoder_type, u32 possible_crtcs);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
new file mode 100644
index 0000000..612c0467
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_print.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* call with wait_lock and dispc runtime held */
+static void tidss_irq_update(struct tidss_device *tidss)
+{
+	assert_spin_locked(&tidss->wait_lock);
+
+	dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
+}
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	u32 hw_videoport = tcrtc->hw_videoport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tidss->wait_lock, flags);
+	tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+			   DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+	tidss_irq_update(tidss);
+	spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_disable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *ddev = crtc->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+	u32 hw_videoport = tcrtc->hw_videoport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tidss->wait_lock, flags);
+	tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+			     DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
+	tidss_irq_update(tidss);
+	spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+irqreturn_t tidss_irq_handler(int irq, void *arg)
+{
+	struct drm_device *ddev = (struct drm_device *)arg;
+	struct tidss_device *tidss = ddev->dev_private;
+	unsigned int id;
+	dispc_irq_t irqstatus;
+
+	if (WARN_ON(!ddev->irq_enabled))
+		return IRQ_NONE;
+
+	irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+
+	for (id = 0; id < tidss->num_crtcs; id++) {
+		struct drm_crtc *crtc = tidss->crtcs[id];
+		struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+		u32 hw_videoport = tcrtc->hw_videoport;
+
+		if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+				 DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
+			tidss_crtc_vblank_irq(crtc);
+
+		if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
+			tidss_crtc_framedone_irq(crtc);
+
+		if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+			tidss_crtc_error_irq(crtc, irqstatus);
+	}
+
+	if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
+		dev_err_ratelimited(tidss->dev, "OCP error\n");
+
+	return IRQ_HANDLED;
+}
+
+void tidss_irq_resume(struct tidss_device *tidss)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tidss->wait_lock, flags);
+	tidss_irq_update(tidss);
+	spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_preinstall(struct drm_device *ddev)
+{
+	struct tidss_device *tidss = ddev->dev_private;
+
+	spin_lock_init(&tidss->wait_lock);
+
+	tidss_runtime_get(tidss);
+
+	dispc_set_irqenable(tidss->dispc, 0);
+	dispc_read_and_clear_irqstatus(tidss->dispc);
+
+	tidss_runtime_put(tidss);
+}
+
+int tidss_irq_postinstall(struct drm_device *ddev)
+{
+	struct tidss_device *tidss = ddev->dev_private;
+	unsigned long flags;
+	unsigned int i;
+
+	tidss_runtime_get(tidss);
+
+	spin_lock_irqsave(&tidss->wait_lock, flags);
+
+	tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+
+	for (i = 0; i < tidss->num_crtcs; ++i) {
+		struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
+
+		tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
+
+		tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
+	}
+
+	tidss_irq_update(tidss);
+
+	spin_unlock_irqrestore(&tidss->wait_lock, flags);
+
+	tidss_runtime_put(tidss);
+
+	return 0;
+}
+
+void tidss_irq_uninstall(struct drm_device *ddev)
+{
+	struct tidss_device *tidss = ddev->dev_private;
+
+	tidss_runtime_get(tidss);
+	dispc_set_irqenable(tidss->dispc, 0);
+	tidss_runtime_put(tidss);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
new file mode 100644
index 0000000..aa92db4
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_IRQ_H__
+#define __TIDSS_IRQ_H__
+
+#include <linux/types.h>
+
+#include "tidss_drv.h"
+
+/*
+ * The IRQ statuses from the various DISPC IRQ registers are packed into
+ * a single value, where the bits are defined as follows:
+ *
+ * bit group |dev|wb |mrg0 |mrg1 |mrg2 |mrg3 |plane0-3| <unused> |
+ * bit use   |D  |fou|FEOL |FEOL |FEOL |FEOL |  UUUU  |          |
+ * bit number|0  |1-3|4-7  |8-11 |12-15|16-19| 20-23  |  24-31   |
+ *
+ * device bits:	D = OCP error
+ * WB bits:	f = frame done wb, o = wb buffer overflow,
+ *		u = wb buffer incomplete
+ * vp bits:	F = frame done, E = vsync even, O = vsync odd, L = sync lost
+ * plane bits:	U = fifo underflow
+ */
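+
+/*
+ * Worked example of the packing above, with TIDSS_MAX_PORTS = 4:
+ *
+ *	DSS_IRQ_VP_BIT_N(ch, bit)    = 4 + 4 * ch + bit
+ *	DSS_IRQ_PLANE_BIT_N(pl, bit) = 20 + pl + bit
+ *
+ * so e.g. VP0 FRAME_DONE is bit 4, VP3 SYNC_LOST is bit 19 and plane 0
+ * FIFO underflow is bit 20, matching the layout table.
+ */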
+
+#define DSS_IRQ_DEVICE_OCP_ERR			BIT(0)
+
+#define DSS_IRQ_DEVICE_FRAMEDONEWB		BIT(1)
+#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW		BIT(2)
+#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR	BIT(3)
+#define DSS_IRQ_DEVICE_WB_MASK			GENMASK(3, 1)
+
+#define DSS_IRQ_VP_BIT_N(ch, bit)	(4 + 4 * (ch) + (bit))
+#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
+	(DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))
+
+#define DSS_IRQ_VP_BIT(ch, bit)	BIT(DSS_IRQ_VP_BIT_N((ch), (bit)))
+#define DSS_IRQ_PLANE_BIT(plane, bit) \
+	BIT(DSS_IRQ_PLANE_BIT_N((plane), (bit)))
+
+static inline dispc_irq_t DSS_IRQ_VP_MASK(u32 ch)
+{
+	return GENMASK(DSS_IRQ_VP_BIT_N((ch), 3), DSS_IRQ_VP_BIT_N((ch), 0));
+}
+
+static inline dispc_irq_t DSS_IRQ_PLANE_MASK(u32 plane)
+{
+	return GENMASK(DSS_IRQ_PLANE_BIT_N((plane), 0),
+		       DSS_IRQ_PLANE_BIT_N((plane), 0));
+}
+
+#define DSS_IRQ_VP_FRAME_DONE(ch)	DSS_IRQ_VP_BIT((ch), 0)
+#define DSS_IRQ_VP_VSYNC_EVEN(ch)	DSS_IRQ_VP_BIT((ch), 1)
+#define DSS_IRQ_VP_VSYNC_ODD(ch)	DSS_IRQ_VP_BIT((ch), 2)
+#define DSS_IRQ_VP_SYNC_LOST(ch)	DSS_IRQ_VP_BIT((ch), 3)
+
+#define DSS_IRQ_PLANE_FIFO_UNDERFLOW(plane)	DSS_IRQ_PLANE_BIT((plane), 0)
+
+struct drm_crtc;
+struct drm_device;
+
+struct tidss_device;
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc);
+void tidss_irq_disable_vblank(struct drm_crtc *crtc);
+
+void tidss_irq_preinstall(struct drm_device *ddev);
+int tidss_irq_postinstall(struct drm_device *ddev);
+void tidss_irq_uninstall(struct drm_device *ddev);
+irqreturn_t tidss_irq_handler(int irq, void *arg);
+
+void tidss_irq_resume(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
new file mode 100644
index 0000000..7d41996
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+#include "tidss_kms.h"
+#include "tidss_plane.h"
+
+static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+	struct drm_device *ddev = old_state->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	tidss_runtime_get(tidss);
+
+	drm_atomic_helper_commit_modeset_disables(ddev, old_state);
+	drm_atomic_helper_commit_planes(ddev, old_state, 0);
+	drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+	drm_atomic_helper_commit_hw_done(old_state);
+	drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+	drm_atomic_helper_cleanup_planes(ddev, old_state);
+
+	tidss_runtime_put(tidss);
+}
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+	.atomic_commit_tail = tidss_atomic_commit_tail,
+};
+
+static int tidss_atomic_check(struct drm_device *ddev,
+			      struct drm_atomic_state *state)
+{
+	struct drm_plane_state *opstate;
+	struct drm_plane_state *npstate;
+	struct drm_plane *plane;
+	struct drm_crtc_state *cstate;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	ret = drm_atomic_helper_check(ddev, state);
+	if (ret)
+		return ret;
+
+	/*
+	 * Add all active planes on a CRTC to the atomic state if the
+	 * x/y/z position or activity of any plane on that CRTC
+	 * changes. This is needed for updating the plane positions in
+	 * tidss_crtc_position_planes(), which is called from
+	 * crtc_atomic_enable() and crtc_atomic_flush(). We use an
+	 * extra flag to mark x,y-position changes; together with
+	 * zpos_changed this covers all the cases above.
+	 */
+	for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
+		if (!npstate->crtc || !npstate->visible)
+			continue;
+
+		if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
+		    opstate->crtc_y != npstate->crtc_y) {
+			cstate = drm_atomic_get_crtc_state(state,
+							   npstate->crtc);
+			if (IS_ERR(cstate))
+				return PTR_ERR(cstate);
+			to_tidss_crtc_state(cstate)->plane_pos_changed = true;
+		}
+	}
+
+	for_each_new_crtc_in_state(state, crtc, cstate, i) {
+		if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
+		    cstate->zpos_changed) {
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+	.fb_create = drm_gem_fb_create,
+	.atomic_check = tidss_atomic_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tidss_dispc_modeset_init(struct tidss_device *tidss)
+{
+	struct device *dev = tidss->dev;
+	unsigned int fourccs_len;
+	const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
+	unsigned int i;
+
+	struct pipe {
+		u32 hw_videoport;
+		struct drm_bridge *bridge;
+		u32 enc_type;
+	};
+
+	const struct dispc_features *feat = tidss->feat;
+	u32 max_vps = feat->num_vps;
+	u32 max_planes = feat->num_planes;
+
+	struct pipe pipes[TIDSS_MAX_PORTS];
+	u32 num_pipes = 0;
+	u32 crtc_mask;
+
+	/* first find all the connected panels & bridges */
+
+	for (i = 0; i < max_vps; i++) {
+		struct drm_panel *panel;
+		struct drm_bridge *bridge;
+		u32 enc_type = DRM_MODE_ENCODER_NONE;
+		int ret;
+
+		ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
+						  &panel, &bridge);
+		if (ret == -ENODEV) {
+			dev_dbg(dev, "no panel/bridge for port %d\n", i);
+			continue;
+		} else if (ret) {
+			dev_dbg(dev, "port %d probe returned %d\n", i, ret);
+			return ret;
+		}
+
+		if (panel) {
+			u32 conn_type;
+
+			dev_dbg(dev, "Setting up panel for port %d\n", i);
+
+			switch (feat->vp_bus_type[i]) {
+			case DISPC_VP_OLDI:
+				enc_type = DRM_MODE_ENCODER_LVDS;
+				conn_type = DRM_MODE_CONNECTOR_LVDS;
+				break;
+			case DISPC_VP_DPI:
+				enc_type = DRM_MODE_ENCODER_DPI;
+				conn_type = DRM_MODE_CONNECTOR_LVDS;
+				break;
+			default:
+				WARN_ON(1);
+				return -EINVAL;
+			}
+
+			if (panel->connector_type != conn_type) {
+				dev_err(dev,
+					"%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
+					 __func__, dev_name(panel->dev), i,
+					 panel->connector_type, conn_type);
+				return -EINVAL;
+			}
+
+			bridge = devm_drm_panel_bridge_add(dev, panel);
+			if (IS_ERR(bridge)) {
+				dev_err(dev,
+					"failed to set up panel bridge for port %d\n",
+					i);
+				return PTR_ERR(bridge);
+			}
+		}
+
+		pipes[num_pipes].hw_videoport = i;
+		pipes[num_pipes].bridge = bridge;
+		pipes[num_pipes].enc_type = enc_type;
+		num_pipes++;
+	}
+
+	/* all planes can be on any crtc */
+	crtc_mask = (1 << num_pipes) - 1;
+
+	/* then create a plane, a crtc and an encoder for each panel/bridge */
+
+	for (i = 0; i < num_pipes; ++i) {
+		struct tidss_plane *tplane;
+		struct tidss_crtc *tcrtc;
+		struct drm_encoder *enc;
+		u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+		int ret;
+
+		tplane = tidss_plane_create(tidss, hw_plane_id,
+					    DRM_PLANE_TYPE_PRIMARY, crtc_mask,
+					    fourccs, fourccs_len);
+		if (IS_ERR(tplane)) {
+			dev_err(tidss->dev, "plane create failed\n");
+			return PTR_ERR(tplane);
+		}
+
+		tidss->planes[tidss->num_planes++] = &tplane->plane;
+
+		tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
+					  &tplane->plane);
+		if (IS_ERR(tcrtc)) {
+			dev_err(tidss->dev, "crtc create failed\n");
+			return PTR_ERR(tcrtc);
+		}
+
+		tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
+
+		enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+					   1 << tcrtc->crtc.index);
+		if (IS_ERR(enc)) {
+			dev_err(tidss->dev, "encoder create failed\n");
+			return PTR_ERR(enc);
+		}
+
+		ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
+		if (ret) {
+			dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+			return ret;
+		}
+	}
+
+	/* create overlay planes of the leftover planes */
+
+	while (tidss->num_planes < max_planes) {
+		struct tidss_plane *tplane;
+		u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+
+		tplane = tidss_plane_create(tidss, hw_plane_id,
+					    DRM_PLANE_TYPE_OVERLAY, crtc_mask,
+					    fourccs, fourccs_len);
+
+		if (IS_ERR(tplane)) {
+			dev_err(tidss->dev, "plane create failed\n");
+			return PTR_ERR(tplane);
+		}
+
+		tidss->planes[tidss->num_planes++] = &tplane->plane;
+	}
+
+	return 0;
+}
+
+int tidss_modeset_init(struct tidss_device *tidss)
+{
+	struct drm_device *ddev = &tidss->ddev;
+	unsigned int i;
+	int ret;
+
+	dev_dbg(tidss->dev, "%s\n", __func__);
+
+	drm_mode_config_init(ddev);
+
+	ddev->mode_config.min_width = 8;
+	ddev->mode_config.min_height = 8;
+	ddev->mode_config.max_width = 8096;
+	ddev->mode_config.max_height = 8096;
+	ddev->mode_config.normalize_zpos = true;
+	ddev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+	ret = tidss_dispc_modeset_init(tidss);
+	if (ret)
+		goto err_mode_config_cleanup;
+
+	ret = drm_vblank_init(ddev, tidss->num_crtcs);
+	if (ret)
+		goto err_mode_config_cleanup;
+
+	/* Start with vertical blanking interrupt reporting disabled. */
+	for (i = 0; i < tidss->num_crtcs; ++i)
+		drm_crtc_vblank_reset(tidss->crtcs[i]);
+
+	drm_mode_config_reset(ddev);
+
+	dev_dbg(tidss->dev, "%s done\n", __func__);
+
+	return 0;
+
+err_mode_config_cleanup:
+	drm_mode_config_cleanup(ddev);
+	return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+	struct drm_device *ddev = &tidss->ddev;
+
+	drm_mode_config_cleanup(ddev);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
new file mode 100644
index 0000000..dda5625
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_KMS_H__
+#define __TIDSS_KMS_H__
+
+struct tidss_device;
+
+int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
new file mode 100644
index 0000000..ff99b2d
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_plane.h"
+
+/* drm_plane_helper_funcs */
+
+static int tidss_plane_atomic_check(struct drm_plane *plane,
+				    struct drm_plane_state *state)
+{
+	struct drm_device *ddev = plane->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct tidss_plane *tplane = to_tidss_plane(plane);
+	const struct drm_format_info *finfo;
+	struct drm_crtc_state *crtc_state;
+	u32 hw_plane = tplane->hw_plane_id;
+	u32 hw_videoport;
+	int ret;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	if (!state->crtc) {
+		/*
+		 * The visible field is not reset by the DRM core but only
+		 * updated by drm_atomic_helper_check_plane_state(), so set
+		 * it manually.
+		 */
+		state->visible = false;
+		return 0;
+	}
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+						  INT_MAX, true, true);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The HW can only start drawing at a subpixel boundary (the
+	 * first two checks below). At the end of a row the HW can only
+	 * jump an integer number of subpixels forward to the beginning
+	 * of the next row, so the picture width must be an integer
+	 * number of subpixels (the third check). However, after
+	 * reaching the end of the drawn picture, drawing starts again
+	 * at the absolute memory address of the picture's top left
+	 * corner, so there is no need to check the height.
+	 */
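+
+	/*
+	 * For example, with NV12 (2x2 subsampled, so hsub = vsub = 2)
+	 * the checks below require src_x, src_y and src_w (in whole
+	 * pixels) to be even, while RGB formats (hsub = vsub = 1)
+	 * always pass.
+	 */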
+
+	finfo = drm_format_info(state->fb->format->format);
+
+	if ((state->src_x >> 16) % finfo->hsub != 0) {
+		dev_dbg(ddev->dev,
+			"%s: x-position %u not divisible subpixel size %u\n",
+			__func__, (state->src_x >> 16), finfo->hsub);
+		return -EINVAL;
+	}
+
+	if ((state->src_y >> 16) % finfo->vsub != 0) {
+		dev_dbg(ddev->dev,
+			"%s: y-position %u not divisible subpixel size %u\n",
+			__func__, (state->src_y >> 16), finfo->vsub);
+		return -EINVAL;
+	}
+
+	if ((state->src_w >> 16) % finfo->hsub != 0) {
+		dev_dbg(ddev->dev,
+			"%s: src width %u not divisible by subpixel size %u\n",
+			 __func__, (state->src_w >> 16), finfo->hsub);
+		return -EINVAL;
+	}
+
+	if (!state->visible)
+		return 0;
+
+	hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+	ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void tidss_plane_atomic_update(struct drm_plane *plane,
+				      struct drm_plane_state *old_state)
+{
+	struct drm_device *ddev = plane->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct tidss_plane *tplane = to_tidss_plane(plane);
+	struct drm_plane_state *state = plane->state;
+	u32 hw_videoport;
+	int ret;
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	if (!state->visible) {
+		dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+		return;
+	}
+
+	hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+	ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
+				state, hw_videoport);
+
+	if (ret) {
+		dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
+			__func__, tplane->hw_plane_id);
+		dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+		return;
+	}
+
+	dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
+}
+
+static void tidss_plane_atomic_disable(struct drm_plane *plane,
+				       struct drm_plane_state *old_state)
+{
+	struct drm_device *ddev = plane->dev;
+	struct tidss_device *tidss = ddev->dev_private;
+	struct tidss_plane *tplane = to_tidss_plane(plane);
+
+	dev_dbg(ddev->dev, "%s\n", __func__);
+
+	dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+}
+
+static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+	.atomic_check = tidss_plane_atomic_check,
+	.atomic_update = tidss_plane_atomic_update,
+	.atomic_disable = tidss_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs tidss_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = drm_atomic_helper_plane_reset,
+	.destroy = drm_plane_cleanup,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+				       u32 hw_plane_id, u32 plane_type,
+				       u32 crtc_mask, const u32 *formats,
+				       u32 num_formats)
+{
+	struct tidss_plane *tplane;
+	enum drm_plane_type type;
+	u32 possible_crtcs;
+	u32 num_planes = tidss->feat->num_planes;
+	u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
+			       BIT(DRM_COLOR_YCBCR_BT709));
+	u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+			    BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
+	u32 default_encoding = DRM_COLOR_YCBCR_BT601;
+	u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
+	u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
+			   BIT(DRM_MODE_BLEND_COVERAGE));
+	int ret;
+
+	tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL);
+	if (!tplane)
+		return ERR_PTR(-ENOMEM);
+
+	tplane->hw_plane_id = hw_plane_id;
+
+	possible_crtcs = crtc_mask;
+	type = plane_type;
+
+	ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
+				       possible_crtcs,
+				       &tidss_plane_funcs,
+				       formats, num_formats,
+				       NULL, type, NULL);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
+	drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+				       num_planes - 1);
+
+	ret = drm_plane_create_color_properties(&tplane->plane,
+						color_encodings,
+						color_ranges,
+						default_encoding,
+						default_range);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drm_plane_create_alpha_property(&tplane->plane);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return tplane;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
new file mode 100644
index 0000000..80ff1c5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ */
+
+#ifndef __TIDSS_PLANE_H__
+#define __TIDSS_PLANE_H__
+
+#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
+
+struct tidss_device;
+
+struct tidss_plane {
+	struct drm_plane plane;
+
+	u32 hw_plane_id;
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+				       u32 hw_plane_id, u32 plane_type,
+				       u32 crtc_mask, const u32 *formats,
+				       u32 num_formats);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.c b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
new file mode 100644
index 0000000..5ec6838
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "tidss_scale_coefs.h"
+
+/*
+ * These are interpolated with a custom python script from DSS5
+ * (drivers/gpu/drm/omapdrm/dss/dispc_coef.c) coefficients.
+ */
+static const struct tidss_scale_coefs coef5_m32 = {
+	.c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
+	.c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
+	.c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
+};
+
+static const struct tidss_scale_coefs coef5_m26 = {
+	.c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
+	.c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
+	.c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
+};
+
+static const struct tidss_scale_coefs coef5_m22 = {
+	.c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
+	.c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
+	.c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
+};
+
+static const struct tidss_scale_coefs coef5_m19 = {
+	.c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
+	.c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
+	.c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
+};
+
+static const struct tidss_scale_coefs coef5_m16 = {
+	.c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
+	.c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
+	.c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
+};
+
+static const struct tidss_scale_coefs coef5_m14 = {
+	.c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
+	.c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
+	.c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
+};
+
+static const struct tidss_scale_coefs coef5_m13 = {
+	.c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
+	.c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
+	.c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
+};
+
+static const struct tidss_scale_coefs coef5_m12 = {
+	.c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
+	.c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
+	.c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
+};
+
+static const struct tidss_scale_coefs coef5_m11 = {
+	.c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
+	.c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
+	.c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
+};
+
+static const struct tidss_scale_coefs coef5_m10 = {
+	.c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
+	.c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
+	.c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
+};
+
+static const struct tidss_scale_coefs coef5_m9 = {
+	.c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
+	.c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
+	.c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
+};
+
+static const struct tidss_scale_coefs coef5_m8 = {
+	.c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
+	.c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
+	.c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
+};
+
+static const struct tidss_scale_coefs coef3_m32 = {
+	.c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
+	.c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m26 = {
+	.c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
+	.c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m22 = {
+	.c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
+	.c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m19 = {
+	.c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
+	.c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m16 = {
+	.c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
+	.c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m14 = {
+	.c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
+	.c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m13 = {
+	.c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
+	.c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m12 = {
+	.c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
+	.c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m11 = {
+	.c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
+	.c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m10 = {
+	.c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
+	.c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m9 = {
+	.c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
+	.c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m8 = {
+	.c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
+	.c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+						      u32 firinc,
+						      bool five_taps)
+{
+	int i;
+	int inc;
+	static const struct {
+		int mmin;
+		int mmax;
+		const struct tidss_scale_coefs *coef3;
+		const struct tidss_scale_coefs *coef5;
+		const char *name;
+	} coefs[] = {
+		{ 27, 32, &coef3_m32, &coef5_m32, "M32" },
+		{ 23, 26, &coef3_m26, &coef5_m26, "M26" },
+		{ 20, 22, &coef3_m22, &coef5_m22, "M22" },
+		{ 17, 19, &coef3_m19, &coef5_m19, "M19" },
+		{ 15, 16, &coef3_m16, &coef5_m16, "M16" },
+		{ 14, 14, &coef3_m14, &coef5_m14, "M14" },
+		{ 13, 13, &coef3_m13, &coef5_m13, "M13" },
+		{ 12, 12, &coef3_m12, &coef5_m12, "M12" },
+		{ 11, 11, &coef3_m11, &coef5_m11, "M11" },
+		{ 10, 10, &coef3_m10, &coef5_m10, "M10" },
+		{  9,  9, &coef3_m9, &coef5_m9, "M9" },
+		{  4,  8, &coef3_m8, &coef5_m8, "M8" },
+		/*
+		 * When upscaling more than two times, blockiness and outlines
+		 * around the image are observed when M8 tables are used. M11,
+		 * M16 and M19 tables are used to prevent this.
+		 */
+		{  3,  3, &coef3_m11, &coef5_m11, "M11" },
+		{  2,  2, &coef3_m16, &coef5_m16, "M16" },
+		{  0,  1, &coef3_m19, &coef5_m19, "M19" },
+	};
+
+	/*
+	 * firinc is the result of 0x200000 * in_size / out_size. Dividing
+	 * it by 0x40000 scales this down to 8 * in_size / out_size, so
+	 * after the division the actual scaling factor is 8 / inc.
+	 */
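+	/*
+	 * Worked example (illustrative): downscaling by two (in_size ==
+	 * 2 * out_size) gives firinc == 0x400000, so inc == 16 and the
+	 * M16 tables are chosen.
+	 */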
+	inc = firinc / 0x40000;
+	for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
+		if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
+			if (five_taps)
+				return coefs[i].coef5;
+			else
+				return coefs[i].coef3;
+		}
+	}
+
+	dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
+		__func__, firinc, inc);
+
+	return NULL;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
new file mode 100644
index 0000000..64b5af5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ */
+
+#ifndef __TIDSS_DISPC_COEF_H__
+#define __TIDSS_DISPC_COEF_H__
+
+#include <linux/types.h>
+
+struct tidss_scale_coefs {
+	s16 c2[16];
+	s16 c1[16];
+	u16 c0[9];
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+						      u32 firinc,
+						      bool five_taps);
+
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 51d034e..28b7f70 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -95,7 +95,7 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
 
 	priv->external_encoder->possible_crtcs = BIT(0);
 
-	ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
+	ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
 	if (ret) {
 		dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a46ac28..4160e74 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -47,6 +47,20 @@
 
 	  If M is selected the module will be called ili9341.
 
+config TINYDRM_ILI9486
+	tristate "DRM support for ILI9486 display panels"
+	depends on DRM && SPI
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_MIPI_DBI
+	select BACKLIGHT_CLASS_DEVICE
+	help
+	  DRM driver for the following Ilitek ILI9486 panels:
+	  * PiScreen 3.5" 320x480 TFT (Ozzmaker)
+	  * RPi LCD 3.5" 320x480 TFT (Waveshare)
+
+	  If M is selected the module will be called ili9486.
+
 config TINYDRM_MI0283QT
 	tristate "DRM support for MI0283QT"
 	depends on DRM && SPI
@@ -85,14 +99,16 @@
 	  If M is selected the module will be called st7586.
 
 config TINYDRM_ST7735R
-	tristate "DRM support for Sitronix ST7735R display panels"
+	tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
 	depends on DRM && SPI
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_MIPI_DBI
 	select BACKLIGHT_CLASS_DEVICE
 	help
-	  DRM driver Sitronix ST7735R with one of the following LCDs:
-	  * JD-T18003-T01 1.8" 128x160 TFT
+	  DRM driver for Sitronix ST7715R/ST7735R with one of the following
+	  LCDs:
+	  * Jianda JD-T18003-T01 1.8" 128x160 TFT
+	  * Okaya RH128128T 1.44" 128x128 TFT
 
 	  If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 896cf31..c96ceee 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@
 obj-$(CONFIG_TINYDRM_HX8357D)		+= hx8357d.o
 obj-$(CONFIG_TINYDRM_ILI9225)		+= ili9225.o
 obj-$(CONFIG_TINYDRM_ILI9341)		+= ili9341.o
+obj-$(CONFIG_TINYDRM_ILI9486)		+= ili9486.o
 obj-$(CONFIG_TINYDRM_MI0283QT)		+= mi0283qt.o
 obj-$(CONFIG_TINYDRM_REPAPER)		+= repaper.o
 obj-$(CONFIG_TINYDRM_ST7586)		+= st7586.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 94fb1f5..a481734 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -22,7 +22,6 @@
 #include <drm/drm_modeset_helper_vtables.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
 
 static bool eco_mode;
 module_param(eco_mode, bool, 0644);
@@ -610,18 +609,10 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
 				 struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = pipe->plane.state;
-	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect rect;
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		crtc->state->event = NULL;
-		spin_unlock_irq(&crtc->dev->event_lock);
-	}
 }
 
 static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index c66acc5..802fb8d 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -26,7 +26,6 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
 
 #define ILI9225_DRIVER_READ_CODE	0x00
 #define ILI9225_DRIVER_OUTPUT_CONTROL	0x01
@@ -165,18 +164,10 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
 				struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = pipe->plane.state;
-	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect rect;
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		ili9225_fb_dirty(state->fb, &rect);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		spin_unlock_irq(&crtc->dev->event_lock);
-		crtc->state->event = NULL;
-	}
 }
 
 static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
new file mode 100644
index 0000000..532560ae
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9486 panels
+ *
+ * Copyright 2020 Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#define ILI9486_ITFCTR1         0xb0
+#define ILI9486_PWCTRL1         0xc2
+#define ILI9486_VMCTRL1         0xc5
+#define ILI9486_PGAMCTRL        0xe0
+#define ILI9486_NGAMCTRL        0xe1
+#define ILI9486_DGAMCTRL        0xe2
+#define ILI9486_MADCTL_BGR      BIT(3)
+#define ILI9486_MADCTL_MV       BIT(5)
+#define ILI9486_MADCTL_MX       BIT(6)
+#define ILI9486_MADCTL_MY       BIT(7)
+
+/*
+ * The PiScreen/Waveshare rpi-lcd-35 has an SPI to 16-bit parallel bus
+ * converter in front of the display controller. This means that 8-bit values
+ * have to be transferred as 16-bit values.
+ */
+static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+			     size_t num)
+{
+	struct spi_device *spi = mipi->spi;
+	void *data = par;
+	u32 speed_hz;
+	int i, ret;
+	__be16 *buf;
+
+	buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * The displays are Raspberry Pi HATs connected to an 8-bit-only SPI
+	 * controller, so the 16-bit commands and parameters need byte
+	 * swapping before being transferred as 8-bit on the big-endian SPI
+	 * bus. Pixel data bytes have already been swapped before this
+	 * function is called.
+	 */
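+	/*
+	 * Illustrative example: command 0x2c (memory write) goes out on the
+	 * wire as the big-endian 16-bit value 0x002c, i.e. the byte 0x00
+	 * followed by 0x2c.
+	 */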
+	buf[0] = cpu_to_be16(*cmd);
+	gpiod_set_value_cansleep(mipi->dc, 0);
+	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
+	ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+	if (ret || !num)
+		goto free;
+
+	/* 8-bit configuration data, not 16-bit pixel data */
+	if (num <= 32) {
+		for (i = 0; i < num; i++)
+			buf[i] = cpu_to_be16(par[i]);
+		num *= 2;
+		speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+		data = buf;
+	}
+
+	gpiod_set_value_cansleep(mipi->dc, 1);
+	ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ free:
+	kfree(buf);
+
+	return ret;
+}
+
+static void waveshare_enable(struct drm_simple_display_pipe *pipe,
+			     struct drm_crtc_state *crtc_state,
+			     struct drm_plane_state *plane_state)
+{
+	struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+	struct mipi_dbi *dbi = &dbidev->dbi;
+	u8 addr_mode;
+	int ret, idx;
+
+	if (!drm_dev_enter(pipe->crtc.dev, &idx))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	ret = mipi_dbi_poweron_conditional_reset(dbidev);
+	if (ret < 0)
+		goto out_exit;
+	if (ret == 1)
+		goto out_enable;
+
+	mipi_dbi_command(dbi, ILI9486_ITFCTR1);
+	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+	msleep(250);
+
+	mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+	mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
+
+	mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
+
+	mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
+			 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+			 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
+	mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
+			 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+			 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+	mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
+			 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+			 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+
+	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+	msleep(100);
+
+ out_enable:
+	switch (dbidev->rotation) {
+	case 90:
+		addr_mode = ILI9486_MADCTL_MY;
+		break;
+	case 180:
+		addr_mode = ILI9486_MADCTL_MV;
+		break;
+	case 270:
+		addr_mode = ILI9486_MADCTL_MX;
+		break;
+	default:
+		addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
+			ILI9486_MADCTL_MX;
+		break;
+	}
+	addr_mode |= ILI9486_MADCTL_BGR;
+	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+	mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+ out_exit:
+	drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+	.enable = waveshare_enable,
+	.disable = mipi_dbi_pipe_disable,
+	.update = mipi_dbi_pipe_update,
+	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode waveshare_mode = {
+	DRM_SIMPLE_MODE(480, 320, 73, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+
+static struct drm_driver ili9486_driver = {
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+	.fops			= &ili9486_fops,
+	.release		= mipi_dbi_release,
+	DRM_GEM_CMA_VMAP_DRIVER_OPS,
+	.debugfs_init		= mipi_dbi_debugfs_init,
+	.name			= "ili9486",
+	.desc			= "Ilitek ILI9486",
+	.date			= "20200118",
+	.major			= 1,
+	.minor			= 0,
+};
+
+static const struct of_device_id ili9486_of_match[] = {
+	{ .compatible = "waveshare,rpi-lcd-35" },
+	{ .compatible = "ozzmaker,piscreen" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ili9486_of_match);
+
+static const struct spi_device_id ili9486_id[] = {
+	{ "ili9486", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, ili9486_id);
+
+static int ili9486_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct mipi_dbi_dev *dbidev;
+	struct drm_device *drm;
+	struct mipi_dbi *dbi;
+	struct gpio_desc *dc;
+	u32 rotation = 0;
+	int ret;
+
+	dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+	if (!dbidev)
+		return -ENOMEM;
+
+	dbi = &dbidev->dbi;
+	drm = &dbidev->drm;
+	ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
+	if (ret) {
+		kfree(dbidev);
+		return ret;
+	}
+
+	drm_mode_config_init(drm);
+
+	dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(dbi->reset)) {
+		DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+		return PTR_ERR(dbi->reset);
+	}
+
+	dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+	if (IS_ERR(dc)) {
+		DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+		return PTR_ERR(dc);
+	}
+
+	dbidev->backlight = devm_of_find_backlight(dev);
+	if (IS_ERR(dbidev->backlight))
+		return PTR_ERR(dbidev->backlight);
+
+	device_property_read_u32(dev, "rotation", &rotation);
+
+	ret = mipi_dbi_spi_init(spi, dbi, dc);
+	if (ret)
+		return ret;
+
+	dbi->command = waveshare_command;
+	dbi->read_commands = NULL;
+
+	ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
+				&waveshare_mode, rotation);
+	if (ret)
+		return ret;
+
+	drm_mode_config_reset(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		return ret;
+
+	spi_set_drvdata(spi, drm);
+
+	drm_fbdev_generic_setup(drm, 0);
+
+	return 0;
+}
+
+static int ili9486_remove(struct spi_device *spi)
+{
+	struct drm_device *drm = spi_get_drvdata(spi);
+
+	drm_dev_unplug(drm);
+	drm_atomic_helper_shutdown(drm);
+
+	return 0;
+}
+
+static void ili9486_shutdown(struct spi_device *spi)
+{
+	drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9486_spi_driver = {
+	.driver = {
+		.name = "ili9486",
+		.of_match_table = ili9486_of_match,
+	},
+	.id_table = ili9486_id,
+	.probe = ili9486_probe,
+	.remove = ili9486_remove,
+	.shutdown = ili9486_shutdown,
+};
+module_spi_driver(ili9486_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
+MODULE_AUTHOR("Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 76d1792..f5ebcaf 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -17,7 +17,7 @@
 #include <linux/dma-buf.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
 #include <linux/sched/clock.h>
 #include <linux/spi/spi.h>
 #include <linux/thermal.h>
@@ -33,13 +33,13 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
 #define REPAPER_RID_G2_COG_ID	0x12
 
 enum repaper_model {
+	/* 0 is reserved so a valid model is never mistaken for a NULL match */
 	E1144CS021 = 1,
 	E1190CS021,
 	E2200CS021,
@@ -856,18 +856,10 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
 				struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = pipe->plane.state;
-	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect rect;
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		repaper_fb_dirty(state->fb);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		spin_unlock_irq(&crtc->dev->event_lock);
-		crtc->state->event = NULL;
-	}
 }
 
 static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -995,21 +987,21 @@ static int repaper_probe(struct spi_device *spi)
 {
 	const struct drm_display_mode *mode;
 	const struct spi_device_id *spi_id;
-	const struct of_device_id *match;
 	struct device *dev = &spi->dev;
 	enum repaper_model model;
 	const char *thermal_zone;
 	struct repaper_epd *epd;
 	size_t line_buffer_size;
 	struct drm_device *drm;
+	const void *match;
 	int ret;
 
-	match = of_match_device(repaper_of_match, dev);
+	match = device_get_match_data(dev);
 	if (match) {
-		model = (enum repaper_model)match->data;
+		model = (enum repaper_model)match;
 	} else {
 		spi_id = spi_get_device_id(spi);
-		model = spi_id->driver_data;
+		model = (enum repaper_model)spi_id->driver_data;
 	}
 
 	/* The SPI device is used to allocate dma memory */
@@ -1197,7 +1189,6 @@ static void repaper_shutdown(struct spi_device *spi)
 static struct spi_driver repaper_spi_driver = {
 	.driver = {
 		.name = "repaper",
-		.owner = THIS_MODULE,
 		.of_match_table = repaper_of_match,
 	},
 	.id_table = repaper_id,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 060cc75..9ef559d 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -23,7 +23,6 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_mipi_dbi.h>
 #include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
 
 /* controller-specific commands */
 #define ST7586_DISP_MODE_GRAY	0x38
@@ -159,18 +158,10 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
 			       struct drm_plane_state *old_state)
 {
 	struct drm_plane_state *state = pipe->plane.state;
-	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect rect;
 
 	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
 		st7586_fb_dirty(state->fb, &rect);
-
-	if (crtc->state->event) {
-		spin_lock_irq(&crtc->dev->event_lock);
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-		spin_unlock_irq(&crtc->dev->event_lock);
-		crtc->state->event = NULL;
-	}
 }
 
 static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3f4487c..3cd9b8d 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
- * DRM driver for Sitronix ST7735R panels
+ * DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
+ * display controller in SPI mode.
  *
  * Copyright 2017 David Lechner <david@lechnology.com>
+ * Copyright (C) 2019 Glider bvba
  */
 
 #include <linux/backlight.h>
@@ -37,12 +39,28 @@
 #define ST7735R_MY	BIT(7)
 #define ST7735R_MX	BIT(6)
 #define ST7735R_MV	BIT(5)
+#define ST7735R_RGB	BIT(3)
 
-static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
-				      struct drm_crtc_state *crtc_state,
-				      struct drm_plane_state *plane_state)
+struct st7735r_cfg {
+	const struct drm_display_mode mode;
+	unsigned int left_offset;
+	unsigned int top_offset;
+	unsigned int write_only:1;
+	unsigned int rgb:1;		/* RGB (vs. BGR) */
+};
+
+struct st7735r_priv {
+	struct mipi_dbi_dev dbidev;	/* Must be first for .release() */
+	const struct st7735r_cfg *cfg;
+};
+
+static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
+				struct drm_crtc_state *crtc_state,
+				struct drm_plane_state *plane_state)
 {
 	struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+	struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
+						 dbidev);
 	struct mipi_dbi *dbi = &dbidev->dbi;
 	int ret, idx;
 	u8 addr_mode;
@@ -87,6 +105,10 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 		addr_mode = ST7735R_MY | ST7735R_MV;
 		break;
 	}
+
+	if (priv->cfg->rgb)
+		addr_mode |= ST7735R_RGB;
+
 	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
 	mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
 			 MIPI_DCS_PIXEL_FMT_16BIT);
@@ -109,15 +131,24 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
 	drm_dev_exit(idx);
 }
 
-static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
-	.enable		= jd_t18003_t01_pipe_enable,
+static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+	.enable		= st7735r_pipe_enable,
 	.disable	= mipi_dbi_pipe_disable,
 	.update		= mipi_dbi_pipe_update,
 	.prepare_fb	= drm_gem_fb_simple_display_pipe_prepare_fb,
 };
 
-static const struct drm_display_mode jd_t18003_t01_mode = {
-	DRM_SIMPLE_MODE(128, 160, 28, 35),
+static const struct st7735r_cfg jd_t18003_t01_cfg = {
+	.mode		= { DRM_SIMPLE_MODE(128, 160, 28, 35) },
+	/* Cannot read from Adafruit 1.8" display via SPI */
+	.write_only	= true,
+};
+
+static const struct st7735r_cfg rh128128t_cfg = {
+	.mode		= { DRM_SIMPLE_MODE(128, 128, 25, 26) },
+	.left_offset	= 2,
+	.top_offset	= 3,
+	.rgb		= true,
 };
 
 DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -136,13 +167,14 @@ static struct drm_driver st7735r_driver = {
 };
 
 static const struct of_device_id st7735r_of_match[] = {
-	{ .compatible = "jianda,jd-t18003-t01" },
+	{ .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
+	{ .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, st7735r_of_match);
 
 static const struct spi_device_id st7735r_id[] = {
-	{ "jd-t18003-t01", 0 },
+	{ "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
 	{ },
 };
 MODULE_DEVICE_TABLE(spi, st7735r_id);
@@ -150,17 +182,26 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
 static int st7735r_probe(struct spi_device *spi)
 {
 	struct device *dev = &spi->dev;
+	const struct st7735r_cfg *cfg;
 	struct mipi_dbi_dev *dbidev;
+	struct st7735r_priv *priv;
 	struct drm_device *drm;
 	struct mipi_dbi *dbi;
 	struct gpio_desc *dc;
 	u32 rotation = 0;
 	int ret;
 
-	dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
-	if (!dbidev)
+	cfg = device_get_match_data(&spi->dev);
+	if (!cfg)
+		cfg = (void *)spi_get_device_id(spi)->driver_data;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
 		return -ENOMEM;
 
+	dbidev = &priv->dbidev;
+	priv->cfg = cfg;
+
 	dbi = &dbidev->dbi;
 	drm = &dbidev->drm;
 	ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
@@ -193,10 +234,14 @@ static int st7735r_probe(struct spi_device *spi)
 	if (ret)
 		return ret;
 
-	/* Cannot read from Adafruit 1.8" display via SPI */
-	dbi->read_commands = NULL;
+	if (cfg->write_only)
+		dbi->read_commands = NULL;
 
-	ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+	dbidev->left_offset = cfg->left_offset;
+	dbidev->top_offset = cfg->top_offset;
+
+	ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
+				rotation);
 	if (ret)
 		return ret;
 
@@ -231,7 +276,6 @@ static void st7735r_shutdown(struct spi_device *spi)
 static struct spi_driver st7735r_spi_driver = {
 	.driver = {
 		.name = "st7735r",
-		.owner = THIS_MODULE,
 		.of_match_table = st7735r_of_match,
 	},
 	.id_table = st7735r_id,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5df596f..9e07c3f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,34 +145,12 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
 	return 1 << (type);
 }
 
-static void ttm_bo_release_list(struct kref *list_kref)
-{
-	struct ttm_buffer_object *bo =
-	    container_of(list_kref, struct ttm_buffer_object, list_kref);
-	size_t acc_size = bo->acc_size;
-
-	BUG_ON(kref_read(&bo->list_kref));
-	BUG_ON(kref_read(&bo->kref));
-	BUG_ON(bo->mem.mm_node != NULL);
-	BUG_ON(!list_empty(&bo->lru));
-	BUG_ON(!list_empty(&bo->ddestroy));
-	ttm_tt_destroy(bo->ttm);
-	atomic_dec(&ttm_bo_glob.bo_count);
-	dma_fence_put(bo->moving);
-	if (!ttm_bo_uses_embedded_gem_object(bo))
-		dma_resv_fini(&bo->base._resv);
-	bo->destroy(bo);
-	ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
 
-	dma_resv_assert_held(bo->base.resv);
-
 	if (!list_empty(&bo->lru))
 		return;
 
@@ -181,21 +159,14 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 
 	man = &bdev->man[mem->mem_type];
 	list_add_tail(&bo->lru, &man->lru[bo->priority]);
-	kref_get(&bo->list_kref);
 
 	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
 	    !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
 				     TTM_PAGE_FLAG_SWAPPED))) {
 		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
-		kref_get(&bo->list_kref);
 	}
 }
 
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +174,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 	if (!list_empty(&bo->swap)) {
 		list_del_init(&bo->swap);
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 		notify = true;
 	}
 	if (!list_empty(&bo->lru)) {
 		list_del_init(&bo->lru);
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 		notify = true;
 	}
 
@@ -372,14 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	}
 
 moved:
-	if (bo->evicted) {
-		if (bdev->driver->invalidate_caches) {
-			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-			if (ret)
-				pr_err("Can not flush read caches\n");
-		}
-		bo->evicted = false;
-	}
+	bo->evicted = false;
 
 	if (bo->mem.mm_node)
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
@@ -428,92 +390,49 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	BUG_ON(!dma_resv_trylock(&bo->base._resv));
 
 	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+	dma_resv_unlock(&bo->base._resv);
 	if (r)
-		dma_resv_unlock(&bo->base._resv);
+		return r;
+
+	if (bo->type != ttm_bo_type_sg) {
+		/* This works because the BO is about to be destroyed and nobody
+		 * references it any more. The only tricky case is the trylock on
+		 * the resv object while holding the lru_lock.
+		 */
+		spin_lock(&ttm_bo_glob.lru_lock);
+		bo->base.resv = &bo->base._resv;
+		spin_unlock(&ttm_bo_glob.lru_lock);
+	}
 
 	return r;
 }
 
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
+	struct dma_resv *resv = &bo->base._resv;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int i;
 
-	fobj = dma_resv_get_list(&bo->base._resv);
-	fence = dma_resv_get_excl(&bo->base._resv);
+	rcu_read_lock();
+	fobj = rcu_dereference(resv->fence);
+	fence = rcu_dereference(resv->fence_excl);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
 	for (i = 0; fobj && i < fobj->shared_count; ++i) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-					dma_resv_held(bo->base.resv));
+		fence = rcu_dereference(fobj->shared[i]);
 
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
 	}
-}
-
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	int ret;
-
-	ret = ttm_bo_individualize_resv(bo);
-	if (ret) {
-		/* Last resort, if we fail to allocate memory for the
-		 * fences block for the BO to become idle
-		 */
-		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
-						    30 * HZ);
-		spin_lock(&ttm_bo_glob.lru_lock);
-		goto error;
-	}
-
-	spin_lock(&ttm_bo_glob.lru_lock);
-	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
-	if (!ret) {
-		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
-			ttm_bo_del_from_lru(bo);
-			spin_unlock(&ttm_bo_glob.lru_lock);
-			if (bo->base.resv != &bo->base._resv)
-				dma_resv_unlock(&bo->base._resv);
-
-			ttm_bo_cleanup_memtype_use(bo);
-			dma_resv_unlock(bo->base.resv);
-			return;
-		}
-
-		ttm_bo_flush_all_fences(bo);
-
-		/*
-		 * Make NO_EVICT bos immediately available to
-		 * shrinkers, now that they are queued for
-		 * destruction.
-		 */
-		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
-			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-			ttm_bo_move_to_lru_tail(bo, NULL);
-		}
-
-		dma_resv_unlock(bo->base.resv);
-	}
-	if (bo->base.resv != &bo->base._resv)
-		dma_resv_unlock(&bo->base._resv);
-
-error:
-	kref_get(&bo->list_kref);
-	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-	spin_unlock(&ttm_bo_glob.lru_lock);
-
-	schedule_delayed_work(&bdev->wq,
-			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+	rcu_read_unlock();
 }
 
 /**
  * function ttm_bo_cleanup_refs
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
+ * If bo idle, remove from lru lists, and unref.
+ * If not idle, block if possible.
  *
  * Must be called with lru_lock and reservation held, this function
  * will drop the lru lock and optionally the reservation lock before returning.
@@ -527,14 +446,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool interruptible, bool no_wait_gpu,
 			       bool unlock_resv)
 {
-	struct dma_resv *resv;
+	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (unlikely(list_empty(&bo->ddestroy)))
-		resv = bo->base.resv;
-	else
-		resv = &bo->base._resv;
-
 	if (dma_resv_test_signaled_rcu(resv, true))
 		ret = 0;
 	else
@@ -547,9 +461,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			dma_resv_unlock(bo->base.resv);
 		spin_unlock(&ttm_bo_glob.lru_lock);
 
-		lret = dma_resv_wait_timeout_rcu(resv, true,
-							   interruptible,
-							   30 * HZ);
+		lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+						 30 * HZ);
 
 		if (lret < 0)
 			return lret;
@@ -581,14 +494,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 	ttm_bo_del_from_lru(bo);
 	list_del_init(&bo->ddestroy);
-	kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
 	spin_unlock(&ttm_bo_glob.lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
 	if (unlock_resv)
 		dma_resv_unlock(bo->base.resv);
 
+	ttm_bo_put(bo);
+
 	return 0;
 }
 
@@ -610,8 +523,9 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 
 		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
 				      ddestroy);
-		kref_get(&bo->list_kref);
 		list_move_tail(&bo->ddestroy, &removed);
+		if (!ttm_bo_get_unless_zero(bo))
+			continue;
 
 		if (remove_all || bo->base.resv != &bo->base._resv) {
 			spin_unlock(&glob->lru_lock);
@@ -626,7 +540,7 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			spin_unlock(&glob->lru_lock);
 		}
 
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		ttm_bo_put(bo);
 		spin_lock(&glob->lru_lock);
 	}
 	list_splice_tail(&removed, &bdev->ddestroy);
@@ -652,16 +566,69 @@ static void ttm_bo_release(struct kref *kref)
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+	size_t acc_size = bo->acc_size;
+	int ret;
 
-	if (bo->bdev->driver->release_notify)
-		bo->bdev->driver->release_notify(bo);
+	if (!bo->deleted) {
+		ret = ttm_bo_individualize_resv(bo);
+		if (ret) {
+			/* Last resort: if we fail to allocate memory for the
+			 * fences, block until the BO becomes idle.
+			 */
+			dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+						  30 * HZ);
+		}
 
-	drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-	ttm_mem_io_lock(man, false);
-	ttm_mem_io_free_vm(bo);
-	ttm_mem_io_unlock(man);
-	ttm_bo_cleanup_refs_or_queue(bo);
-	kref_put(&bo->list_kref, ttm_bo_release_list);
+		if (bo->bdev->driver->release_notify)
+			bo->bdev->driver->release_notify(bo);
+
+		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+		ttm_mem_io_lock(man, false);
+		ttm_mem_io_free_vm(bo);
+		ttm_mem_io_unlock(man);
+	}
+
+	if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+		/* The BO is not idle, resurrect it for delayed destroy */
+		ttm_bo_flush_all_fences(bo);
+		bo->deleted = true;
+
+		spin_lock(&ttm_bo_glob.lru_lock);
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_del_from_lru(bo);
+			ttm_bo_add_mem_to_lru(bo, &bo->mem);
+		}
+
+		kref_init(&bo->kref);
+		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+		spin_unlock(&ttm_bo_glob.lru_lock);
+
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+		return;
+	}
+
+	spin_lock(&ttm_bo_glob.lru_lock);
+	ttm_bo_del_from_lru(bo);
+	list_del(&bo->ddestroy);
+	spin_unlock(&ttm_bo_glob.lru_lock);
+
+	ttm_bo_cleanup_memtype_use(bo);
+
+	BUG_ON(bo->mem.mm_node != NULL);
+	atomic_dec(&ttm_bo_glob.bo_count);
+	dma_fence_put(bo->moving);
+	if (!ttm_bo_uses_embedded_gem_object(bo))
+		dma_resv_fini(&bo->base._resv);
+	bo->destroy(bo);
+	ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -764,8 +731,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 
 	if (bo->base.resv == ctx->resv) {
 		dma_resv_assert_held(bo->base.resv);
-		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
-		    || !list_empty(&bo->ddestroy))
+		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
 			ret = true;
 		*locked = false;
 		if (busy)
@@ -846,6 +812,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 					dma_resv_unlock(bo->base.resv);
 				continue;
 			}
+			if (!ttm_bo_get_unless_zero(bo)) {
+				if (locked)
+					dma_resv_unlock(bo->base.resv);
+				continue;
+			}
 			break;
 		}
 
@@ -857,21 +828,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	}
 
 	if (!bo) {
-		if (busy_bo)
-			kref_get(&busy_bo->list_kref);
+		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
+			busy_bo = NULL;
 		spin_unlock(&ttm_bo_glob.lru_lock);
 		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 		if (busy_bo)
-			kref_put(&busy_bo->list_kref, ttm_bo_release_list);
+			ttm_bo_put(busy_bo);
 		return ret;
 	}
 
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
+	if (bo->deleted) {
 		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
 					  ctx->no_wait_gpu, locked);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		ttm_bo_put(bo);
 		return ret;
 	}
 
@@ -881,7 +850,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	if (locked)
 		ttm_bo_unreserve(bo);
 
-	kref_put(&bo->list_kref, ttm_bo_release_list);
+	ttm_bo_put(bo);
 	return ret;
 }
 
@@ -1226,6 +1195,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	uint32_t new_flags;
 
 	dma_resv_assert_held(bo->base.resv);
+
+	/*
+	 * Remove the backing store if no placement is given.
+	 */
+	if (!placement->num_placement && !placement->num_busy_placement) {
+		ret = ttm_bo_pipeline_gutting(bo);
+		if (ret)
+			return ret;
+
+		return ttm_tt_create(bo, false);
+	}
+
 	/*
 	 * Check whether we need to move buffer.
 	 */
@@ -1293,7 +1274,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
-	kref_init(&bo->list_kref);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
@@ -1813,11 +1793,18 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	spin_lock(&glob->lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		list_for_each_entry(bo, &glob->swap_lru[i], swap) {
-			if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
-							   NULL)) {
-				ret = 0;
-				break;
+			if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+							    NULL))
+				continue;
+
+			if (!ttm_bo_get_unless_zero(bo)) {
+				if (locked)
+					dma_resv_unlock(bo->base.resv);
+				continue;
 			}
+
+			ret = 0;
+			break;
 		}
 		if (!ret)
 			break;
@@ -1828,11 +1815,9 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 		return ret;
 	}
 
-	kref_get(&bo->list_kref);
-
-	if (!list_empty(&bo->ddestroy)) {
+	if (bo->deleted) {
 		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		ttm_bo_put(bo);
 		return ret;
 	}
 
@@ -1886,7 +1871,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
 	 */
 	if (locked)
 		dma_resv_unlock(bo->base.resv);
-	kref_put(&bo->list_kref, ttm_bo_release_list);
+	ttm_bo_put(bo);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_swapout);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 953c82a..52d2b71 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -507,11 +507,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
-	kref_init(&fbo->base.list_kref);
 	kref_init(&fbo->base.kref);
 	fbo->base.destroy = &ttm_transfered_destroy;
 	fbo->base.acc_size = 0;
-	if (bo->base.resv == &bo->base._resv)
+	if (bo->type != ttm_bo_type_sg)
 		fbo->base.base.resv = &fbo->base.base._resv;
 
 	dma_resv_init(&fbo->base.base._resv);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bf876fa..faefaae 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -604,7 +604,7 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
 	p = pool->name;
 	for (i = 0; i < ARRAY_SIZE(t); i++) {
 		if (type & t[i]) {
-			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+			p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
 				      "%s", n[i]);
 		}
 	}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e9671d3..0afdfb0 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -109,7 +109,6 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
 };
 
 static const struct drm_connector_funcs udl_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = udl_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 22af179..d59ebac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -375,8 +375,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
 	char *wrptr;
 	int color_depth = UDL_COLOR_DEPTH_16BPP;
 
-	crtc_state->no_vblank = true;
-
 	buf = (char *)udl->mode_buf;
 
 	/* This first section has to do with setting the base address on the
@@ -428,14 +426,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
 	udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
 }
 
-static int
-udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
-			      struct drm_plane_state *plane_state,
-			      struct drm_crtc_state *crtc_state)
-{
-	return 0;
-}
-
 static void
 udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
 			       struct drm_plane_state *old_plane_state)
@@ -457,7 +447,6 @@ struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
 	.mode_valid = udl_simple_display_pipe_mode_valid,
 	.enable = udl_simple_display_pipe_enable,
 	.disable = udl_simple_display_pipe_disable,
-	.check = udl_simple_display_pipe_check,
 	.update = udl_simple_display_pipe_update,
 	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
 };
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9a35c55..ac26033 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -254,27 +254,42 @@ struct v3d_csd_job {
 };
 
 /**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
  *
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
  */
-#define wait_for(COND, MS) ({ \
-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
-	int ret__ = 0;							\
-	while (!(COND)) {						\
-		if (time_after(jiffies, timeout__)) {			\
-			if (!(COND))					\
-				ret__ = -ETIMEDOUT;			\
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
+	int ret__;							\
+	might_sleep();							\
+	for (;;) {							\
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+		OP;							\
+		/* Guarantee COND check prior to timeout */		\
+		barrier();						\
+		if (COND) {						\
+			ret__ = 0;					\
 			break;						\
 		}							\
-		msleep(1);					\
+		if (expired__) {					\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		usleep_range(wait__, wait__ * 2);			\
+		if (wait__ < (Wmax))					\
+			wait__ <<= 1;					\
 	}								\
 	ret__;								\
 })
 
+#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
+						   (Wmax))
+#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
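+
+/*
+ * Illustrative usage sketch (the condition polled here is hypothetical, not
+ * a register in this driver):
+ *
+ *	err = wait_for(status_read() & DONE_BIT, 10);
+ *
+ * This polls the condition for up to 10 ms, with the usleep_range() interval
+ * backing off exponentially from 10 us to 1 ms, and returns 0 on success or
+ * -ETIMEDOUT if the condition never became true.
+ */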
+
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
 {
 	/* nsecs_to_jiffies64() does not guard against overflow */
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 1961213..0883a43 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -18,7 +18,6 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
 
 #include "hgsmi_channels.h"
 #include "vbox_drv.h"
@@ -226,17 +225,6 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
 static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
 				   struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_pending_vblank_event *event;
-	unsigned long flags;
-
-	if (crtc->state && crtc->state->event) {
-		event = crtc->state->event;
-		crtc->state->event = NULL;
-
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
-		drm_crtc_send_vblank_event(crtc, event);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	}
 }
 
 static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
@@ -838,6 +826,7 @@ static int vbox_connector_init(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs vbox_mode_funcs = {
 	.fb_create = drm_gem_fb_create_with_dirty,
+	.mode_valid = drm_vram_helper_mode_valid,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004..a5de40f 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -138,7 +138,7 @@ struct vbva_buffer {
 
 	u32 data_len;
 	/* variable size for the rest of the vbva_buffer area in VRAM. */
-	u8 data[0];
+	u8 data[];
 } __packed;
 
 #define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index b00e20f..1208258 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -84,13 +84,14 @@ static const struct debugfs_reg32 crtc_regs[] = {
 	VC4_REG32(PV_HACT_ACT),
 };
 
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
-			     bool in_vblank_irq, int *vpos, int *hpos,
-			     ktime_t *stime, ktime_t *etime,
-			     const struct drm_display_mode *mode)
+static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
+					  bool in_vblank_irq,
+					  int *vpos, int *hpos,
+					  ktime_t *stime, ktime_t *etime,
+					  const struct drm_display_mode *mode)
 {
+	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	u32 val;
 	int fifo_lines;
@@ -1030,6 +1031,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
 	.enable_vblank = vc4_enable_vblank,
 	.disable_vblank = vc4_disable_vblank,
+	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
 };
 
 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1039,6 +1041,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
 	.atomic_flush = vc4_crtc_atomic_flush,
 	.atomic_enable = vc4_crtc_atomic_enable,
 	.atomic_disable = vc4_crtc_atomic_disable,
+	.get_scanout_position = vc4_crtc_get_scanout_position,
 };
 
 static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c586325d..6dfede0 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -252,7 +252,7 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
 		bridge = drm_panel_bridge_add_typed(panel,
 						    DRM_MODE_CONNECTOR_DPI);
 
-	return drm_bridge_attach(dpi->encoder, bridge, NULL);
+	return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
 }
 
 static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c..76f93b6 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -190,9 +190,6 @@ static struct drm_driver vc4_drm_driver = {
 	.irq_postinstall = vc4_irq_postinstall,
 	.irq_uninstall = vc4_irq_uninstall,
 
-	.get_scanout_position = vc4_crtc_get_scanoutpos,
-	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = vc4_debugfs_init,
 #endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20..139d25a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -65,7 +65,7 @@ struct vc4_perfmon {
 	 * Note that counter values can't be reset, but you can fake a reset by
 	 * destroying the perfmon and creating a new one.
 	 */
-	u64 counters[0];
+	u64 counters[];
 };
 
 struct vc4_dev {
@@ -677,32 +677,41 @@ struct vc4_validated_shader_info {
 };
 
 /**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
  *
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
  */
-#define _wait_for(COND, MS, W) ({ \
-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
-	int ret__ = 0;							\
-	while (!(COND)) {						\
-		if (time_after(jiffies, timeout__)) {			\
-			if (!(COND))					\
-				ret__ = -ETIMEDOUT;			\
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
+	int ret__;							\
+	might_sleep();							\
+	for (;;) {							\
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+		OP;							\
+		/* Guarantee COND check prior to timeout */		\
+		barrier();						\
+		if (COND) {						\
+			ret__ = 0;					\
 			break;						\
 		}							\
-		if (W && drm_can_sleep())  {				\
-			msleep(W);					\
-		} else {						\
-			cpu_relax();					\
+		if (expired__) {					\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
 		}							\
+		usleep_range(wait__, wait__ * 2);			\
+		if (wait__ < (Wmax))					\
+			wait__ <<= 1;					\
 	}								\
 	ret__;								\
 })
 
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
+						   (Wmax))
+#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
 
 /* vc4_bo.c */
 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
@@ -743,10 +752,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
 
 /* vc4_crtc.c */
 extern struct platform_driver vc4_crtc_driver;
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
-			     bool in_vblank_irq, int *vpos, int *hpos,
-			     ktime_t *stime, ktime_t *etime,
-			     const struct drm_display_mode *mode);
 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
 void vc4_crtc_txp_armed(struct drm_crtc_state *state);
 void vc4_crtc_get_margins(struct drm_crtc_state *state,
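
The rewritten __wait_for() (mirroring the i915 version) polls against a
ktime deadline with exponential usleep_range() backoff between Wmin and Wmax
microseconds instead of the old jiffies/msleep loop, and re-checks COND once
more after the deadline so a preemption-induced timeout cannot report a
false failure. Call sites keep the old shape; a hedged usage sketch
(FOO_REG and FOO_IDLE are illustrative):

    int ret;

    /* poll up to 50 ms for the idle bit, sleeping between reads */
    ret = wait_for(readl(vc4->regs + FOO_REG) & FOO_IDLE, 50);
    if (ret == -ETIMEDOUT)
            DRM_ERROR("timed out waiting for FOO to idle\n");
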
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index fd8a2eb..d99b1d5 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1619,7 +1619,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 			 DRM_MODE_ENCODER_DSI, NULL);
 	drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
 
-	ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+	ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
 	if (ret) {
 		dev_err(dev, "bridge attach failed: %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4934127..91e408f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -139,7 +139,7 @@ static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
 
 static bool plane_enabled(struct drm_plane_state *state)
 {
-	return state->fb && state->crtc;
+	return state->fb && !WARN_ON(!state->crtc);
 }
 
 static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b..e27120d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
 
 	virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
 	virtio_add_bool(m, "edid", vgdev->has_edid);
+	virtio_add_bool(m, "indirect", vgdev->has_indirect);
 	virtio_add_int(m, "cap sets", vgdev->num_capsets);
 	virtio_add_int(m, "scanouts", vgdev->num_scanouts);
 	return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208..2b7e6ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
 
 #include "virtgpu_drv.h"
 
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
 				   crtc->mode.hdisplay,
 				   crtc->mode.vdisplay, 0, 0);
+	virtio_gpu_notify(vgdev);
 }
 
 static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
 
 	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+	virtio_gpu_notify(vgdev);
 	output->enabled = false;
 }
 
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
 					 struct drm_crtc_state *old_state)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-	if (crtc->state->event)
-		drm_crtc_send_vblank_event(crtc, crtc->state->event);
-	crtc->state->event = NULL;
-	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 	drm_atomic_helper_commit_planes(dev, state, 0);
 
+	drm_atomic_helper_fake_vblank(state);
 	drm_atomic_helper_commit_hw_done(state);
 
 	drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
 		kfree(vgdev->outputs[i].edid);
-	drm_atomic_helper_shutdown(vgdev->ddev);
 	drm_mode_config_cleanup(vgdev->ddev);
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af..ab4bed7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
 #include <linux/pci.h>
 
 #include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
 
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
 {
 	struct drm_device *dev = vdev->priv;
 
-	drm_dev_unregister(dev);
+	drm_dev_unplug(dev);
+	drm_atomic_helper_shutdown(dev);
 	virtio_gpu_deinit(dev);
 	drm_dev_put(dev);
 }
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.release = virtio_gpu_release,
 };
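
The remove path switches from unregister to unplug: drm_dev_unplug() makes
subsequent drm_dev_enter() calls fail (userspace gets -ENODEV), hardware
teardown happens immediately in virtio_gpu_deinit(), and software-state
teardown waits for the last reference in the new .release hook
(virtio_gpu_release(), split out of deinit below). Schematically
(sketch; foo_hw_deinit() is a placeholder):

    static void foo_remove(struct virtio_device *vdev)
    {
            struct drm_device *dev = vdev->priv;

            drm_dev_unplug(dev);             /* new opens/ioctls now fail */
            drm_atomic_helper_shutdown(dev); /* quiesce the display */
            foo_hw_deinit(dev);              /* reset device, free vqs */
            drm_dev_put(dev);                /* .release frees the rest */
    }
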
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06..c1824bd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
 #include <linux/virtio_gpu.h>
 
 #include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
 struct virtio_gpu_object {
 	struct drm_gem_shmem_object base;
 	uint32_t hw_res_handle;
-
-	struct sg_table *pages;
-	uint32_t mapped;
 	bool dumb;
 	bool created;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
 	container_of((gobj), struct virtio_gpu_object, base.base)
 
+struct virtio_gpu_object_shmem {
+	struct virtio_gpu_object base;
+	struct sg_table *pages;
+	uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+	container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
 struct virtio_gpu_object_array {
 	struct ww_acquire_ctx ticket;
 	struct list_head next;
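
Per-backend state is split out of the base object: the shmem-only fields
(pages, mapped) move into a virtio_gpu_object_shmem subclass recovered via
the usual container_of() downcast, clearing the way for other backing types
later. The pattern in use (sketch):

    struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(gobj);

    if (virtio_gpu_is_shmem(bo)) {
            struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

            /* shmem->pages / shmem->mapped are valid only on this path */
    }
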
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
 	char *resp_buf;
 	int resp_size;
 	virtio_gpu_resp_cb resp_cb;
+	void *resp_cb_data;
 
 	struct virtio_gpu_object_array *objs;
 	struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
 	struct virtio_gpu_queue ctrlq;
 	struct virtio_gpu_queue cursorq;
 	struct kmem_cache *vbufs;
-	bool vqs_ready;
 
-	bool disable_notify;
-	bool pending_notify;
+	atomic_t pending_commands;
 
 	struct ida	resource_ida;
 
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
 
 	bool has_virgl_3d;
 	bool has_edid;
+	bool has_indirect;
 
 	struct work_struct config_changed_work;
 
@@ -207,6 +214,8 @@ struct virtio_gpu_device {
 
 struct virtio_gpu_fpriv {
 	uint32_t ctx_id;
+	bool context_created;
+	struct mutex context_lock;
 };
 
 /* virtio_ioctl.c */
@@ -216,6 +225,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
@@ -262,7 +272,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 				    struct virtio_gpu_object_array *objs,
 				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id);
+				   struct virtio_gpu_object *bo);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					uint32_t width, uint32_t height,
@@ -279,9 +289,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +341,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
 void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
 
 /* virtio_gpu_display.c */
 void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +363,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);
 
 /* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
 			     struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
 /* virtgpu_prime.c */
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 	struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b622..0d6152c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
 
 	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
 					       objs);
+	virtio_gpu_notify(vgdev);
 	return 0;
 }
 
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
 
 	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
 					       objs);
+	virtio_gpu_notify(vgdev);
 }
 
 struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4a..336cc91 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,13 +33,34 @@
 
 #include "virtgpu_drv.h"
 
+static void virtio_gpu_create_context(struct drm_device *dev,
+				      struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	char dbgname[TASK_COMM_LEN];
+
+	mutex_lock(&vfpriv->context_lock);
+	if (vfpriv->context_created)
+		goto out_unlock;
+
+	get_task_comm(dbgname, current);
+	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+				      strlen(dbgname), dbgname);
+	virtio_gpu_notify(vgdev);
+	vfpriv->context_created = true;
+
+out_unlock:
+	mutex_unlock(&vfpriv->context_lock);
+}
+
 static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_map *virtio_gpu_map = data;
 
-	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
 					 virtio_gpu_map->handle,
 					 &virtio_gpu_map->offset);
 }
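
Context creation moves from open() time (see virtgpu_kms.c below) to first
use by a 3D ioctl; the context_lock/context_created pair above is a plain
mutex-guarded run-once. The guard, reduced to its core (sketch, names
illustrative):

    mutex_lock(&priv->lock);
    if (!priv->initialized) {
            expensive_init(priv);       /* runs at most once per file */
            priv->initialized = true;
    }
    mutex_unlock(&priv->lock);
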
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
  * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
  */
 static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *drm_file)
+				 struct drm_file *file)
 {
 	struct drm_virtgpu_execbuffer *exbuf = data;
 	struct virtio_gpu_device *vgdev = dev->dev_private;
-	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct virtio_gpu_fence *out_fence;
 	int ret;
 	uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
 	exbuf->fence_fd = -1;
 
+	virtio_gpu_create_context(dev, file);
 	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
 		struct dma_fence *in_fence;
 
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 			goto out_unused_fd;
 		}
 
-		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+		buflist = virtio_gpu_array_from_handles(file, bo_handles,
 							exbuf->num_bo_handles);
 		if (!buflist) {
 			ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		bo_handles = NULL;
 	}
 
-	if (buflist) {
-		ret = virtio_gpu_array_lock_resv(buflist);
-		if (ret)
-			goto out_unused_fd;
-	}
-
 	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
-		goto out_unresv;
+		goto out_unused_fd;
+	}
+
+	if (buflist) {
+		ret = virtio_gpu_array_lock_resv(buflist);
+		if (ret)
+			goto out_memdup;
 	}
 
 	out_fence = virtio_gpu_fence_alloc(vgdev);
 	if(!out_fence) {
 		ret = -ENOMEM;
-		goto out_memdup;
+		goto out_unresv;
 	}
 
 	if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
 			      vfpriv->ctx_id, buflist, out_fence);
+	virtio_gpu_notify(vgdev);
 	return 0;
 
-out_memdup:
-	kvfree(buf);
 out_unresv:
 	if (buflist)
 		virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+	kvfree(buf);
 out_unused_fd:
 	kvfree(bo_handles);
 	if (buflist)
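
The label swap above restores LIFO unwinding: the reservation lock is now
taken after the memdup, so on error it must be released before the buffer is
freed, and each goto target undoes exactly the acquisitions that precede it.
The shape of the pattern (sketch; lock_reservations()/do_submit() are
placeholders):

    buf = vmemdup_user(ucmd, size);             /* acquire #1 */
    if (IS_ERR(buf))
            return PTR_ERR(buf);

    ret = lock_reservations(list);              /* acquire #2 */
    if (ret)
            goto out_free;

    ret = do_submit(buf, list);
    if (ret)
            goto out_unlock;
    return 0;

    out_unlock:
            unlock_reservations(list);          /* release #2 first */
    out_free:
            kvfree(buf);                        /* then release #1 */
            return ret;
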
@@ -177,7 +200,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 }
 
 static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv)
+				     struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 }
 
 static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
-					    struct drm_file *file_priv)
+					    struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle = 0;
 	struct virtio_gpu_object_params params = { 0 };
 
-	if (vgdev->has_virgl_3d == false) {
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_create_context(dev, file);
+		params.virgl = true;
+		params.target = rc->target;
+		params.bind = rc->bind;
+		params.depth = rc->depth;
+		params.array_size = rc->array_size;
+		params.last_level = rc->last_level;
+		params.nr_samples = rc->nr_samples;
+		params.flags = rc->flags;
+	} else {
 		if (rc->depth > 1)
 			return -EINVAL;
 		if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	params.width = rc->width;
 	params.height = rc->height;
 	params.size = rc->size;
-	if (vgdev->has_virgl_3d) {
-		params.virgl = true;
-		params.target = rc->target;
-		params.bind = rc->bind;
-		params.depth = rc->depth;
-		params.array_size = rc->array_size;
-		params.last_level = rc->last_level;
-		params.nr_samples = rc->nr_samples;
-		params.flags = rc->flags;
-	}
 	/* allocate a single page size object */
 	if (params.size == 0)
 		params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	obj = &qobj->base.base;
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	ret = drm_gem_handle_create(file, obj, &handle);
 	if (ret) {
 		drm_gem_object_release(obj);
 		return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 }
 
 static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
-					  struct drm_file *file_priv)
+					  struct drm_file *file)
 {
 	struct drm_virtgpu_resource_info *ri = data;
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 
-	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+	gobj = drm_gem_object_lookup(file, ri->bo_handle);
 	if (gobj == NULL)
 		return -ENOENT;
 
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (vgdev->has_virgl_3d == false)
 		return -ENOSYS;
 
+	virtio_gpu_create_context(dev, file);
 	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
 	if (objs == NULL)
 		return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 		(vgdev, vfpriv->ctx_id, offset, args->level,
 		 &args->box, objs, fence);
 	dma_fence_put(&fence->f);
+	virtio_gpu_notify(vgdev);
 	return 0;
 
 err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 args->box.w, args->box.h, args->box.x, args->box.y,
 			 objs, NULL);
 	} else {
+		virtio_gpu_create_context(dev, file);
 		ret = virtio_gpu_array_lock_resv(objs);
 		if (ret != 0)
 			goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 			 args->level, &args->box, objs, fence);
 		dma_fence_put(&fence->f);
 	}
+	virtio_gpu_notify(vgdev);
 	return 0;
 
 err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	/* not in cache - need to talk to hw */
 	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
 				  &cache_ent);
+	virtio_gpu_notify(vgdev);
 
 copy_exit:
 	ret = wait_event_timeout(vgdev->resp_wq,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e..023a030 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		if (vgdev->has_edid)
 			virtio_gpu_cmd_get_edids(vgdev);
 		virtio_gpu_cmd_get_display_info(vgdev);
+		virtio_gpu_notify(vgdev);
 		drm_helper_hpd_irq_event(vgdev->ddev);
 		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
 	}
@@ -51,22 +52,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		      events_clear, &events_clear);
 }
 
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
-				      uint32_t nlen, const char *name)
-{
-	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
-	if (handle < 0)
-		return handle;
-	handle += 1;
-	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
-	return handle;
-}
-
 static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
 				      uint32_t ctx_id)
 {
 	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+	virtio_gpu_notify(vgdev);
 	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
 }
 
@@ -92,6 +82,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
 	}
 	for (i = 0; i < num_capsets; i++) {
 		virtio_gpu_cmd_get_capset_info(vgdev, i);
+		virtio_gpu_notify(vgdev);
 		ret = wait_event_timeout(vgdev->resp_wq,
 					 vgdev->capsets[i].id > 0, 5 * HZ);
 		if (ret == 0) {
@@ -159,6 +150,9 @@ int virtio_gpu_init(struct drm_device *dev)
 	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
 		vgdev->has_edid = true;
 	}
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+		vgdev->has_indirect = true;
+	}
 
 	DRM_INFO("features: %cvirgl %cedid\n",
 		 vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +190,13 @@ int virtio_gpu_init(struct drm_device *dev)
 	virtio_gpu_modeset_init(vgdev);
 
 	virtio_device_ready(vgdev->vdev);
-	vgdev->vqs_ready = true;
 
 	if (num_capsets)
 		virtio_gpu_get_capsets(vgdev, num_capsets);
 	if (vgdev->has_edid)
 		virtio_gpu_cmd_get_edids(vgdev);
 	virtio_gpu_cmd_get_display_info(vgdev);
+	virtio_gpu_notify(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);
 	return 0;
@@ -231,12 +225,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 	flush_work(&vgdev->obj_free_work);
-	vgdev->vqs_ready = false;
 	flush_work(&vgdev->ctrlq.dequeue_work);
 	flush_work(&vgdev->cursorq.dequeue_work);
 	flush_work(&vgdev->config_changed_work);
 	vgdev->vdev->config->reset(vgdev->vdev);
 	vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
 
 	virtio_gpu_modeset_fini(vgdev);
 	virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +247,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv;
-	int id;
-	char dbgname[TASK_COMM_LEN];
+	int handle;
 
 	/* can't create contexts without 3d renderer */
 	if (!vgdev->has_virgl_3d)
@@ -261,14 +258,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 	if (!vfpriv)
 		return -ENOMEM;
 
-	get_task_comm(dbgname, current);
-	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
-	if (id < 0) {
+	mutex_init(&vfpriv->context_lock);
+
+	handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+	if (handle < 0) {
 		kfree(vfpriv);
-		return id;
+		return handle;
 	}
 
-	vfpriv->ctx_id = id;
+	vfpriv->ctx_id = handle + 1;
 	file->driver_priv = vfpriv;
 	return 0;
 }
@@ -284,6 +282,7 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 	vfpriv = file->driver_priv;
 
 	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	mutex_destroy(&vfpriv->context_lock);
 	kfree(vfpriv);
 	file->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 3af7ec8..2bfb13d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
 	}
 }
 
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
+{
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+	if (virtio_gpu_is_shmem(bo)) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+		if (shmem->pages) {
+			if (shmem->mapped) {
+				dma_unmap_sg(vgdev->vdev->dev.parent,
+					     shmem->pages->sgl, shmem->mapped,
+					     DMA_TO_DEVICE);
+				shmem->mapped = 0;
+			}
+
+			sg_free_table(shmem->pages);
+			shmem->pages = NULL;
+			drm_gem_shmem_unpin(&bo->base.base);
+		}
+
+		drm_gem_shmem_free_object(&bo->base.base);
+	}
+}
+
 static void virtio_gpu_free_object(struct drm_gem_object *obj)
 {
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	if (bo->pages)
-		virtio_gpu_object_detach(vgdev, bo);
-	if (bo->created)
-		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
-	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-
-	drm_gem_shmem_free_object(obj);
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		/* completion handler calls virtio_gpu_cleanup_object() */
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
 }
 
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
 	.free = virtio_gpu_free_object,
 	.open = virtio_gpu_gem_object_open,
 	.close = virtio_gpu_gem_object_close,
@@ -86,9 +112,14 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
 	.get_sg_table = drm_gem_shmem_get_sg_table,
 	.vmap = drm_gem_shmem_vmap,
 	.vunmap = drm_gem_shmem_vunmap,
-	.mmap = &drm_gem_shmem_mmap,
+	.mmap = drm_gem_shmem_mmap,
 };
 
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 						size_t size)
 {
@@ -98,11 +129,58 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 	if (!bo)
 		return NULL;
 
-	bo->base.base.funcs = &virtio_gpu_gem_funcs;
+	bo->base.base.funcs = &virtio_gpu_shmem_funcs;
 	bo->base.map_cached = true;
 	return &bo->base.base;
 }
 
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_object *bo,
+					struct virtio_gpu_mem_entry **ents,
+					unsigned int *nents)
+{
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+	struct scatterlist *sg;
+	int si, ret;
+
+	ret = drm_gem_shmem_pin(&bo->base.base);
+	if (ret < 0)
+		return -EINVAL;
+
+	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+	if (!shmem->pages) {
+		drm_gem_shmem_unpin(&bo->base.base);
+		return -EINVAL;
+	}
+
+	if (use_dma_api) {
+		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					   shmem->pages->sgl,
+					   shmem->pages->nents,
+					   DMA_TO_DEVICE);
+		*nents = shmem->mapped;
+	} else {
+		*nents = shmem->pages->nents;
+	}
+
+	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+			      GFP_KERNEL);
+	if (!(*ents)) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+		(*ents)[si].addr = cpu_to_le64(use_dma_api
+					       ? sg_dma_address(sg)
+					       : sg_phys(sg));
+		(*ents)[si].length = cpu_to_le32(sg->length);
+		(*ents)[si].padding = 0;
+	}
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -111,6 +189,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object_array *objs = NULL;
 	struct drm_gem_shmem_object *shmem_obj;
 	struct virtio_gpu_object *bo;
+	struct virtio_gpu_mem_entry *ents;
+	unsigned int nents;
 	int ret;
 
 	*bo_ptr = NULL;
@@ -147,12 +227,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 					       objs, fence);
 	}
 
-	ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
 	if (ret != 0) {
 		virtio_gpu_free_object(&shmem_obj->base);
 		return ret;
 	}
 
+	ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
+	if (ret != 0) {
+		virtio_gpu_free_object(&shmem_obj->base);
+		return ret;
+	}
+
+	virtio_gpu_notify(vgdev);
 	*bo_ptr = bo;
 	return 0;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5f..52d2417 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 					   plane->state->src_w >> 16,
 					   plane->state->src_h >> 16,
 					   0, 0);
+		virtio_gpu_notify(vgdev);
 		return;
 	}
 
 	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
 		return;
 
-	virtio_gpu_disable_notify(vgdev);
-
 	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
 	if (bo->dumb)
 		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				      rect.y1,
 				      rect.x2 - rect.x1,
 				      rect.y2 - rect.y1);
-
-	virtio_gpu_enable_notify(vgdev);
+	virtio_gpu_notify(vgdev);
 }
 
 static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			 plane->state->crtc_w,
 			 plane->state->crtc_h,
 			 0, 0, objs, vgfb->fence);
+		virtio_gpu_notify(vgdev);
 		dma_fence_wait(&vgfb->fence->f, true);
 		dma_fence_put(&vgfb->fence->f);
 		vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79..7385491 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 	if (!vbuf)
 		return ERR_PTR(-ENOMEM);
 
-	BUG_ON(size > MAX_INLINE_CMD_SIZE);
+	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+	       size < sizeof(struct virtio_gpu_ctrl_hdr));
 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
 	vbuf->size = size;
 
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 	return vbuf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_vbuffer **vbuffer_p,
-				  int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 {
-	struct virtio_gpu_vbuffer *vbuf;
-
-	vbuf = virtio_gpu_get_vbuf(vgdev, size,
-				   sizeof(struct virtio_gpu_ctrl_hdr),
-				   NULL, NULL);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
-	*vbuffer_p = vbuf;
-	return vbuf->buf;
+	/* this assumes a vbuf contains a command that starts with a
+	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+	 * virtqueues.
+	 */
+	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 }
 
 static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
 	return (struct virtio_gpu_command *)vbuf->buf;
 }
 
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer **vbuffer_p,
+				     int size,
+				     virtio_gpu_resp_cb cb)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
 static void free_vbuf(struct virtio_gpu_device *vgdev,
 		      struct virtio_gpu_vbuffer *vbuf)
 {
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 
 		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
-			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
 				struct virtio_gpu_ctrl_hdr *cmd;
-				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
-				DRM_ERROR("response 0x%x (command 0x%x)\n",
-					  le32_to_cpu(resp->type),
-					  le32_to_cpu(cmd->type));
+				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+						      le32_to_cpu(resp->type),
+						      le32_to_cpu(cmd->type));
 			} else
 				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
 		}
@@ -307,135 +320,134 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
 	return sgt;
 }
 
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-						struct virtio_gpu_vbuffer *vbuf,
-						struct scatterlist *vout)
-		__releases(&vgdev->ctrlq.qlock)
-		__acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+				      struct virtio_gpu_vbuffer *vbuf,
+				      struct virtio_gpu_fence *fence,
+				      int elemcnt,
+				      struct scatterlist **sgs,
+				      int outcnt,
+				      int incnt)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *sgs[3], vcmd, vresp;
-	int outcnt = 0, incnt = 0;
-	bool notify = false;
-	int ret;
+	int ret, idx;
 
-	if (!vgdev->vqs_ready)
-		return notify;
-
-	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
-	sgs[outcnt + incnt] = &vcmd;
-	outcnt++;
-
-	if (vout) {
-		sgs[outcnt + incnt] = vout;
-		outcnt++;
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		if (fence && vbuf->objs)
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		free_vbuf(vgdev, vbuf);
+		return;
 	}
 
-	if (vbuf->resp_size) {
-		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
-		sgs[outcnt + incnt] = &vresp;
-		incnt++;
-	}
-
-retry:
-	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
-	if (ret == -ENOSPC) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
-		spin_lock(&vgdev->ctrlq.qlock);
-		goto retry;
-	} else {
-		trace_virtio_gpu_cmd_queue(vq,
-			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
-
-		notify = virtqueue_kick_prepare(vq);
-	}
-	return notify;
-}
-
-static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-						struct virtio_gpu_vbuffer *vbuf,
-						struct virtio_gpu_ctrl_hdr *hdr,
-						struct virtio_gpu_fence *fence)
-{
-	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *vout = NULL, sg;
-	struct sg_table *sgt = NULL;
-	bool notify;
-	int outcnt = 0;
-
-	if (vbuf->data_size) {
-		if (is_vmalloc_addr(vbuf->data_buf)) {
-			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
-					     &outcnt);
-			if (!sgt)
-				return;
-			vout = sgt->sgl;
-		} else {
-			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
-			vout = &sg;
-			outcnt = 1;
-		}
-	}
+	if (vgdev->has_indirect)
+		elemcnt = 1;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
 
-	/*
-	 * Make sure we have enouth space in the virtqueue.  If not
-	 * wait here until we have.
-	 *
-	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
-	 * to wait for free space, which can result in fence ids being
-	 * submitted out-of-order.
-	 */
-	if (vq->num_free < 2 + outcnt) {
+	if (vq->num_free < elemcnt) {
 		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+		virtio_gpu_notify(vgdev);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
 		goto again;
 	}
 
-	if (hdr && fence) {
-		virtio_gpu_fence_emit(vgdev, hdr, fence);
+	/* now that the position of the vbuf in the virtqueue is known, we can
+	 * finally set the fence id
+	 */
+	if (fence) {
+		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+				      fence);
 		if (vbuf->objs) {
 			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
 			virtio_gpu_array_unlock_resv(vbuf->objs);
 		}
 	}
-	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
+
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	WARN_ON(ret);
+
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+	atomic_inc(&vgdev->pending_commands);
+
 	spin_unlock(&vgdev->ctrlq.qlock);
-	if (notify) {
-		if (vgdev->disable_notify)
-			vgdev->pending_notify = true;
-		else
-			virtqueue_notify(vgdev->ctrlq.vq);
+
+	drm_dev_exit(idx);
+}
+
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf,
+						struct virtio_gpu_fence *fence)
+{
+	struct scatterlist *sgs[3], vcmd, vout, vresp;
+	struct sg_table *sgt = NULL;
+	int elemcnt = 0, outcnt = 0, incnt = 0;
+
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vout */
+	if (vbuf->data_size) {
+		if (is_vmalloc_addr(vbuf->data_buf)) {
+			int sg_ents;
+			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+					     &sg_ents);
+			if (!sgt) {
+				if (fence && vbuf->objs)
+					virtio_gpu_array_unlock_resv(vbuf->objs);
+				return;
+			}
+
+			elemcnt += sg_ents;
+			sgs[outcnt] = sgt->sgl;
+		} else {
+			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+			elemcnt++;
+			sgs[outcnt] = &vout;
+		}
+		outcnt++;
 	}
 
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
+	}
+
+	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+				  incnt);
+
 	if (sgt) {
 		sg_free_table(sgt);
 		kfree(sgt);
 	}
 }
 
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
 {
-	vgdev->disable_notify = true;
-}
+	bool notify;
 
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
-	vgdev->disable_notify = false;
-
-	if (!vgdev->pending_notify)
+	if (!atomic_read(&vgdev->pending_commands))
 		return;
-	vgdev->pending_notify = false;
-	virtqueue_notify(vgdev->ctrlq.vq);
+
+	spin_lock(&vgdev->ctrlq.qlock);
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					 struct virtio_gpu_vbuffer *vbuf)
 {
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
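
With submissions tracked by the atomic pending_commands counter, host
notification is fully decoupled from queuing: callers batch any number of
commands and kick the host once via virtio_gpu_notify(), which is why
virtio_gpu_notify() calls appear throughout the earlier files. Intended
calling convention (sketch; the argument values are illustrative):

    /* queue a burst of commands without kicking the host ... */
    virtio_gpu_cmd_set_scanout(vgdev, idx, res_id, w, h, 0, 0);
    virtio_gpu_cmd_resource_flush(vgdev, res_id, 0, 0, w, h);

    /* ... then one notify covers everything queued so far */
    virtio_gpu_notify(vgdev);
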
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	int idx, ret, outcnt;
 	bool notify;
-	int ret;
-	int outcnt;
 
-	if (!vgdev->vqs_ready)
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		free_vbuf(vgdev, vbuf);
 		return;
+	}
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 		goto retry;
 	} else {
 		trace_virtio_gpu_cmd_queue(vq,
-			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+			virtio_gpu_vbuf_ctrl_hdr(vbuf));
 
 		notify = virtqueue_kick_prepare(vq);
 	}
@@ -473,6 +486,8 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 
 	if (notify)
 		virtqueue_notify(vq);
+
+	drm_dev_exit(idx);
 }
 
 /* just create gem objects for userspace and long lived objects,
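
The vqs_ready flag is gone: both queue paths now bracket their work with
drm_dev_enter()/drm_dev_exit(), which fails once drm_dev_unplug() has run
and, unlike a bare bool, synchronizes against concurrent removal. The guard
in isolation (sketch; queue_hw() is a placeholder):

    int idx;

    if (!drm_dev_enter(ddev, &idx)) {
            free_vbuf(vgdev, vbuf);     /* device gone: drop, don't queue */
            return;
    }

    queue_hw(vgdev, vbuf);              /* unplug waits for this section */

    drm_dev_exit(idx);
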
@@ -499,41 +514,38 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	cmd_p->width = cpu_to_le32(params->width);
 	cmd_p->height = cpu_to_le32(params->height);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 	bo->created = true;
 }
 
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_object *bo;
+
+	bo = vbuf->resp_cb_data;
+	vbuf->resp_cb_data = NULL;
+
+	virtio_gpu_cleanup_object(bo);
+}
+
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id)
+				   struct virtio_gpu_object *bo)
 {
 	struct virtio_gpu_resource_unref *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+					virtio_gpu_cmd_unref_cb);
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 
+	vbuf->resp_cb_data = bo;
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						  uint32_t resource_id,
-						  struct virtio_gpu_fence *fence)
-{
-	struct virtio_gpu_resource_detach_backing *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
-
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t scanout_id, uint32_t resource_id,
 				uint32_t width, uint32_t height,
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	cmd_p->r.x = cpu_to_le32(x);
 	cmd_p->r.y = cpu_to_le32(y);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 	vbuf->data_buf = ents;
 	vbuf->data_size = sizeof(*ents) * nents;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
 	cmd_p->flags = cpu_to_le32(params->flags);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
 	bo->created = true;
 }
 
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
 	if (use_dma_api)
 		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
+				       shmem->pages->sgl, shmem->pages->nents,
 				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
 	cmd_p->size = cpu_to_le32(data_size);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence)
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents, ret;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-	if (WARN_ON_ONCE(obj->pages))
-		return -EINVAL;
-
-	ret = drm_gem_shmem_pin(&obj->base.base);
-	if (ret < 0)
-		return -EINVAL;
-
-	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
-	if (obj->pages == NULL) {
-		drm_gem_shmem_unpin(&obj->base.base);
-		return -EINVAL;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
-					       fence);
+					       ents, nents, NULL);
 	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output)
 {
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 74f703b..ac85e17 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -76,10 +76,12 @@ static void vkms_disable_vblank(struct drm_crtc *crtc)
 	hrtimer_cancel(&out->vblank_hrtimer);
 }
 
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
-			       int *max_error, ktime_t *vblank_time,
-			       bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+				      int *max_error, ktime_t *vblank_time,
+				      bool in_vblank_irq)
 {
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
 	struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
 	struct vkms_output *output = &vkmsdev->output;
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -154,6 +156,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
 	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
 	.enable_vblank		= vkms_enable_vblank,
 	.disable_vblank		= vkms_disable_vblank,
+	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
 	.get_crc_sources	= vkms_get_crc_sources,
 	.set_crc_source		= vkms_set_crc_source,
 	.verify_crc_source	= vkms_verify_crc_source,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25bd751..860de05 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -103,7 +103,6 @@ static struct drm_driver vkms_driver = {
 	.dumb_create		= vkms_dumb_create,
 	.gem_vm_ops		= &vkms_gem_vm_ops,
 	.gem_free_object_unlocked = vkms_gem_free_object,
-	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
 	.gem_prime_import_sg_table = vkms_prime_import_sg_table,
 
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 7d52e24..eda04ff 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -111,10 +111,6 @@ struct vkms_gem_object {
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 		   struct drm_plane *primary, struct drm_plane *cursor);
 
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
-			       int *max_error, ktime_t *vblank_time,
-			       bool in_vblank_irq);
-
 int vkms_output_init(struct vkms_device *vkmsdev, int index);
 
 struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 5fc8f85..6d31265 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -117,7 +117,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
 	bool can_position = false;
 	int ret;
 
-	if (!state->fb | !state->crtc)
+	if (!state->fb || WARN_ON(!state->crtc))
 		return 0;
 
 	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
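
The old test used bitwise |, which is valid on the two booleans but always
evaluates both operands and hides the intent; || short-circuits, and
wrapping the crtc check in WARN_ON() additionally flags the supposedly
impossible fb-without-crtc state. Before and after, for comparison (sketch):

    if (!state->fb |  !state->crtc)          /* before: bitwise, no warning */
            return 0;

    if (!state->fb || WARN_ON(!state->crtc)) /* after: short-circuit + warn */
            return 0;
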
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c877a21..5c3515e 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
 	    vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
-	    vmwgfx_validation.o vmwgfx_page_dirty.o \
+	    vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
 	    ttm_object.o ttm_lock.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
index 9cbba0e..799bc09 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2020 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -104,12 +104,12 @@ typedef enum {
    SVGA_3D_CMD_DEAD1                                      = 1083,
    SVGA_3D_CMD_DEAD2                                      = 1084,
 
-   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
-   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
-   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1087,
-   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1088,
-   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1089,
-   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1090,
+   SVGA_3D_CMD_DEAD12                                     = 1085,
+   SVGA_3D_CMD_DEAD13                                     = 1086,
+   SVGA_3D_CMD_DEAD14                                     = 1087,
+   SVGA_3D_CMD_DEAD15                                     = 1088,
+   SVGA_3D_CMD_DEAD16                                     = 1089,
+   SVGA_3D_CMD_DEAD17                                     = 1090,
 
    SVGA_3D_CMD_SET_OTABLE_BASE                            = 1091,
    SVGA_3D_CMD_READBACK_OTABLE                            = 1092,
@@ -261,30 +261,23 @@ typedef enum {
    SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
    SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET           = 1221,
    SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
-
-   /*
-    * Reserve some IDs to be used for the SM5 shader types.
-    */
-   SVGA_3D_CMD_DX_RESERVED1                               = 1223,
-   SVGA_3D_CMD_DX_RESERVED2                               = 1224,
-   SVGA_3D_CMD_DX_RESERVED3                               = 1225,
+   SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET           = 1223,
+   SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET           = 1224,
+   SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET           = 1225,
 
    SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER                    = 1226,
    SVGA_3D_CMD_DX_MAX                                     = 1227,
 
    SVGA_3D_CMD_SCREEN_COPY                                = 1227,
 
-   /*
-    * Reserve some IDs to be used for video.
-    */
-   SVGA_3D_CMD_VIDEO_RESERVED1                            = 1228,
-   SVGA_3D_CMD_VIDEO_RESERVED2                            = 1229,
-   SVGA_3D_CMD_VIDEO_RESERVED3                            = 1230,
-   SVGA_3D_CMD_VIDEO_RESERVED4                            = 1231,
-   SVGA_3D_CMD_VIDEO_RESERVED5                            = 1232,
-   SVGA_3D_CMD_VIDEO_RESERVED6                            = 1233,
-   SVGA_3D_CMD_VIDEO_RESERVED7                            = 1234,
-   SVGA_3D_CMD_VIDEO_RESERVED8                            = 1235,
+   SVGA_3D_CMD_RESERVED1                                  = 1228,
+   SVGA_3D_CMD_RESERVED2                                  = 1229,
+   SVGA_3D_CMD_RESERVED3                                  = 1230,
+   SVGA_3D_CMD_RESERVED4                                  = 1231,
+   SVGA_3D_CMD_RESERVED5                                  = 1232,
+   SVGA_3D_CMD_RESERVED6                                  = 1233,
+   SVGA_3D_CMD_RESERVED7                                  = 1234,
+   SVGA_3D_CMD_RESERVED8                                  = 1235,
 
    SVGA_3D_CMD_GROW_OTABLE                                = 1236,
    SVGA_3D_CMD_DX_GROW_COTABLE                            = 1237,
@@ -298,7 +291,46 @@ typedef enum {
    SVGA_3D_CMD_DX_PRED_CONVERT                            = 1243,
    SVGA_3D_CMD_WHOLE_SURFACE_COPY                         = 1244,
 
-   SVGA_3D_CMD_MAX                                        = 1245,
+   SVGA_3D_CMD_DX_DEFINE_UA_VIEW                          = 1245,
+   SVGA_3D_CMD_DX_DESTROY_UA_VIEW                         = 1246,
+   SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT                      = 1247,
+   SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT                     = 1248,
+   SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT                    = 1249,
+   SVGA_3D_CMD_DX_SET_UA_VIEWS                            = 1250,
+
+   SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT         = 1251,
+   SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT                 = 1252,
+   SVGA_3D_CMD_DX_DISPATCH                                = 1253,
+   SVGA_3D_CMD_DX_DISPATCH_INDIRECT                       = 1254,
+
+   SVGA_3D_CMD_WRITE_ZERO_SURFACE                         = 1255,
+   SVGA_3D_CMD_HINT_ZERO_SURFACE                          = 1256,
+   SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER                      = 1257,
+   SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT                     = 1258,
+
+   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1259,
+   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1260,
+   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1261,
+   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1262,
+   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1263,
+   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1264,
+
+   SVGA_3D_CMD_RESERVED2_1                                = 1265,
+
+   SVGA_3D_CMD_RESERVED2_2                                = 1266,
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V4                       = 1267,
+   SVGA_3D_CMD_DX_SET_CS_UA_VIEWS                         = 1268,
+   SVGA_3D_CMD_DX_SET_MIN_LOD                             = 1269,
+   SVGA_3D_CMD_RESERVED2_3                                = 1270,
+   SVGA_3D_CMD_RESERVED2_4                                = 1271,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2             = 1272,
+   SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB            = 1273,
+   SVGA_3D_CMD_DX_SET_SHADER_IFACE                        = 1274,
+   SVGA_3D_CMD_DX_BIND_STREAMOUTPUT                       = 1275,
+   SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS            = 1276,
+   SVGA_3D_CMD_DX_BIND_SHADER_IFACE                       = 1277,
+
+   SVGA_3D_CMD_MAX                                        = 1278,
    SVGA_3D_CMD_FUTURE_MAX                                 = 3000
 } SVGAFifo3dCmdId;
 
@@ -334,6 +366,7 @@ struct {
    uint32                      sid;
    SVGA3dSurface1Flags         surfaceFlags;
    SVGA3dSurfaceFormat         format;
+
    /*
     * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
     * structures must have the same value of numMipLevels field.
@@ -341,6 +374,7 @@ struct {
     * numMipLevels set to 0.
     */
    SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+
    /*
     * Followed by an SVGA3dSize structure for each mip level in each face.
     *
@@ -360,6 +394,7 @@ struct {
    uint32                      sid;
    SVGA3dSurface1Flags         surfaceFlags;
    SVGA3dSurfaceFormat         format;
+
    /*
     * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
     * structures must have the same value of numMipLevels field.
@@ -369,6 +404,7 @@ struct {
    SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
    uint32                      multisampleCount;
    SVGA3dTextureFilter         autogenFilter;
+
    /*
     * Followed by an SVGA3dSize structure for each mip level in each face.
     *
@@ -517,6 +553,18 @@ typedef
 struct {
    SVGA3dSurfaceImageId  src;
    SVGA3dSurfaceImageId  dest;
+   SVGA3dBox boxSrc;
+   SVGA3dBox boxDest;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBltNonMSToMS;
+/* SVGA_3D_CMD_SURFACE_STRETCHBLT_NON_MS_TO_MS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
    SVGA3dBox             boxSrc;
    SVGA3dBox             boxDest;
    SVGA3dStretchBltMode  mode;
@@ -555,6 +603,7 @@ struct {
    SVGAGuestImage guest;
    SVGA3dSurfaceImageId host;
    SVGA3dTransferType transfer;
+
    /*
     * Followed by variable number of SVGA3dCopyBox structures. For consistency
     * in all clipping logic and coordinate translation, we define the
@@ -789,7 +838,7 @@ struct {
 
    uint32 indexBufferSid;     /* Valid index buffer sid. */
    uint32 indexBufferOffset;  /* Byte offset into the vertex buffer, almost */
-			      /* always 0 for DX9 guests, non-zero for OpenGL */
+                              /* always 0 for pre-SM guests, non-zero for OpenGL */
                               /* guests.  We can't represent non-multiple of */
                               /* stride offsets in D3D9Renderer... */
    uint8 indexBufferStride;   /* Allowable values = 1, 2, or 4 */
@@ -1228,6 +1277,7 @@ struct SVGA3dCmdLogicOpsBitBlt {
    SVGA3dSurfaceImageId src;
    SVGA3dSurfaceImageId dst;
    SVGA3dLogicOp logicOp;
+   SVGA3dLogicOpRop3 logicOpRop3;
    /* Followed by variable number of SVGA3dCopyBox structures */
 }
 #include "vmware_pack_end.h"
@@ -1247,7 +1297,8 @@ struct SVGA3dCmdLogicOpsTransBlt {
    uint32 color;
    uint32 flags;
    SVGA3dBox srcBox;
-   SVGA3dBox dstBox;
+   SVGA3dSignedBox dstBox;
+   SVGA3dBox clipBox;
 }
 #include "vmware_pack_end.h"
 SVGA3dCmdLogicOpsTransBlt;   /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
@@ -1266,7 +1317,8 @@ struct SVGA3dCmdLogicOpsStretchBlt {
    uint16 mode;
    uint16 flags;
    SVGA3dBox srcBox;
-   SVGA3dBox dstBox;
+   SVGA3dSignedBox dstBox;
+   SVGA3dBox clipBox;
 }
 #include "vmware_pack_end.h"
 SVGA3dCmdLogicOpsStretchBlt;   /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
@@ -1283,6 +1335,7 @@ struct SVGA3dCmdLogicOpsColorFill {
    SVGA3dSurfaceImageId dst;
    uint32 color;
    SVGA3dLogicOp logicOp;
+   SVGA3dLogicOpRop3 logicOpRop3;
    /* Followed by variable number of SVGA3dRect structures. */
 }
 #include "vmware_pack_end.h"
@@ -1302,7 +1355,8 @@ struct SVGA3dCmdLogicOpsAlphaBlend {
    uint32 alphaVal;
    uint32 flags;
    SVGA3dBox srcBox;
-   SVGA3dBox dstBox;
+   SVGA3dSignedBox dstBox;
+   SVGA3dBox clipBox;
 }
 #include "vmware_pack_end.h"
 SVGA3dCmdLogicOpsAlphaBlend;   /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
@@ -1365,8 +1419,9 @@ struct {
    SVGA3dSurface2Flags surface2Flags;
    uint8 multisamplePattern;
    uint8 qualityLevel;
-   uint8  pad0[2];
-   uint32 pad1[3];
+   uint16 bufferByteStride;
+   float minLOD;
+   uint32 pad0[2];
 }
 #include "vmware_pack_end.h"
 SVGAOTableSurfaceEntry;
@@ -1543,7 +1598,7 @@ typedef
 #include "vmware_pack_begin.h"
 struct {
    SVGAOTableType type;
-   PPN baseAddress;
+   PPN32 baseAddress;
    uint32 sizeInBytes;
    uint32 validSizeInBytes;
    SVGAMobFormat ptDepth;
@@ -1599,7 +1654,7 @@ typedef
 struct SVGA3dCmdDefineGBMob {
    SVGAMobId mobid;
    SVGAMobFormat ptDepth;
-   PPN base;
+   PPN32 base;
    uint32 sizeInBytes;
 }
 #include "vmware_pack_end.h"
@@ -1618,7 +1673,6 @@ struct SVGA3dCmdDestroyGBMob {
 #include "vmware_pack_end.h"
 SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
 
-
 /*
  * Define a memory object (Mob) in the OTable with a PPN64 base.
  */
@@ -1719,6 +1773,27 @@ struct SVGA3dCmdDefineGBSurface_v3 {
 SVGA3dCmdDefineGBSurface_v3;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
 
 /*
+ * Defines a guest-backed surface, adding buffer byte stride.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v4 {
+   uint32 sid;
+   SVGA3dSurfaceAllFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dMSPattern multisamplePattern;
+   SVGA3dMSQualityLevel qualityLevel;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 bufferByteStride;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v4;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V4 */
+
+/*
  * Destroy a guest-backed surface.
  */
 
@@ -2181,4 +2256,20 @@ SVGA3dCmdScreenCopy;  /* SVGA_3D_CMD_SCREEN_COPY */
 #define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
 #define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
 
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWriteZeroSurface;  /* SVGA_3D_CMD_WRITE_ZERO_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHintZeroSurface;  /* SVGA_3D_CMD_HINT_ZERO_SURFACE */
+
 #endif /* _SVGA3D_CMD_H_ */
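
For orientation, SVGA_3D_CMD_MAX is one past the last defined command id,
while SVGA_3D_CMD_FUTURE_MAX reserves headroom for later device versions. A
minimal sketch of the range check a command parser could perform against
these limits, assuming the SVGA_3D_CMD_BASE constant defined earlier in this
header and C99's stdbool; svga3d_cmd_id_defined() is a hypothetical helper,
not something the patch adds:

#include <stdbool.h>

/*
 * Ids in [SVGA_3D_CMD_BASE, SVGA_3D_CMD_MAX) are defined today, while
 * [SVGA_3D_CMD_MAX, SVGA_3D_CMD_FUTURE_MAX) is reserved for growth and
 * should be rejected by a parser built against this header.
 */
static bool svga3d_cmd_id_defined(uint32 cmd_id)
{
	return cmd_id >= SVGA_3D_CMD_BASE && cmd_id < SVGA_3D_CMD_MAX;
}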
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
index f256560..617b468 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2019 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -39,6 +39,8 @@
 
 #include "includeCheck.h"
 
+#include "svga3d_types.h"
+
 /*
  * 3D Hardware Version
  *
@@ -69,381 +71,408 @@ typedef enum {
  * DevCap indexes.
  */
 
-typedef enum {
-   SVGA3D_DEVCAP_INVALID                           = ((uint32)-1),
-   SVGA3D_DEVCAP_3D                                = 0,
-   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+typedef uint32 SVGA3dDevCapIndex;
 
-   /*
-    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
-    * fixed-function texture units available. Each of these units
-    * work in both FFP and Shader modes, and they support texture
-    * transforms and texture coordinates. The host may have additional
-    * texture image units that are only usable with shaders.
-    */
-   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,
-   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
-   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
-   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
-   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
-   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
-   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
-   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
-   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12,
-   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13,
-   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14,
-   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
-   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
-   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
-   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
-   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
-   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
-   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
-   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
-   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
-   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
-   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
-   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
-   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
-   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
-   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
-   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
-   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
-   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
-   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
-   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
-   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
-   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+#define SVGA3D_DEVCAP_INVALID                              ((uint32)-1)
+#define SVGA3D_DEVCAP_3D                                   0
+#define SVGA3D_DEVCAP_MAX_LIGHTS                           1
 
-   /*
-    * There is a hole in our devcap definitions for
-    * historical reasons.
-    *
-    * Define a constant just for completeness.
-    */
-   SVGA3D_DEVCAP_MISSING62                         = 62,
+/*
+ * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. These units
+ * work in both FFP and Shader modes, and they support texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ */
+#define SVGA3D_DEVCAP_MAX_TEXTURES                         2
+#define SVGA3D_DEVCAP_MAX_CLIP_PLANES                      3
+#define SVGA3D_DEVCAP_VERTEX_SHADER_VERSION                4
+#define SVGA3D_DEVCAP_VERTEX_SHADER                        5
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION              6
+#define SVGA3D_DEVCAP_FRAGMENT_SHADER                      7
+#define SVGA3D_DEVCAP_MAX_RENDER_TARGETS                   8
+#define SVGA3D_DEVCAP_S23E8_TEXTURES                       9
+#define SVGA3D_DEVCAP_S10E5_TEXTURES                       10
+#define SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND                11
+#define SVGA3D_DEVCAP_D16_BUFFER_FORMAT                    12
+#define SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT                  13
+#define SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT                  14
+#define SVGA3D_DEVCAP_QUERY_TYPES                          15
+#define SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING            16
+#define SVGA3D_DEVCAP_MAX_POINT_SIZE                       17
+#define SVGA3D_DEVCAP_MAX_SHADER_TEXTURES                  18
+#define SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                    19
+#define SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                   20
+#define SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                    21
+#define SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                   22
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO             23
+#define SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY               24
+#define SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT                  25
+#define SVGA3D_DEVCAP_MAX_VERTEX_INDEX                     26
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS       27
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS     28
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS              29
+#define SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS            30
+#define SVGA3D_DEVCAP_TEXTURE_OPS                          31
+#define SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8                  32
+#define SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8                  33
+#define SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10               34
+#define SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5                  35
+#define SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5                  36
+#define SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4                  37
+#define SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                    38
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16               39
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8         40
+#define SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                    41
+#define SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8                42
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D16                     43
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                   44
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                   45
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT1                      46
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT2                      47
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT3                      48
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT4                      49
+#define SVGA3D_DEVCAP_SURFACEFMT_DXT5                      50
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8              51
+#define SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10               52
+#define SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8                  53
+#define SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8                  54
+#define SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                    55
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                   56
+#define SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                   57
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5                  58
+#define SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8                  59
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5                60
+#define SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8                61
 
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+/*
+ * There is a hole in our devcap definitions for
+ * historical reasons.
+ *
+ * Define a constant just for completeness.
+ */
+#define SVGA3D_DEVCAP_MISSING62                            62
 
-   /*
-    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
-    * render targets.  This does not include the depth or stencil targets.
-    */
-   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+#define SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES           63
 
-   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
-   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
-   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
-   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
-   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
-   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
-   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
-   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
-   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
-   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
-   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
-   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+/*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets.  This does not include the depth or stencil targets.
+ */
+#define SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS      64
 
-   /*
-    * This is the maximum number of SVGA context IDs that the guest
-    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
-    */
-   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+#define SVGA3D_DEVCAP_SURFACEFMT_V16U16                    65
+#define SVGA3D_DEVCAP_SURFACEFMT_G16R16                    66
+#define SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16              67
+#define SVGA3D_DEVCAP_SURFACEFMT_UYVY                      68
+#define SVGA3D_DEVCAP_SURFACEFMT_YUY2                      69
 
-   /*
-    * This is the maximum number of SVGA surface IDs that the guest
-    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
-    */
-   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD4                                70
+#define SVGA3D_DEVCAP_DEAD5                                71
+#define SVGA3D_DEVCAP_DEAD7                                72
+#define SVGA3D_DEVCAP_DEAD6                                73
 
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+#define SVGA3D_DEVCAP_AUTOGENMIPMAPS                       74
+#define SVGA3D_DEVCAP_SURFACEFMT_NV12                      75
+#define SVGA3D_DEVCAP_DEAD10                               76
 
-   SVGA3D_DEVCAP_SURFACEFMT_ATI1                   = 82,
-   SVGA3D_DEVCAP_SURFACEFMT_ATI2                   = 83,
+/*
+ * This is the maximum number of SVGA context IDs that the guest
+ * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+ */
+#define SVGA3D_DEVCAP_MAX_CONTEXT_IDS                      77
 
-   /*
-    * Deprecated.
-    */
-   SVGA3D_DEVCAP_DEAD1                             = 84,
+/*
+ * This is the maximum number of SVGA surface IDs that the guest
+ * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+ */
+#define SVGA3D_DEVCAP_MAX_SURFACE_IDS                      78
 
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
-    * ored together, one for every type of video decoding supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                    79
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                    80
+#define SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT               81
 
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
-    * ored together, one for every type of video processing supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI1                      82
+#define SVGA3D_DEVCAP_SURFACEFMT_ATI2                      83
 
-   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
-   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
-   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
-   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD1                                84
+#define SVGA3D_DEVCAP_DEAD8                                85
+#define SVGA3D_DEVCAP_DEAD9                                86
 
-   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+#define SVGA3D_DEVCAP_LINE_AA                              87  /* boolean */
+#define SVGA3D_DEVCAP_LINE_STIPPLE                         88  /* boolean */
+#define SVGA3D_DEVCAP_MAX_LINE_WIDTH                       89  /* float */
+#define SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                    90  /* float */
 
-   /*
-    * Does the host support the SVGA logic ops commands?
-    */
-   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+#define SVGA3D_DEVCAP_SURFACEFMT_YV12                      91
 
-   /*
-    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
-    */
-   SVGA3D_DEVCAP_TS_COLOR_KEY                      = 93, /* boolean */
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD3                                92
 
-   /*
-    * Deprecated.
-    */
-   SVGA3D_DEVCAP_DEAD2                             = 94,
+/*
+ * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+ */
+#define SVGA3D_DEVCAP_TS_COLOR_KEY                         93 /* boolean */
 
-   /*
-    * Does the device support DXContexts?
-    */
-   SVGA3D_DEVCAP_DXCONTEXT                         = 95,
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD2                                94
 
-   /*
-    * What is the maximum size of a texture array?
-    *
-    * (Even if this cap is zero, cubemaps are still allowed.)
-    */
-   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
+/*
+ * Does the device support DXContexts?
+ */
+#define SVGA3D_DEVCAP_DXCONTEXT                            95
 
-   /*
-    * What is the maximum number of vertex buffers or vertex input registers
-    * that can be expected to work correctly with a DXContext?
-    *
-    * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
-    * anything in excess of this cap is not guaranteed to render correctly.
-    *
-    * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
-    * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
-    * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1,
-    * but only the registers up to this cap value are guaranteed to render
-    * correctly.
-    *
-    * If guest-drivers are able to expose a lower-limit, it's recommended
-    * that they clamp to this value.  Otherwise, the host will make a
-    * best-effort on case-by-case basis if guests exceed this.
-    */
-   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
+/*
+ * Deprecated.
+ */
+#define SVGA3D_DEVCAP_DEAD11                               96
 
-   /*
-    * What is the maximum number of constant buffers that can be expected to
-    * work correctly with a DX context?
-    *
-    * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
-    * anything in excess of this cap is not guaranteed to render correctly.
-    *
-    * If guest-drivers are able to expose a lower-limit, it's recommended
-    * that they clamp to this value.  Otherwise, the host will make a
-    * best-effort on case-by-case basis if guests exceed this.
-    */
-   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
+/*
+ * What is the maximum number of vertex buffers or vertex input registers
+ * that can be expected to work correctly with a DXContext?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+ * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+ * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1
+ * cap, but only the registers up to this cap value are guaranteed to
+ * render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value.  Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS                 97
 
-   /*
-    * Does the device support provoking vertex control?
-    *
-    * If this cap is present, the provokingVertexLast field in the
-    * rasterizer state is enabled.  (Guests can then set it to FALSE,
-    * meaning that the first vertex is the provoking vertex, or TRUE,
-    * meaning that the last verteix is the provoking vertex.)
-    *
-    * If this cap is FALSE, then guests should set the provokingVertexLast
-    * to FALSE, otherwise rendering behavior is undefined.
-    */
-   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
+/*
+ * What is the maximum number of constant buffers that can be expected to
+ * work correctly with a DX context?
+ *
+ * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+ * anything in excess of this cap is not guaranteed to render correctly.
+ *
+ * If guest drivers are able to expose a lower limit, it's recommended
+ * that they clamp to this value.  Otherwise, the host will make a
+ * best effort on a case-by-case basis if guests exceed this.
+ */
+#define SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS              98
 
-   SVGA3D_DEVCAP_DXFMT_X8R8G8B8                    = 100,
-   SVGA3D_DEVCAP_DXFMT_A8R8G8B8                    = 101,
-   SVGA3D_DEVCAP_DXFMT_R5G6B5                      = 102,
-   SVGA3D_DEVCAP_DXFMT_X1R5G5B5                    = 103,
-   SVGA3D_DEVCAP_DXFMT_A1R5G5B5                    = 104,
-   SVGA3D_DEVCAP_DXFMT_A4R4G4B4                    = 105,
-   SVGA3D_DEVCAP_DXFMT_Z_D32                       = 106,
-   SVGA3D_DEVCAP_DXFMT_Z_D16                       = 107,
-   SVGA3D_DEVCAP_DXFMT_Z_D24S8                     = 108,
-   SVGA3D_DEVCAP_DXFMT_Z_D15S1                     = 109,
-   SVGA3D_DEVCAP_DXFMT_LUMINANCE8                  = 110,
-   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4           = 111,
-   SVGA3D_DEVCAP_DXFMT_LUMINANCE16                 = 112,
-   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8           = 113,
-   SVGA3D_DEVCAP_DXFMT_DXT1                        = 114,
-   SVGA3D_DEVCAP_DXFMT_DXT2                        = 115,
-   SVGA3D_DEVCAP_DXFMT_DXT3                        = 116,
-   SVGA3D_DEVCAP_DXFMT_DXT4                        = 117,
-   SVGA3D_DEVCAP_DXFMT_DXT5                        = 118,
-   SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
-   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
-   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
-   SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1                = 122,
-   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
-   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
-   SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
-   SVGA3D_DEVCAP_DXFMT_V8U8                        = 126,
-   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                    = 127,
-   SVGA3D_DEVCAP_DXFMT_CxV8U8                      = 128,
-   SVGA3D_DEVCAP_DXFMT_X8L8V8U8                    = 129,
-   SVGA3D_DEVCAP_DXFMT_A2W10V10U10                 = 130,
-   SVGA3D_DEVCAP_DXFMT_ALPHA8                      = 131,
-   SVGA3D_DEVCAP_DXFMT_R_S10E5                     = 132,
-   SVGA3D_DEVCAP_DXFMT_R_S23E8                     = 133,
-   SVGA3D_DEVCAP_DXFMT_RG_S10E5                    = 134,
-   SVGA3D_DEVCAP_DXFMT_RG_S23E8                    = 135,
-   SVGA3D_DEVCAP_DXFMT_BUFFER                      = 136,
-   SVGA3D_DEVCAP_DXFMT_Z_D24X8                     = 137,
-   SVGA3D_DEVCAP_DXFMT_V16U16                      = 138,
-   SVGA3D_DEVCAP_DXFMT_G16R16                      = 139,
-   SVGA3D_DEVCAP_DXFMT_A16B16G16R16                = 140,
-   SVGA3D_DEVCAP_DXFMT_UYVY                        = 141,
-   SVGA3D_DEVCAP_DXFMT_YUY2                        = 142,
-   SVGA3D_DEVCAP_DXFMT_NV12                        = 143,
-   SVGA3D_DEVCAP_DXFMT_AYUV                        = 144,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS       = 145,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT           = 146,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT           = 147,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS          = 148,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT             = 149,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT              = 150,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT              = 151,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS       = 152,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT           = 153,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM          = 154,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT           = 155,
-   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS             = 156,
-   SVGA3D_DEVCAP_DXFMT_R32G32_UINT                 = 157,
-   SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
-   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
-   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
-   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24             = 161,
-   SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT              = 162,
-   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
-   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
-   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS           = 166,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM              = 167,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB         = 168,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT               = 169,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT               = 170,
-   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS             = 171,
-   SVGA3D_DEVCAP_DXFMT_R16G16_UINT                 = 172,
-   SVGA3D_DEVCAP_DXFMT_R16G16_SINT                 = 173,
-   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                = 174,
-   SVGA3D_DEVCAP_DXFMT_D32_FLOAT                   = 175,
-   SVGA3D_DEVCAP_DXFMT_R32_UINT                    = 176,
-   SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
-   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
-   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
-   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8                = 180,
-   SVGA3D_DEVCAP_DXFMT_X24_G8_UINT                 = 181,
-   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
-   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
-   SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
-   SVGA3D_DEVCAP_DXFMT_R8G8_SINT                   = 185,
-   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                = 186,
-   SVGA3D_DEVCAP_DXFMT_R16_UNORM                   = 187,
-   SVGA3D_DEVCAP_DXFMT_R16_UINT                    = 188,
-   SVGA3D_DEVCAP_DXFMT_R16_SNORM                   = 189,
-   SVGA3D_DEVCAP_DXFMT_R16_SINT                    = 190,
-   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                 = 191,
-   SVGA3D_DEVCAP_DXFMT_R8_UNORM                    = 192,
-   SVGA3D_DEVCAP_DXFMT_R8_UINT                     = 193,
-   SVGA3D_DEVCAP_DXFMT_R8_SNORM                    = 194,
-   SVGA3D_DEVCAP_DXFMT_R8_SINT                     = 195,
-   SVGA3D_DEVCAP_DXFMT_P8                          = 196,
-   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP          = 197,
-   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM             = 198,
-   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM             = 199,
-   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                = 200,
-   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB              = 201,
-   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                = 202,
-   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB              = 203,
-   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                = 204,
-   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB              = 205,
-   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                = 206,
-   SVGA3D_DEVCAP_DXFMT_ATI1                        = 207,
-   SVGA3D_DEVCAP_DXFMT_BC4_SNORM                   = 208,
-   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                = 209,
-   SVGA3D_DEVCAP_DXFMT_ATI2                        = 210,
-   SVGA3D_DEVCAP_DXFMT_BC5_SNORM                   = 211,
-   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM  = 212,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS           = 213,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB         = 214,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS           = 215,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB         = 216,
-   SVGA3D_DEVCAP_DXFMT_Z_DF16                      = 217,
-   SVGA3D_DEVCAP_DXFMT_Z_DF24                      = 218,
-   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                 = 219,
-   SVGA3D_DEVCAP_DXFMT_YV12                        = 220,
-   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT          = 221,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT          = 222,
-   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM          = 223,
-   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                = 224,
-   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM           = 225,
-   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM              = 226,
-   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                = 227,
-   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                = 228,
-   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                = 229,
-   SVGA3D_DEVCAP_DXFMT_R32_FLOAT                   = 230,
-   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                  = 231,
-   SVGA3D_DEVCAP_DXFMT_R16_FLOAT                   = 232,
-   SVGA3D_DEVCAP_DXFMT_D16_UNORM                   = 233,
-   SVGA3D_DEVCAP_DXFMT_A8_UNORM                    = 234,
-   SVGA3D_DEVCAP_DXFMT_BC1_UNORM                   = 235,
-   SVGA3D_DEVCAP_DXFMT_BC2_UNORM                   = 236,
-   SVGA3D_DEVCAP_DXFMT_BC3_UNORM                   = 237,
-   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                = 238,
-   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM              = 239,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM              = 240,
-   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM              = 241,
-   SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
-   SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
+/*
+ * Does the device support provoking vertex control?
+ *
+ * If this cap is present, the provokingVertexLast field in the
+ * rasterizer state is enabled.  (Guests can then set it to FALSE,
+ * meaning that the first vertex is the provoking vertex, or TRUE,
+ * meaning that the last vertex is the provoking vertex.)
+ *
+ * If this cap is FALSE, then guests should set provokingVertexLast
+ * to FALSE; otherwise rendering behavior is undefined.
+ */
+#define SVGA3D_DEVCAP_DX_PROVOKING_VERTEX                  99
 
-   /*
-    * Advertises shaderModel 4.1 support, independent blend-states,
-    * cube-map arrays, and a higher vertex input registers limit.
-    *
-    * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
-    */
-   SVGA3D_DEVCAP_SM41                              = 244,
+#define SVGA3D_DEVCAP_DXFMT_X8R8G8B8                       100
+#define SVGA3D_DEVCAP_DXFMT_A8R8G8B8                       101
+#define SVGA3D_DEVCAP_DXFMT_R5G6B5                         102
+#define SVGA3D_DEVCAP_DXFMT_X1R5G5B5                       103
+#define SVGA3D_DEVCAP_DXFMT_A1R5G5B5                       104
+#define SVGA3D_DEVCAP_DXFMT_A4R4G4B4                       105
+#define SVGA3D_DEVCAP_DXFMT_Z_D32                          106
+#define SVGA3D_DEVCAP_DXFMT_Z_D16                          107
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8                        108
+#define SVGA3D_DEVCAP_DXFMT_Z_D15S1                        109
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8                     110
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4              111
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE16                    112
+#define SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8              113
+#define SVGA3D_DEVCAP_DXFMT_DXT1                           114
+#define SVGA3D_DEVCAP_DXFMT_DXT2                           115
+#define SVGA3D_DEVCAP_DXFMT_DXT3                           116
+#define SVGA3D_DEVCAP_DXFMT_DXT4                           117
+#define SVGA3D_DEVCAP_DXFMT_DXT5                           118
+#define SVGA3D_DEVCAP_DXFMT_BUMPU8V8                       119
+#define SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                     120
+#define SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                   121
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1                   122
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                     123
+#define SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                     124
+#define SVGA3D_DEVCAP_DXFMT_A2R10G10B10                    125
+#define SVGA3D_DEVCAP_DXFMT_V8U8                           126
+#define SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                       127
+#define SVGA3D_DEVCAP_DXFMT_CxV8U8                         128
+#define SVGA3D_DEVCAP_DXFMT_X8L8V8U8                       129
+#define SVGA3D_DEVCAP_DXFMT_A2W10V10U10                    130
+#define SVGA3D_DEVCAP_DXFMT_ALPHA8                         131
+#define SVGA3D_DEVCAP_DXFMT_R_S10E5                        132
+#define SVGA3D_DEVCAP_DXFMT_R_S23E8                        133
+#define SVGA3D_DEVCAP_DXFMT_RG_S10E5                       134
+#define SVGA3D_DEVCAP_DXFMT_RG_S23E8                       135
+#define SVGA3D_DEVCAP_DXFMT_BUFFER                         136
+#define SVGA3D_DEVCAP_DXFMT_Z_D24X8                        137
+#define SVGA3D_DEVCAP_DXFMT_V16U16                         138
+#define SVGA3D_DEVCAP_DXFMT_G16R16                         139
+#define SVGA3D_DEVCAP_DXFMT_A16B16G16R16                   140
+#define SVGA3D_DEVCAP_DXFMT_UYVY                           141
+#define SVGA3D_DEVCAP_DXFMT_YUY2                           142
+#define SVGA3D_DEVCAP_DXFMT_NV12                           143
+#define SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD2                   144
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS          145
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT              146
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT              147
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS             148
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT                149
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT                 150
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT                 151
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS          152
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT              153
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM             154
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT              155
+#define SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS                156
+#define SVGA3D_DEVCAP_DXFMT_R32G32_UINT                    157
+#define SVGA3D_DEVCAP_DXFMT_R32G32_SINT                    158
+#define SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS              159
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT           160
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24                161
+#define SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT                 162
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS           163
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT               164
+#define SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT                165
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS              166
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM                 167
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB            168
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT                  169
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT                  170
+#define SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS                171
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UINT                    172
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SINT                    173
+#define SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                   174
+#define SVGA3D_DEVCAP_DXFMT_D32_FLOAT                      175
+#define SVGA3D_DEVCAP_DXFMT_R32_UINT                       176
+#define SVGA3D_DEVCAP_DXFMT_R32_SINT                       177
+#define SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS                 178
+#define SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT              179
+#define SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8                   180
+#define SVGA3D_DEVCAP_DXFMT_X24_G8_UINT                    181
+#define SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS                  182
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                     183
+#define SVGA3D_DEVCAP_DXFMT_R8G8_UINT                      184
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SINT                      185
+#define SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                   186
+#define SVGA3D_DEVCAP_DXFMT_R16_UNORM                      187
+#define SVGA3D_DEVCAP_DXFMT_R16_UINT                       188
+#define SVGA3D_DEVCAP_DXFMT_R16_SNORM                      189
+#define SVGA3D_DEVCAP_DXFMT_R16_SINT                       190
+#define SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                    191
+#define SVGA3D_DEVCAP_DXFMT_R8_UNORM                       192
+#define SVGA3D_DEVCAP_DXFMT_R8_UINT                        193
+#define SVGA3D_DEVCAP_DXFMT_R8_SNORM                       194
+#define SVGA3D_DEVCAP_DXFMT_R8_SINT                        195
+#define SVGA3D_DEVCAP_DXFMT_P8                             196
+#define SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP             197
+#define SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM                198
+#define SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM                199
+#define SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                   200
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB                 201
+#define SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                   202
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB                 203
+#define SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                   204
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB                 205
+#define SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                   206
+#define SVGA3D_DEVCAP_DXFMT_ATI1                           207
+#define SVGA3D_DEVCAP_DXFMT_BC4_SNORM                      208
+#define SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                   209
+#define SVGA3D_DEVCAP_DXFMT_ATI2                           210
+#define SVGA3D_DEVCAP_DXFMT_BC5_SNORM                      211
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM     212
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS              213
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB            214
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS              215
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB            216
+#define SVGA3D_DEVCAP_DXFMT_Z_DF16                         217
+#define SVGA3D_DEVCAP_DXFMT_Z_DF24                         218
+#define SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                    219
+#define SVGA3D_DEVCAP_DXFMT_YV12                           220
+#define SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT             221
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT             222
+#define SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM             223
+#define SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                   224
+#define SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM              225
+#define SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM                 226
+#define SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                   227
+#define SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                   228
+#define SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                   229
+#define SVGA3D_DEVCAP_DXFMT_R32_FLOAT                      230
+#define SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                     231
+#define SVGA3D_DEVCAP_DXFMT_R16_FLOAT                      232
+#define SVGA3D_DEVCAP_DXFMT_D16_UNORM                      233
+#define SVGA3D_DEVCAP_DXFMT_A8_UNORM                       234
+#define SVGA3D_DEVCAP_DXFMT_BC1_UNORM                      235
+#define SVGA3D_DEVCAP_DXFMT_BC2_UNORM                      236
+#define SVGA3D_DEVCAP_DXFMT_BC3_UNORM                      237
+#define SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                   238
+#define SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM                 239
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM                 240
+#define SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM                 241
+#define SVGA3D_DEVCAP_DXFMT_BC4_UNORM                      242
+#define SVGA3D_DEVCAP_DXFMT_BC5_UNORM                      243
 
-   SVGA3D_DEVCAP_MULTISAMPLE_2X                    = 245,
-   SVGA3D_DEVCAP_MULTISAMPLE_4X                    = 246,
+/*
+ * Advertises shaderModel 4.1 support, independent blend-states,
+ * cube-map arrays, and a higher vertex input register limit.
+ *
+ * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+ */
+#define SVGA3D_DEVCAP_SM41                                 244
+#define SVGA3D_DEVCAP_MULTISAMPLE_2X                       245
+#define SVGA3D_DEVCAP_MULTISAMPLE_4X                       246
 
-   SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
-} SVGA3dDevCapIndex;
+/*
+ * Indicates that the device has rendering support for
+ * the full multisample quality.  If this cap is not present,
+ * the host may or may not support full quality rendering.
+ *
+ * See also SVGA_REG_MS_HINT_RESOLVED.
+ */
+#define SVGA3D_DEVCAP_MS_FULL_QUALITY                      247
+
+/*
+ * Advertises support for the SVGA3D LogicOps commands.
+ */
+#define SVGA3D_DEVCAP_LOGICOPS                             248
+
+/*
+ * Advertises support for using logicOps in the DXBlendStates.
+ */
+#define SVGA3D_DEVCAP_LOGIC_BLENDOPS                       249
+
+/*
+ * Note DXFMT range is now non-contiguous.
+ */
+#define SVGA3D_DEVCAP_RESERVED_1                           250
+#define SVGA3D_DEVCAP_DXFMT_BC6H_TYPELESS                  251
+#define SVGA3D_DEVCAP_DXFMT_BC6H_UF16                      252
+#define SVGA3D_DEVCAP_DXFMT_BC6H_SF16                      253
+#define SVGA3D_DEVCAP_DXFMT_BC7_TYPELESS                   254
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM                      255
+#define SVGA3D_DEVCAP_DXFMT_BC7_UNORM_SRGB                 256
+#define SVGA3D_DEVCAP_RESERVED_2                           257
+
+#define SVGA3D_DEVCAP_SM5                                  258
+#define SVGA3D_DEVCAP_MULTISAMPLE_8X                       259
+
+/* This must be the last index. */
+#define SVGA3D_DEVCAP_MAX                                  260
 
 /*
  * Bit definitions for DXFMT devcaps
@@ -472,10 +501,10 @@ typedef enum {
 #define SVGA3D_DXFMT_MAX                      (1 << 10)
 
 typedef union {
-   Bool   b;
+   SVGA3dBool b;
    uint32 u;
-   int32  i;
-   float  f;
+   int32 i;
+   float f;
 } SVGA3dDevCapResult;
 
 #endif /* _SVGA3D_DEVCAPS_H_ */
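
The SVGA3dDevCapResult union above is interpreted per index: most caps
report booleans or uint32 counts, but a few, like
SVGA3D_DEVCAP_MAX_LINE_WIDTH, are annotated as float. A hedged sketch of
reading one back, assuming a hypothetical query_devcap() accessor that
returns the raw 32-bit value for a given index:

/* Sketch: pick the union member that matches the cap's documented type. */
static float svga3d_max_line_width(void)
{
	SVGA3dDevCapResult result;

	result.u = query_devcap(SVGA3D_DEVCAP_MAX_LINE_WIDTH);
	return result.f;	/* this index is documented as float */
}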
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
index 7a49c94..f703ac2 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2012-2015 VMware, Inc.
+ * Copyright 2012-2019 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -118,12 +118,14 @@ typedef uint8 SVGA3dMultisampleRastEnable;
 #define SVGA3D_DX_MAX_SRVIEWS 128
 #define SVGA3D_DX_MAX_CONSTBUFFERS 16
 #define SVGA3D_DX_MAX_SAMPLERS 16
+#define SVGA3D_DX_MAX_CLASS_INSTANCES 253
 
 #define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
 
 typedef uint32 SVGA3dShaderResourceViewId;
 typedef uint32 SVGA3dRenderTargetViewId;
 typedef uint32 SVGA3dDepthStencilViewId;
+typedef uint32 SVGA3dUAViewId;
 
 typedef uint32 SVGA3dShaderId;
 typedef uint32 SVGA3dElementLayoutId;
@@ -145,6 +147,17 @@ typedef union {
    float value[4];
 } SVGA3dRGBAFloat;
 
+typedef union {
+   struct {
+      uint32 r;
+      uint32 g;
+      uint32 b;
+      uint32 a;
+   };
+
+   uint32 value[4];
+} SVGA3dRGBAUint32;
+
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -249,6 +262,39 @@ struct SVGA3dCmdDXSetShader {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
 
+typedef union {
+   struct {
+      uint32 cbOffset : 12;
+      uint32 cbId     : 4;
+      uint32 baseSamp : 4;
+      uint32 baseTex  : 7;
+      uint32 reserved : 5;
+   };
+   uint32 value;
+} SVGA3dIfaceData;
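
The bit-field widths in SVGA3dIfaceData sum to 32 (12 + 4 + 4 + 7 + 5), so
the struct view overlays the 32-bit value member exactly. A build-time check
a consumer could add to guard that invariant, shown here with C11's
_Static_assert as a sketch; kernel code would typically use BUILD_BUG_ON
instead:

/* Sketch: assert the bit-fields pack into a single 32-bit word. */
_Static_assert(sizeof(SVGA3dIfaceData) == sizeof(uint32),
	       "SVGA3dIfaceData must be exactly 32 bits");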
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderIface {
+   SVGA3dShaderType type;
+   uint32 numClassInstances;
+   uint32 index;
+   uint32 iface;
+   SVGA3dIfaceData data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderIface; /* SVGA_3D_CMD_DX_SET_SHADER_IFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShaderIface {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShaderIface; /* SVGA_3D_CMD_DX_BIND_SHADER_IFACE */
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXSetSamplers {
@@ -306,6 +352,26 @@ SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
 
 typedef
 #include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstancedIndirect {
+   SVGA3dSurfaceId argsBufferSid;
+   uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstancedIndirect {
+   SVGA3dSurfaceId argsBufferSid;
+   uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstancedIndirect;
+/* SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
 struct SVGA3dCmdDXDrawAuto {
    uint32 pad0;
 }
@@ -314,6 +380,27 @@ SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
 
 typedef
 #include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatch {
+   uint32 threadGroupCountX;
+   uint32 threadGroupCountY;
+   uint32 threadGroupCountZ;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatch;
+/* SVGA_3D_CMD_DX_DISPATCH */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDispatchIndirect {
+   SVGA3dSurfaceId argsBufferSid;
+   uint32 byteOffsetForArgs;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDispatchIndirect;
+/* SVGA_3D_CMD_DX_DISPATCH_INDIRECT */
+
+typedef
+#include "vmware_pack_begin.h"
 struct SVGA3dCmdDXSetInputLayout {
    SVGA3dElementLayoutId elementLayoutId;
 }
@@ -525,7 +612,7 @@ struct MKS3dDXSOState {
    uint32 offset;       /* Starting offset */
    uint32 intOffset;    /* Internal offset */
    uint32 vertexCount;  /* vertices written */
-   uint32 sizeInBytes;  /* max bytes to write */
+   uint32 dead;
 }
 #include "vmware_pack_end.h"
 SVGA3dDXSOState;
@@ -786,6 +873,31 @@ struct SVGA3dCmdDXTransferFromBuffer {
 SVGA3dCmdDXTransferFromBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
 
 
+#define SVGA3D_TRANSFER_TO_BUFFER_READBACK   (1 << 0)
+#define SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK (1 << 0)
+typedef uint32 SVGA3dTransferToBufferFlags;
+
+/*
+ * Raw byte wise transfer to a buffer surface from another surface
+ * of the requested box.  Supported if SVGA_CAP_DX2 is set.  This
+ * command does not take a context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferToBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dBox srcBox;
+   SVGA3dSurfaceId destSid;
+   uint32 destOffset;
+   uint32 destPitch;
+   uint32 destSlicePitch;
+   SVGA3dTransferToBufferFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferToBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER */
+
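Because SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK names every bit the device
understands, undefined flag bits can be rejected before the command is
processed. A minimal validation sketch; the helper is hypothetical and
assumes a C99/kernel bool:

/* Sketch: reject a transfer-to-buffer command carrying undefined flags. */
static bool transfer_to_buffer_flags_valid(SVGA3dTransferToBufferFlags flags)
{
	return (flags & ~SVGA3D_TRANSFER_TO_BUFFER_FLAGS_MASK) == 0;
}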
+
 /*
  * Raw byte wise transfer from a buffer surface into another surface
  * of the requested box.  Supported if SVGA3D_DEVCAP_DXCONTEXT is set.
@@ -905,6 +1017,20 @@ typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
 typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
 /* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
 
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetHSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetDSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetCSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET */
+
+
+#define SVGA3D_BUFFEREX_SRV_RAW        (1 << 0)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MAX  (1 << 1)
+#define SVGA3D_BUFFEREX_SRV_FLAGS_MASK (SVGA3D_BUFFEREX_SRV_FLAGS_MAX - 1)
+typedef uint32 SVGA3dBufferExFlags;
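
The mask derivation here follows a common pattern: SVGA3D_BUFFEREX_SRV_FLAGS_MAX
is the next power of two past the last defined flag, so FLAGS_MAX - 1 covers
exactly the defined bits. With the single RAW flag at bit 0, FLAGS_MAX is
(1 << 1) = 2 and the mask evaluates to 1, i.e. precisely
SVGA3D_BUFFEREX_SRV_RAW.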
 
 typedef
 #include "vmware_pack_begin.h"
@@ -925,7 +1051,7 @@ struct {
       struct {
          uint32 firstElement;
          uint32 numElements;
-         uint32 flags;
+         SVGA3dBufferExFlags flags;
          uint32 pad0;
       } bufferex;
    };
@@ -1072,6 +1198,32 @@ struct SVGA3dCmdDXDefineDepthStencilView {
 SVGA3dCmdDXDefineDepthStencilView;
 /* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
 
+/*
+ * Version 2 is needed in order to start validating and using the flags
+ * field.  Unfortunately the device wasn't validating or using the
+ * flags field and the driver wasn't initializing it in shipped code,
+ * so a new version of the command is needed to allow that code to
+ * continue to work.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView_v2 {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView_v2;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 */
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXDestroyDepthStencilView {
@@ -1081,6 +1233,138 @@ struct SVGA3dCmdDXDestroyDepthStencilView {
 SVGA3dCmdDXDestroyDepthStencilView;
 /* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
 
+
+#define SVGA3D_UABUFFER_RAW     (1 << 0)
+#define SVGA3D_UABUFFER_APPEND  (1 << 1)
+#define SVGA3D_UABUFFER_COUNTER (1 << 2)
+typedef uint32 SVGA3dUABufferFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         SVGA3dUABufferFlags flags;
+         uint32 padding0;
+         uint32 padding1;
+      } buffer;
+      struct {
+         uint32 mipSlice;
+         uint32 firstArraySlice;
+         uint32 arraySize;
+         uint32 padding0;
+         uint32 padding1;
+      } tex;  /* 1d, 2d */
+      struct {
+         uint32 mipSlice;
+         uint32 firstW;
+         uint32 wSize;
+         uint32 padding0;
+         uint32 padding1;
+      } tex3D;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dUAViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dUAViewDesc desc;
+   uint32 structureCount;
+   uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXUAViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineUAView {
+   SVGA3dUAViewId uaViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dUAViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineUAView;
+/* SVGA_3D_CMD_DX_DEFINE_UA_VIEW */
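
A hedged sketch of filling the new command body for a raw buffer UAV. The
helper is hypothetical; SVGA3D_R32_TYPELESS and SVGA3D_RESOURCE_BUFFER are
taken from svga3d_types.h, and using R32_TYPELESS for raw views follows the
D3D11 convention rather than anything this patch mandates:

#include <string.h>

/* Sketch: populate a DefineUAView command for a raw buffer view that
 * covers num_elems 32-bit elements of surface sid. */
static void fill_define_raw_buffer_uav(SVGA3dCmdDXDefineUAView *cmd,
				       SVGA3dUAViewId view_id,
				       SVGA3dSurfaceId sid,
				       uint32 num_elems)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->uaViewId = view_id;
	cmd->sid = sid;
	cmd->format = SVGA3D_R32_TYPELESS;
	cmd->resourceDimension = SVGA3D_RESOURCE_BUFFER;
	cmd->desc.buffer.firstElement = 0;
	cmd->desc.buffer.numElements = num_elems;
	cmd->desc.buffer.flags = SVGA3D_UABUFFER_RAW;
}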
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyUAView {
+   SVGA3dUAViewId uaViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyUAView;
+/* SVGA_3D_CMD_DX_DESTROY_UA_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewUint {
+   SVGA3dUAViewId uaViewId;
+   SVGA3dRGBAUint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewUint;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearUAViewFloat {
+   SVGA3dUAViewId uaViewId;
+   SVGA3dRGBAFloat value;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearUAViewFloat;
+/* SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCopyStructureCount {
+   SVGA3dUAViewId srcUAViewId;
+   SVGA3dSurfaceId destSid;
+   uint32 destByteOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCopyStructureCount;
+/* SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStructureCount {
+   SVGA3dUAViewId uaViewId;
+   uint32 structureCount;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStructureCount;
+/* SVGA_3D_CMD_DX_SET_STRUCTURE_COUNT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetUAViews {
+   uint32 uavSpliceIndex;
+   /* Followed by a variable number of SVGA3dUAViewIds. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetUAViews; /* SVGA_3D_CMD_DX_SET_UA_VIEWS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCSUAViews {
+   uint32 startIndex;
+   /* Followed by a variable number of SVGA3dUAViewIds. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCSUAViews; /* SVGA_3D_CMD_DX_SET_CS_UA_VIEWS */
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dInputElementDesc {
@@ -1099,7 +1383,7 @@ typedef
 struct {
    uint32 elid;
    uint32 numDescs;
-   SVGA3dInputElementDesc desc[32];
+   SVGA3dInputElementDesc descs[32];
    uint32 pad[62];
 }
 #include "vmware_pack_end.h"
@@ -1261,7 +1545,8 @@ struct {
    uint8 lineStippleEnable;
    uint8 lineStippleFactor;
    uint16 lineStipplePattern;
-   uint32 forcedSampleCount;
+   uint8 forcedSampleCount;
+   uint8 mustBeZero[3];
 }
 #include "vmware_pack_end.h"
 SVGACOTableDXRasterizerStateEntry;
@@ -1352,6 +1637,71 @@ struct SVGA3dCmdDXDestroySamplerState {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
 
+
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_UNDEFINED                          0
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_POSITION                           1
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CLIP_DISTANCE                      2
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_CULL_DISTANCE                      3
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_RENDER_TARGET_ARRAY_INDEX          4
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VIEWPORT_ARRAY_INDEX               5
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_VERTEX_ID                          6
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_PRIMITIVE_ID                       7
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_INSTANCE_ID                        8
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_IS_FRONT_FACE                      9
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_SAMPLE_INDEX                       10
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_0_EDGE_TESSFACTOR  11
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_0_EDGE_TESSFACTOR  12
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_EQ_1_EDGE_TESSFACTOR  13
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_EQ_1_EDGE_TESSFACTOR  14
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_U_INSIDE_TESSFACTOR     15
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_QUAD_V_INSIDE_TESSFACTOR     16
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_U_EQ_0_EDGE_TESSFACTOR   17
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_V_EQ_0_EDGE_TESSFACTOR   18
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_W_EQ_0_EDGE_TESSFACTOR   19
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_TRI_INSIDE_TESSFACTOR        20
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DETAIL_TESSFACTOR       21
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_FINAL_LINE_DENSITY_TESSFACTOR      22
+#define SVGADX_SIGNATURE_SEMANTIC_NAME_MAX                                23
+typedef uint32 SVGA3dDXSignatureSemanticName;
+
+#define SVGADX_SIGNATURE_REGISTER_COMPONENT_UNKNOWN 0
+typedef uint32 SVGA3dDXSignatureRegisterComponentType;
+
+#define SVGADX_SIGNATURE_MIN_PRECISION_DEFAULT 0
+typedef uint32 SVGA3dDXSignatureMinPrecision;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureEntry {
+   uint32 registerIndex;
+   SVGA3dDXSignatureSemanticName semanticName;
+   uint32 mask; /* Lower 4 bits represent X, Y, Z, W channels */
+   SVGA3dDXSignatureRegisterComponentType componentType;
+   SVGA3dDXSignatureMinPrecision minPrecision;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureEntry;
+
+#define SVGADX_SIGNATURE_HEADER_VERSION_0 0x08a92d12
+
+/*
+ * The SVGA3dDXSignatureHeader structure is added after the shader
+ * body in the mob that is bound to the shader.  It is followed by the
+ * specified number of SVGA3dDXSignatureEntry structures for each of
+ * the three signature types, in the order: input, output, patch
+ * constants.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSignatureHeader {
+   uint32 headerVersion;
+   uint32 numInputSignatures;
+   uint32 numOutputSignatures;
+   uint32 numPatchConstantSignatures;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXShaderSignatureHeader;
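
A sketch of how a consumer might locate the entries that follow the header in the mob; the pointer arithmetic is the only point being illustrated:

static const SVGA3dDXShaderSignatureEntry *
signature_entries(const SVGA3dDXShaderSignatureHeader *sig, uint32 *numTotal)
{
   if (sig->headerVersion != SVGADX_SIGNATURE_HEADER_VERSION_0)
      return NULL;

   /* Input, output and patch-constant entries are packed back-to-back. */
   *numTotal = sig->numInputSignatures + sig->numOutputSignatures +
               sig->numPatchConstantSignatures;

   /* Entries start immediately after the header. */
   return (const SVGA3dDXShaderSignatureEntry *)(sig + 1);
}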
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXDefineShader {
@@ -1415,7 +1765,8 @@ SVGA3dCmdDXCondBindAllShader;   /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
 /*
  * The maximum number of streamout decl's in each streamout entry.
  */
-#define SVGA3D_MAX_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_DX10_STREAMOUT_DECLS 64
+#define SVGA3D_MAX_STREAMOUT_DECLS 512
 
 typedef
 #include "vmware_pack_begin.h"
@@ -1434,10 +1785,16 @@ typedef
 #include "vmware_pack_begin.h"
 struct SVGAOTableStreamOutputEntry {
    uint32 numOutputStreamEntries;
-   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
    uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
    uint32 rasterizedStream;
-   uint32 pad[250];
+   uint32 numOutputStreamStrides;
+   uint32 mobid;
+   uint32 offsetInBytes;
+   uint8 usesMob;
+   uint8 pad0;
+   uint16 pad1;
+   uint32 pad2[246];
 }
 #include "vmware_pack_end.h"
 SVGACOTableDXStreamOutputEntry;
@@ -1447,13 +1804,47 @@ typedef
 struct SVGA3dCmdDXDefineStreamOutput {
    SVGA3dStreamOutputId soid;
    uint32 numOutputStreamEntries;
-   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_DX10_STREAMOUT_DECLS];
    uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
    uint32 rasterizedStream;
 }
 #include "vmware_pack_end.h"
 SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
 
+/*
+ * Version 2 is needed in order to start validating and using the
+ * rasterizedStream field.  Unfortunately the device wasn't validating
+ * or using this field, and shipped drivers weren't initializing it, so
+ * a new version of the command is needed for that code to continue to
+ * work.  The new version also adds the numOutputStreamStrides field.
+ */
+
+#define SVGA3D_DX_SO_NO_RASTERIZED_STREAM 0xFFFFFFFF
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutputWithMob {
+   SVGA3dStreamOutputId soid;
+   uint32 numOutputStreamEntries;
+   uint32 numOutputStreamStrides;
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutputWithMob;
+/* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindStreamOutput {
+   SVGA3dStreamOutputId soid;
+   uint32 mobid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindStreamOutput; /* SVGA_3D_CMD_DX_BIND_STREAMOUTPUT */
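
With the WITH_MOB variant, the declaration array no longer travels in the command itself; it lives in a guest mob that is bound separately. A sketch of the binding step, assuming (illustratively, not by mandate of the header) that the mob holds only the tightly packed declaration array, and a hypothetical submit_cmd() helper:

extern void submit_cmd(uint32 cmdId, const void *body, uint32 len);

static void bind_so_decls(SVGA3dStreamOutputId soid, uint32 mobid,
                          uint32 numDecls)
{
   SVGA3dCmdDXBindStreamOutput body = {0};

   body.soid = soid;
   body.mobid = mobid;
   body.offsetInBytes = 0;
   body.sizeInBytes = numDecls * sizeof(SVGA3dStreamOutputDeclarationEntry);

   submit_cmd(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT, &body, sizeof(body));
}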
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXDestroyStreamOutput {
@@ -1472,6 +1863,15 @@ SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
 
 typedef
 #include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetMinLOD {
+   SVGA3dSurfaceId sid;
+   float minLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetMinLOD; /* SVGA_3D_CMD_DX_SET_MIN_LOD */
+
+typedef
+#include "vmware_pack_begin.h"
 struct {
    uint64 value;
    uint32 mobId;
@@ -1581,33 +1981,38 @@ struct SVGADXContextMobFormat {
       uint32 rasterizerStateId;
       uint32 depthStencilViewId;
       uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
-      uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
    } renderState;
 
+   uint32 pad0[8];
+
    struct {
       uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
       uint32 soid;
    } streamOut;
-   uint32 pad0[11];
+
+   uint32 pad1[10];
+
+   uint32 uavSpliceIndex;
 
    uint8 numViewports;
    uint8 numScissorRects;
-   uint16 pad1[1];
+   uint16 pad2[1];
 
-   uint32 pad2[3];
+   uint32 pad3[3];
 
    SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
-   uint32 pad3[32];
+   uint32 pad4[32];
 
    SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
-   uint32 pad4[64];
+   uint32 pad5[64];
 
    struct {
       uint32 queryID;
       uint32 value;
    } predication;
-   uint32 pad5[2];
 
+   SVGAMobId shaderIfaceMobid;
+   uint32 shaderIfaceOffset;
    struct {
       uint32 shaderId;
       SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
@@ -1619,11 +2024,38 @@ struct SVGADXContextMobFormat {
    SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
 
    SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
-   uint32 pad7[380];
+
+   uint32 pad7[64];
+
+   uint32 uaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+   uint32 csuaViewIds[SVGA3D_DX11_1_MAX_UAVIEWS];
+
+   uint32 pad8[188];
 }
 #include "vmware_pack_end.h"
 SVGADXContextMobFormat;
 
+/*
+ * There is conflicting documentation on the max class instances (253 vs 256).
+ * The lower value is the one used throughout the device, but since the mob
+ * format is more involved to grow later if needed, conservatively use the
+ * higher value here.
+ */
+#define SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXShaderIfaceMobFormat {
+   struct {
+      uint32 numClassInstances;
+      uint32 iface[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+      SVGA3dIfaceData data[SVGA3D_DX_MAX_CLASS_INSTANCES_PADDED];
+   } shaderIfaceState[SVGA3D_NUM_SHADERTYPE];
+
+   uint32 pad0[1018];
+}
+#include "vmware_pack_end.h"
+SVGADXShaderIfaceMobFormat;
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXTempSetContext {
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
index b22a67f..f4375a4 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc.
+ * Copyright 2007-2019 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -40,11 +40,25 @@
 #include "includeCheck.h"
 
 #define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * While there are separate bind-points for RenderTargetViews and
+ * UnorderedAccessViews in a DXContext, there is in fact one shared
+ * semantic space that the guest-driver can use on any given draw call.
+ * So there are really only 8 slots that can be split up between them, with the
+ * spliceIndex controlling where the UAVs sit in the collapsed array.
+ */
 #define SVGA3D_MAX_RENDER_TARGETS               8
 #define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
 #define SVGA3D_MAX_UAVIEWS                      8
-#define SVGA3D_MAX_CONTEXT_IDS                  256
-#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+#define SVGA3D_DX11_1_MAX_UAVIEWS               64
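
A small illustration of the collapsed array described above: with splice index n, slots [0, n) hold render-target views and the remaining slots are available for UAVs on graphics draws. The helper is hypothetical:

static inline uint32 max_gfx_uaviews(uint32 uavSpliceIndex)
{
   /* 8 shared slots total; RTVs occupy [0, uavSpliceIndex). */
   return SVGA3D_MAX_UAVIEWS - uavSpliceIndex;
}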
+
+/*
+ * Maximum canonical size of a surface in host-backed mode (pre-GBObjects).
+ */
+#define SVGA3D_HB_MAX_SURFACE_SIZE MBYTES_2_BYTES(128)
 
 /*
  * Maximum ID a shader can be assigned on a given context.
@@ -59,6 +73,8 @@
 #define SVGA3D_NUM_TEXTURE_UNITS                32
 #define SVGA3D_NUM_LIGHTS                       8
 
+#define SVGA3D_MAX_VIDEOPROCESSOR_SAMPLERS      32
+
 /*
  * Maximum size in dwords of shader text the SVGA device will allow.
  * Currently 8 MB.
@@ -67,6 +83,11 @@
 #define SVGA3D_MAX_SHADER_MEMORY  (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
                                    sizeof(uint32))
 
+/*
+ * The maximum value of threadGroupCount in each dimension
+ */
+#define SVGA3D_MAX_SHADER_THREAD_GROUPS 65535
+
 #define SVGA3D_MAX_CLIP_PLANES    6
 
 /*
@@ -85,7 +106,9 @@
 /*
  * Maximum number of array indexes in a GB surface (with DX enabled).
  */
-#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM4_MAX_SURFACE_ARRAYSIZE 512
+#define SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE 2048
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE SVGA3D_SM5_MAX_SURFACE_ARRAYSIZE
 
 /*
  * The maximum number of vertex arrays we're guaranteed to support in
@@ -99,4 +122,9 @@
  */
 #define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
 
+/*
+ * The maximum number of samples that can be contained in a surface.
+ */
+#define SVGA3D_MAX_SAMPLES 8
+
 #endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 61414f1..4db25bd 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -131,6 +131,8 @@ enum svga3d_block_desc {
 	SVGA3DBLOCKDESC_BC3         = 1 << 26,
 	SVGA3DBLOCKDESC_BC4         = 1 << 27,
 	SVGA3DBLOCKDESC_BC5         = 1 << 28,
+	SVGA3DBLOCKDESC_BC6H        = 1 << 29,
+	SVGA3DBLOCKDESC_BC7         = 1 << 30,
 
 	SVGA3DBLOCKDESC_A_UINT    = SVGA3DBLOCKDESC_ALPHA |
 				    SVGA3DBLOCKDESC_UINT |
@@ -290,6 +292,18 @@ enum svga3d_block_desc {
 					 SVGA3DBLOCKDESC_COMP_UNORM,
 	SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
 					 SVGA3DBLOCKDESC_COMP_SNORM,
+	SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS = SVGA3DBLOCKDESC_BC6H |
+					     SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC6H_COMP_UF16 = SVGA3DBLOCKDESC_BC6H |
+					 SVGA3DBLOCKDESC_COMPRESSED,
+	SVGA3DBLOCKDESC_BC6H_COMP_SF16 = SVGA3DBLOCKDESC_BC6H |
+					 SVGA3DBLOCKDESC_COMPRESSED,
+	SVGA3DBLOCKDESC_BC7_COMP_TYPELESS = SVGA3DBLOCKDESC_BC7 |
+					    SVGA3DBLOCKDESC_COMP_TYPELESS,
+	SVGA3DBLOCKDESC_BC7_COMP_UNORM = SVGA3DBLOCKDESC_BC7 |
+					 SVGA3DBLOCKDESC_COMP_UNORM,
+	SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC7_COMP_UNORM |
+					      SVGA3DBLOCKDESC_SRGB,
 
 	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_YUV_VIDEO |
 				     SVGA3DBLOCKDESC_PLANAR_YUV |
@@ -494,7 +508,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
       {{8}, {8}, {8}, {0}},
       {{16}, {8}, {0}, {0}}},
 
-   {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
+   {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_NONE,
       {1, 1, 1},  3, 3,
       {{8}, {8}, {8}, {0}},
       {{16}, {8}, {0}, {0}}},
@@ -604,7 +618,7 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
       {{0}, {0}, {48}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+   {SVGA3D_FORMAT_DEAD2, SVGA3DBLOCKDESC_NONE,
       {1, 1, 1},  4, 4,
       {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
@@ -1103,6 +1117,46 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
       {4, 4, 1},  16, 16,
       {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_B4G4R4A4_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
+      {1, 1, 1},  2, 2,
+      {{4}, {4}, {4}, {4}},
+      {{0}, {4}, {8}, {12}}},
+
+   {SVGA3D_BC6H_TYPELESS, SVGA3DBLOCKDESC_BC6H_COMP_TYPELESS,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC6H_UF16, SVGA3DBLOCKDESC_BC6H_COMP_UF16,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC6H_SF16, SVGA3DBLOCKDESC_BC6H_COMP_SF16,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC7_TYPELESS, SVGA3DBLOCKDESC_BC7_COMP_TYPELESS,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC7_UNORM, SVGA3DBLOCKDESC_BC7_COMP_UNORM,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC7_UNORM_SRGB, SVGA3DBLOCKDESC_BC7_COMP_UNORM_SRGB,
+      {4, 4, 1},  16, 16,
+      {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+      {1, 1, 1},  4, 4,
+      {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
 };
 
 static inline u32 clamped_umul32(u32 a, u32 b)
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
index 3083706..77e338a 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -116,6 +116,19 @@ SVGA3dBox;
 typedef
 #include "vmware_pack_begin.h"
 struct {
+   int32                x;
+   int32                y;
+   int32                z;
+   int32                w;
+   int32                h;
+   int32                d;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignedBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
    uint32               x;
    uint32               y;
    uint32               z;
@@ -198,8 +211,7 @@ typedef enum SVGA3dSurfaceFormat {
    /* Planar video formats */
    SVGA3D_NV12                         = 44,
 
-   /* Video format with alpha */
-   SVGA3D_AYUV                         = 45,
+   SVGA3D_FORMAT_DEAD2                 = 45,
 
    SVGA3D_R32G32B32A32_TYPELESS        = 46,
    SVGA3D_R32G32B32A32_UINT            = 47,
@@ -305,6 +317,18 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_B8G8R8X8_UNORM               = 142,
    SVGA3D_BC4_UNORM                    = 143,
    SVGA3D_BC5_UNORM                    = 144,
+   SVGA3D_B4G4R4A4_UNORM               = 145,
+
+   /* DX11 compressed formats */
+   SVGA3D_BC6H_TYPELESS                = 146,
+   SVGA3D_BC6H_UF16                    = 147,
+   SVGA3D_BC6H_SF16                    = 148,
+   SVGA3D_BC7_TYPELESS                 = 149,
+   SVGA3D_BC7_UNORM                    = 150,
+   SVGA3D_BC7_UNORM_SRGB               = 151,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 152,
 
    SVGA3D_FORMAT_MAX
 } SVGA3dSurfaceFormat;
@@ -326,10 +350,10 @@ typedef enum SVGA3dSurfaceFormat {
 #define SVGA3D_SURFACE_HINT_RENDERTARGET      (CONST64U(1) << 6)
 #define SVGA3D_SURFACE_HINT_DEPTHSTENCIL      (CONST64U(1) << 7)
 #define SVGA3D_SURFACE_HINT_WRITEONLY         (CONST64U(1) << 8)
-#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS     (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_DEAD2                  (CONST64U(1) << 9)
 #define SVGA3D_SURFACE_AUTOGENMIPMAPS         (CONST64U(1) << 10)
 
-#define SVGA3D_SURFACE_DECODE_RENDERTARGET    (CONST64U(1) << 11)
+#define SVGA3D_SURFACE_DEAD1                  (CONST64U(1) << 11)
 
 /*
  * Is this surface using a base-level pitch for it's mob backing?
@@ -387,7 +411,7 @@ typedef enum SVGA3dSurfaceFormat {
  * Setting this flag allow this surface to be used with the
  * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
  * buffer surfaces, and no bind flags are allowed to be set on surfaces
- * with this flag.
+ * with this flag except SVGA3D_SURFACE_TRANSFER_TO_BUFFER.
  */
 #define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   (CONST64U(1) << 30)
 
@@ -402,7 +426,31 @@ typedef enum SVGA3dSurfaceFormat {
  */
 #define SVGA3D_SURFACE_MULTISAMPLE            (CONST64U(1) << 32)
 
-#define SVGA3D_SURFACE_FLAG_MAX               (CONST64U(1) << 33)
+/*
+ * Specifies that the surface is allowed to be bound to a UAView.
+ */
+#define SVGA3D_SURFACE_BIND_UAVIEW            (CONST64U(1) << 33)
+
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_TO_BUFFER command.  It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag except SVGA3D_SURFACE_TRANSFER_FROM_BUFFER.
+ */
+#define SVGA3D_SURFACE_TRANSFER_TO_BUFFER     (CONST64U(1) << 34)
+
+#define SVGA3D_SURFACE_BIND_LOGICOPS          (CONST64U(1) << 35)
+
+/*
+ * Optional flags for use with SVGA3D_SURFACE_BIND_UAVIEW
+ */
+#define SVGA3D_SURFACE_BIND_RAW_VIEWS         (CONST64U(1) << 36)
+#define SVGA3D_SURFACE_BUFFER_STRUCTURED      (CONST64U(1) << 37)
+
+#define SVGA3D_SURFACE_DRAWINDIRECT_ARGS      (CONST64U(1) << 38)
+#define SVGA3D_SURFACE_RESOURCE_CLAMP         (CONST64U(1) << 39)
+
+#define SVGA3D_SURFACE_FLAG_MAX               (CONST64U(1) << 40)
 
 /*
  * Surface flags types:
@@ -428,17 +476,25 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
            SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
            SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
-           SVGA3D_SURFACE_MULTISAMPLE            \
+           SVGA3D_SURFACE_RESERVED1 |             \
+           SVGA3D_SURFACE_MULTISAMPLE |          \
+           SVGA3D_SURFACE_BIND_UAVIEW |          \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |   \
+           SVGA3D_SURFACE_BIND_LOGICOPS |        \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |       \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |    \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |    \
+           SVGA3D_SURFACE_RESOURCE_CLAMP         \
         )
 
 #define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK   \
        (   SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_RESERVED1 |                \
            SVGA3D_SURFACE_MULTISAMPLE               \
         )
 
 #define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
         (  SVGA3D_SURFACE_CUBEMAP |                 \
-           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
            SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
@@ -448,7 +504,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
            SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
            SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
-           SVGA3D_SURFACE_MULTISAMPLE               \
+           SVGA3D_SURFACE_RESERVED1 |                \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_BIND_UAVIEW |             \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |      \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |          \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |       \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |       \
+           SVGA3D_SURFACE_RESOURCE_CLAMP            \
         )
 
 #define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK     \
@@ -456,6 +519,7 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_RESERVED1 |                \
            SVGA3D_SURFACE_MULTISAMPLE               \
         )
 
@@ -474,7 +538,14 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
            SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
            SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
-           SVGA3D_SURFACE_MULTISAMPLE               \
+           SVGA3D_SURFACE_RESERVED1 |                \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_BIND_UAVIEW |             \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |      \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |          \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |       \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |       \
+           SVGA3D_SURFACE_RESOURCE_CLAMP            \
         )
 
 #define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK       \
@@ -482,10 +553,11 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
-           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_DEAD2 |                   \
            SVGA3D_SURFACE_ARRAY |                   \
            SVGA3D_SURFACE_MULTISAMPLE |             \
-           SVGA3D_SURFACE_MOB_PITCH                 \
+           SVGA3D_SURFACE_MOB_PITCH |               \
+           SVGA3D_SURFACE_RESOURCE_CLAMP            \
         )
 
 #define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK  \
@@ -494,14 +566,23 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
            SVGA3D_SURFACE_SCREENTARGET |            \
-           SVGA3D_SURFACE_MOB_PITCH                 \
+           SVGA3D_SURFACE_MOB_PITCH |               \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_RESERVED1 |                \
+           SVGA3D_SURFACE_BIND_UAVIEW |             \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |      \
+           SVGA3D_SURFACE_BIND_LOGICOPS |           \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |          \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |       \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS         \
         )
 
-#define SVGA3D_SURFACE_DX_ONLY_MASK             \
-        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
-           SVGA3D_SURFACE_STAGING_UPLOAD |      \
-           SVGA3D_SURFACE_STAGING_DOWNLOAD |    \
-           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+#define SVGA3D_SURFACE_DX_ONLY_MASK              \
+        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |   \
+           SVGA3D_SURFACE_STAGING_UPLOAD |       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER     \
         )
 
 #define SVGA3D_SURFACE_STAGING_MASK             \
@@ -516,9 +597,135 @@ typedef uint64 SVGA3dSurfaceAllFlags;
            SVGA3D_SURFACE_BIND_SHADER_RESOURCE |  \
            SVGA3D_SURFACE_BIND_RENDER_TARGET   |  \
            SVGA3D_SURFACE_BIND_DEPTH_STENCIL   |  \
-           SVGA3D_SURFACE_BIND_STREAM_OUTPUT      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT   |  \
+           SVGA3D_SURFACE_BIND_UAVIEW          |  \
+           SVGA3D_SURFACE_BIND_LOGICOPS        |  \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS          \
         )
 
+#define SVGA3D_SURFACE_VADECODE_DISALLOWED_MASK     \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_HINT_STATIC |             \
+           SVGA3D_SURFACE_HINT_DYNAMIC |            \
+           SVGA3D_SURFACE_HINT_INDEXBUFFER |        \
+           SVGA3D_SURFACE_HINT_VERTEXBUFFER |       \
+           SVGA3D_SURFACE_HINT_TEXTURE |            \
+           SVGA3D_SURFACE_HINT_RENDERTARGET |       \
+           SVGA3D_SURFACE_HINT_DEPTHSTENCIL |       \
+           SVGA3D_SURFACE_HINT_WRITEONLY |          \
+           SVGA3D_SURFACE_DEAD2 |                   \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_HINT_RT_LOCKABLE |        \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_SCREENTARGET |            \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_RENDER_TARGET |      \
+           SVGA3D_SURFACE_BIND_SHADER_RESOURCE |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_INACTIVE |                \
+           SVGA3D_SURFACE_STAGING_UPLOAD |          \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  |   \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_BIND_UAVIEW |             \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |      \
+           SVGA3D_SURFACE_BIND_LOGICOPS |           \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |          \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |       \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |       \
+           SVGA3D_SURFACE_RESOURCE_CLAMP            \
+        )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_OUTPUT_DISALLOWED_MASK     \
+        (  SVGA3D_SURFACE_HINT_INDEXBUFFER |                     \
+           SVGA3D_SURFACE_HINT_VERTEXBUFFER |                    \
+           SVGA3D_SURFACE_HINT_DEPTHSTENCIL |                    \
+           SVGA3D_SURFACE_DEAD2 |                                \
+           SVGA3D_SURFACE_VOLUME |                               \
+           SVGA3D_SURFACE_1D |                                   \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |                   \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |                    \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |                 \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |                   \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |                   \
+           SVGA3D_SURFACE_INACTIVE |                             \
+           SVGA3D_SURFACE_STAGING_UPLOAD |                       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |                     \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |                 \
+           SVGA3D_SURFACE_VADECODE |                             \
+           SVGA3D_SURFACE_MULTISAMPLE |                          \
+           SVGA3D_SURFACE_BIND_UAVIEW |                          \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |                   \
+           SVGA3D_SURFACE_BIND_LOGICOPS |                        \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |                       \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |                    \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |                    \
+           SVGA3D_SURFACE_RESOURCE_CLAMP                         \
+        )
+
+#define SVGA3D_SURFACE_VAPROCESSFRAME_INPUT_DISALLOWED_MASK     \
+        ( SVGA3D_SURFACE_CUBEMAP |                              \
+          SVGA3D_SURFACE_HINT_INDEXBUFFER |                     \
+          SVGA3D_SURFACE_HINT_VERTEXBUFFER |                    \
+          SVGA3D_SURFACE_HINT_DEPTHSTENCIL |                    \
+          SVGA3D_SURFACE_DEAD2 |                                \
+          SVGA3D_SURFACE_VOLUME |                               \
+          SVGA3D_SURFACE_SCREENTARGET |                         \
+          SVGA3D_SURFACE_1D |                                   \
+          SVGA3D_SURFACE_BIND_VERTEX_BUFFER |                   \
+          SVGA3D_SURFACE_BIND_INDEX_BUFFER |                    \
+          SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |                 \
+          SVGA3D_SURFACE_BIND_DEPTH_STENCIL |                   \
+          SVGA3D_SURFACE_BIND_STREAM_OUTPUT |                   \
+          SVGA3D_SURFACE_STAGING_UPLOAD |                       \
+          SVGA3D_SURFACE_STAGING_DOWNLOAD |                     \
+          SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |                 \
+          SVGA3D_SURFACE_MULTISAMPLE |                          \
+          SVGA3D_SURFACE_BIND_UAVIEW |                          \
+          SVGA3D_SURFACE_TRANSFER_TO_BUFFER |                   \
+          SVGA3D_SURFACE_BIND_LOGICOPS |                        \
+          SVGA3D_SURFACE_BIND_RAW_VIEWS |                       \
+          SVGA3D_SURFACE_BUFFER_STRUCTURED |                    \
+          SVGA3D_SURFACE_DRAWINDIRECT_ARGS |                    \
+          SVGA3D_SURFACE_RESOURCE_CLAMP                         \
+        )
+
+#define SVGA3D_SURFACE_LOGICOPS_DISALLOWED_MASK     \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_DEAD2 |                   \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_VADECODE |                \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_BIND_UAVIEW |             \
+           SVGA3D_SURFACE_TRANSFER_TO_BUFFER |      \
+           SVGA3D_SURFACE_BIND_RAW_VIEWS |          \
+           SVGA3D_SURFACE_BUFFER_STRUCTURED |       \
+           SVGA3D_SURFACE_DRAWINDIRECT_ARGS |       \
+           SVGA3D_SURFACE_RESOURCE_CLAMP            \
+        )
+
+#define SVGA3D_BUFFER_STRUCTURED_STRIDE_MAX 2048
+
+
+/*
+ * These are really the D3DFORMAT_OP defines from the WDK. We need
+ * them so that we can query the host for what the supported surface
+ * operations are (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
 typedef enum {
    SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
    SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
@@ -1338,7 +1545,40 @@ typedef enum {
    SVGA3D_PRIMITIVE_LINESTRIP_ADJ               = 8,
    SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ            = 9,
    SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ           = 10,
-   SVGA3D_PRIMITIVE_MAX
+   SVGA3D_PRIMITIVE_DX10_MAX                    = 11,
+   SVGA3D_PRIMITIVE_1_CONTROL_POINT_PATCH       = 11,
+   SVGA3D_PRIMITIVE_2_CONTROL_POINT_PATCH       = 12,
+   SVGA3D_PRIMITIVE_3_CONTROL_POINT_PATCH       = 13,
+   SVGA3D_PRIMITIVE_4_CONTROL_POINT_PATCH       = 14,
+   SVGA3D_PRIMITIVE_5_CONTROL_POINT_PATCH       = 15,
+   SVGA3D_PRIMITIVE_6_CONTROL_POINT_PATCH       = 16,
+   SVGA3D_PRIMITIVE_7_CONTROL_POINT_PATCH       = 17,
+   SVGA3D_PRIMITIVE_8_CONTROL_POINT_PATCH       = 18,
+   SVGA3D_PRIMITIVE_9_CONTROL_POINT_PATCH       = 19,
+   SVGA3D_PRIMITIVE_10_CONTROL_POINT_PATCH      = 20,
+   SVGA3D_PRIMITIVE_11_CONTROL_POINT_PATCH      = 21,
+   SVGA3D_PRIMITIVE_12_CONTROL_POINT_PATCH      = 22,
+   SVGA3D_PRIMITIVE_13_CONTROL_POINT_PATCH      = 23,
+   SVGA3D_PRIMITIVE_14_CONTROL_POINT_PATCH      = 24,
+   SVGA3D_PRIMITIVE_15_CONTROL_POINT_PATCH      = 25,
+   SVGA3D_PRIMITIVE_16_CONTROL_POINT_PATCH      = 26,
+   SVGA3D_PRIMITIVE_17_CONTROL_POINT_PATCH      = 27,
+   SVGA3D_PRIMITIVE_18_CONTROL_POINT_PATCH      = 28,
+   SVGA3D_PRIMITIVE_19_CONTROL_POINT_PATCH      = 29,
+   SVGA3D_PRIMITIVE_20_CONTROL_POINT_PATCH      = 30,
+   SVGA3D_PRIMITIVE_21_CONTROL_POINT_PATCH      = 31,
+   SVGA3D_PRIMITIVE_22_CONTROL_POINT_PATCH      = 32,
+   SVGA3D_PRIMITIVE_23_CONTROL_POINT_PATCH      = 33,
+   SVGA3D_PRIMITIVE_24_CONTROL_POINT_PATCH      = 34,
+   SVGA3D_PRIMITIVE_25_CONTROL_POINT_PATCH      = 35,
+   SVGA3D_PRIMITIVE_26_CONTROL_POINT_PATCH      = 36,
+   SVGA3D_PRIMITIVE_27_CONTROL_POINT_PATCH      = 37,
+   SVGA3D_PRIMITIVE_28_CONTROL_POINT_PATCH      = 38,
+   SVGA3D_PRIMITIVE_29_CONTROL_POINT_PATCH      = 39,
+   SVGA3D_PRIMITIVE_30_CONTROL_POINT_PATCH      = 40,
+   SVGA3D_PRIMITIVE_31_CONTROL_POINT_PATCH      = 41,
+   SVGA3D_PRIMITIVE_32_CONTROL_POINT_PATCH      = 42,
+   SVGA3D_PRIMITIVE_MAX                         = 43
 } SVGA3dPrimitiveType;
 
 typedef enum {
@@ -1442,16 +1682,15 @@ typedef enum {
    SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS           = 5,
    SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE     = 6,
    SVGA3D_QUERYTYPE_OCCLUSION64                 = 7,
-   SVGA3D_QUERYTYPE_EVENT                       = 8,
-   SVGA3D_QUERYTYPE_DX10_MAX                    = 9,
-   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 9,
-   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 10,
-   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 11,
-   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 12,
-   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 13,
-   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 14,
-   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 15,
-   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 16,
+   SVGA3D_QUERYTYPE_DX10_MAX                    = 8,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 8,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 10,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 11,
+   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 12,
+   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 13,
+   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 14,
+   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 15,
    SVGA3D_QUERYTYPE_MAX
 } SVGA3dQueryType;
 
@@ -1584,28 +1823,33 @@ typedef enum {
    SVGA3D_READ_HOST_VRAM         = 2,
 } SVGA3dTransferType;
 
-typedef enum {
-   SVGA3D_LOGICOP_INVALID   = 0,
-   SVGA3D_LOGICOP_MIN       = 1,
-   SVGA3D_LOGICOP_COPY      = 1,
-   SVGA3D_LOGICOP_NOT       = 2,
-   SVGA3D_LOGICOP_AND       = 3,
-   SVGA3D_LOGICOP_OR        = 4,
-   SVGA3D_LOGICOP_XOR       = 5,
-   SVGA3D_LOGICOP_NXOR      = 6,
-   SVGA3D_LOGICOP_ROP3MIN   = 30,   /* 7-29 are reserved for future logic ops. */
-   SVGA3D_LOGICOP_ROP3MAX   = (SVGA3D_LOGICOP_ROP3MIN + 255),
-   SVGA3D_LOGICOP_MAX       = (SVGA3D_LOGICOP_ROP3MAX + 1),
-} SVGA3dLogicOp;
+#define SVGA3D_LOGICOP_INVALID  0
+#define SVGA3D_LOGICOP_MIN      1
+#define SVGA3D_LOGICOP_COPY     1
+#define SVGA3D_LOGICOP_NOT      2
+#define SVGA3D_LOGICOP_AND      3
+#define SVGA3D_LOGICOP_OR       4
+#define SVGA3D_LOGICOP_XOR      5
+#define SVGA3D_LOGICOP_NXOR     6
+#define SVGA3D_LOGICOP_ROP3     7
+#define SVGA3D_LOGICOP_MAX      8
+
+typedef uint16 SVGA3dLogicOp;
+
+#define SVGA3D_LOGICOP_ROP3_INVALID ((uint16) -1)
+#define SVGA3D_LOGICOP_ROP3_MIN     0
+#define SVGA3D_LOGICOP_ROP3_MAX     256
+
+typedef uint16 SVGA3dLogicOpRop3;
 
 typedef
 #include "vmware_pack_begin.h"
 struct {
    union {
       struct {
-         uint16  function;       /* SVGA3dFogFunction */
-         uint8   type;           /* SVGA3dFogType */
-         uint8   base;           /* SVGA3dFogBase */
+         uint16  function;       // SVGA3dFogFunction
+         uint8   type;           // SVGA3dFogType
+         uint8   base;           // SVGA3dFogBase
       };
       uint32     uintValue;
    };
@@ -1742,4 +1986,15 @@ typedef enum SVGA3dMSQualityLevel {
    SVGA3D_MS_QUALITY_MAX  = 2,
 } SVGA3dMSQualityLevel;
 
+/*
+ * Screen Target Update Flags
+ */
+
+typedef enum SVGA3dFrameUpdateType {
+   SVGA3D_FRAME_END     = 0,
+   SVGA3D_FRAME_PARTIAL = 1,
+   SVGA3D_FRAME_UNKNOWN = 2,
+   SVGA3D_FRAME_MAX     = 3,
+} SVGA3dFrameUpdateType;
+
 #endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 056f54b..19fb9e3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -70,8 +70,7 @@ typedef uint32 SVGAMobId;
 
 /*
  * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
- * cursor bypass mode. This is still supported, but no new guest
- * drivers should use it.
+ * cursor bypass mode.
  */
 #define SVGA_CURSOR_ON_HIDE            0x0
 #define SVGA_CURSOR_ON_SHOW            0x1
@@ -137,6 +136,17 @@ typedef uint32 SVGAMobId;
 #define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
 
 /*
+ * The byte-size is the size of the actual cursor data,
+ * possibly after expanding it to the current bit depth.
+ *
+ * 40K is sufficient memory for two 32-bit planes for a 64 x 64 cursor.
+ *
+ * The dimension limit is a bound on the maximum width or height.
+ */
+#define SVGA_MAX_CURSOR_CMD_BYTES  (40 * 1024)
+#define SVGA_MAX_CURSOR_CMD_DIMENSION 1024
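
A quick check of the 40K claim; the macro is illustrative only, not part of the interface:

#define CURSOR_PLANE_BYTES(w, h, bpp)  ((w) * (h) * ((bpp) / 8))

/*
 * One 64 x 64 plane at 32 bpp is 64 * 64 * 4 = 16384 bytes, so two
 * planes (AND + XOR) come to 32768 bytes, comfortably under
 * SVGA_MAX_CURSOR_CMD_BYTES (40960).
 */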
+
+/*
  * Registers
  */
 
@@ -169,7 +179,7 @@ enum {
    SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
    SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
    SVGA_REG_GUEST_ID = 23,            /* (Deprecated) */
-   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
+   SVGA_REG_DEAD = 24,                /* Drivers should never write this. */
    SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
    SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
    SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
@@ -208,7 +218,13 @@ enum {
    SVGA_REG_MAX_PRIMARY_MEM = 50,
    SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
 
-   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Sugested limit on mob mem */
+   /*
+    * Legacy version of SVGA_REG_GBOBJECT_MEM_SIZE_KB for drivers that
+    * don't know how to convert to a 64-bit byte value without overflowing.
+    * (See SVGA_REG_GBOBJECT_MEM_SIZE_KB).
+    */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51,
+
    SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
    SVGA_REG_CMD_PREPEND_LOW = 53,
    SVGA_REG_CMD_PREPEND_HIGH = 54,
@@ -218,7 +234,59 @@ enum {
    SVGA_REG_BLANK_SCREEN_TARGETS = 58,
    SVGA_REG_CAP2 = 59,
    SVGA_REG_DEVEL_CAP = 60,
-   SVGA_REG_TOP = 61,               /* Must be 1 more than the last register */
+
+   /*
+    * Allow the guest to hint to the device which driver is running.
+    *
+    * This should not generally change device behavior, but might be
+    * convenient to work around specific bugs in guest drivers.
+    *
+    * Drivers should first write their id value into SVGA_REG_GUEST_DRIVER_ID,
+    * and then fill out all of the version registers that they have defined.
+    *
+    * After the driver has written all of the registers, it should
+    * then write the value SVGA_REG_GUEST_DRIVER_ID_SUBMIT to the
+    * SVGA_REG_GUEST_DRIVER_ID register, to signal that it has finished.
+    *
+    * The SVGA_REG_GUEST_DRIVER_ID values are defined below by the
+    * SVGARegGuestDriverId enum.
+    *
+    * The SVGA_REG_GUEST_DRIVER_VERSION fields are driver-specific,
+    * but ideally should encode a monotonically increasing number that allows
+    * the device to perform inequality checks against ranges of driver versions.
+    */
+   SVGA_REG_GUEST_DRIVER_ID = 61,
+   SVGA_REG_GUEST_DRIVER_VERSION1 = 62,
+   SVGA_REG_GUEST_DRIVER_VERSION2 = 63,
+   SVGA_REG_GUEST_DRIVER_VERSION3 = 64,
+   SVGA_REG_CURSOR_MOBID = 65,
+   SVGA_REG_CURSOR_MAX_BYTE_SIZE = 66,
+   SVGA_REG_CURSOR_MAX_DIMENSION = 67,
+
+   SVGA_REG_FIFO_CAPS = 68,
+   SVGA_REG_FENCE = 69,
+
+   SVGA_REG_RESERVED1 = 70,
+   SVGA_REG_RESERVED2 = 71,
+   SVGA_REG_RESERVED3 = 72,
+   SVGA_REG_RESERVED4 = 73,
+   SVGA_REG_RESERVED5 = 74,
+   SVGA_REG_SCREENDMA = 75,
+
+   /*
+    * The maximum amount of guest-backed object memory that the device can
+    * have resident at a time. Guest drivers should keep their working set
+    * size below this limit for best performance.
+    *
+    * Note that this value is in kilobytes, and not bytes, because the actual
+    * number of bytes might be larger than can fit in a 32-bit register.
+    *
+    * PLEASE USE A 64-BIT VALUE WHEN CONVERTING THIS INTO BYTES.
+    * (See SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB).
+    */
+   SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
+
+   SVGA_REG_TOP = 77,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -229,6 +297,20 @@ enum {
       the use of the current SVGA driver. */
 };
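
A sketch of the conversion the two GBOBJECT_MEM_SIZE comments above are warning about: the kilobyte count can correspond to more bytes than fit in 32 bits, so the value must be widened before scaling. read_reg() is a hypothetical register accessor:

extern uint32 read_reg(uint32 reg);

static uint64 gbobject_mem_bytes(void)
{
   uint64 size_kb = read_reg(SVGA_REG_GBOBJECT_MEM_SIZE_KB);

   return size_kb * 1024;   /* 64-bit multiply; a 32-bit product can overflow */
}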
 
+
+/*
+ * Values for SVGA_REG_GUEST_DRIVER_ID.
+ */
+typedef enum SVGARegGuestDriverId {
+   SVGA_REG_GUEST_DRIVER_ID_UNKNOWN = 0,
+   SVGA_REG_GUEST_DRIVER_ID_WDDM    = 1,
+   SVGA_REG_GUEST_DRIVER_ID_LINUX   = 2,
+   SVGA_REG_GUEST_DRIVER_ID_MAX,
+
+   SVGA_REG_GUEST_DRIVER_ID_SUBMIT  = MAX_UINT32,
+} SVGARegGuestDriverId;
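
A sketch of the handshake described above, using the Linux driver id; write_reg() is a hypothetical register accessor and the version encoding is driver-specific:

extern void write_reg(uint32 reg, uint32 value);

static void advertise_guest_driver(void)
{
   write_reg(SVGA_REG_GUEST_DRIVER_ID, SVGA_REG_GUEST_DRIVER_ID_LINUX);
   write_reg(SVGA_REG_GUEST_DRIVER_VERSION1, 1);   /* driver-specific */
   write_reg(SVGA_REG_GUEST_DRIVER_VERSION2, 0);
   write_reg(SVGA_REG_GUEST_DRIVER_VERSION3, 0);
   /* Signal that all version registers are now valid. */
   write_reg(SVGA_REG_GUEST_DRIVER_ID, SVGA_REG_GUEST_DRIVER_ID_SUBMIT);
}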
+
+
 /*
  * Guest memory regions (GMRs):
  *
@@ -416,7 +498,6 @@ typedef enum {
    SVGA_CB_CONTEXT_0      = 0x0,
    SVGA_CB_CONTEXT_1      = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
    SVGA_CB_CONTEXT_MAX    = 0x2,
-   SVGA_CB_CONTEXT_HP_MAX = 0x2,
 } SVGACBContext;
 
 
@@ -733,9 +814,6 @@ SVGASignedPoint;
  * and must not be reused. Those capabilities will never be reported
  * by new versions of the SVGA device.
  *
- * XXX: Add longer descriptions for each capability, including a list
- *      of the new features that each capability provides.
- *
  * SVGA_CAP_IRQMASK --
  *    Provides device interrupts.  Adds device register SVGA_REG_IRQMASK
  *    to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
@@ -842,17 +920,51 @@ SVGASignedPoint;
  *      Allow the IntraSurfaceCopy command.
  *
  * SVGA_CAP2_DX2 --
- *      Allow the DefineGBSurface_v3, WholeSurfaceCopy.
+ *      Allow the DefineGBSurface_v3, WholeSurfaceCopy, WriteZeroSurface, and
+ *      HintZeroSurface commands, and the SVGA_REG_GUEST_DRIVER_ID register.
+ *
+ * SVGA_CAP2_GB_MEMSIZE_2 --
+ *      Allow the SVGA_REG_GBOBJECT_MEM_SIZE_KB register.
+ *
+ * SVGA_CAP2_SCREENDMA_REG --
+ *      Allow the SVGA_REG_SCREENDMA register.
+ *
+ * SVGA_CAP2_OTABLE_PTDEPTH_2 --
+ *      Allow 2 level page tables for OTable commands.
+ *
+ * SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT --
+ *      Allow a stretch blt from a non-multisampled surface to a multisampled
+ *      surface.
+ *
+ * SVGA_CAP2_CURSOR_MOB --
+ *      Allow the SVGA_REG_CURSOR_MOBID register.
+ *
+ * SVGA_CAP2_MSHINT --
+ *      Allow the SVGA_REG_MSHINT register.
+ *
+ * SVGA_CAP2_DX3 --
+ *      Allows the DefineGBSurface_v4 command.
+ *      Allows the DXDefineDepthStencilView_v2, DXDefineStreamOutputWithMob,
+ *      and DXBindStreamOutput commands if 3D is also available.
+ *      Allows the DXPredStagingCopy and DXStagingCopy commands if SM41
+ *      is also available.
  *
  * SVGA_CAP2_RESERVED --
  *      Reserve the last bit for extending the SVGA capabilities to some
  *      future mechanisms.
  */
-#define SVGA_CAP2_NONE               0x00000000
-#define SVGA_CAP2_GROW_OTABLE        0x00000001
-#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
-#define SVGA_CAP2_DX2                0x00000004
-#define SVGA_CAP2_RESERVED           0x80000000
+#define SVGA_CAP2_NONE                    0x00000000
+#define SVGA_CAP2_GROW_OTABLE             0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY      0x00000002
+#define SVGA_CAP2_DX2                     0x00000004
+#define SVGA_CAP2_GB_MEMSIZE_2            0x00000008
+#define SVGA_CAP2_SCREENDMA_REG           0x00000010
+#define SVGA_CAP2_OTABLE_PTDEPTH_2        0x00000020
+#define SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT 0x00000040
+#define SVGA_CAP2_CURSOR_MOB              0x00000080
+#define SVGA_CAP2_MSHINT                  0x00000100
+#define SVGA_CAP2_DX3                     0x00000400
+#define SVGA_CAP2_RESERVED                0x80000000
 
 
 /*
@@ -875,7 +987,9 @@ typedef enum {
    SVGABackdoorCapFifoCaps = 1,
    SVGABackdoorCap3dHWVersion = 2,
    SVGABackdoorCapDeviceCaps2 = 3,
-   SVGABackdoorCapMax = 4,
+   SVGABackdoorCapDevelCaps = 4,
+   SVGABackdoorDevelRenderer = 5,
+   SVGABackdoorCapMax = 6,
 } SVGABackdoorCapType;
 
 
@@ -1055,103 +1169,80 @@ enum {
 /*
  * FIFO Synchronization Registers
  *
- *  This explains the relationship between the various FIFO
- *  sync-related registers in IOSpace and in FIFO space.
- *
  *  SVGA_REG_SYNC --
  *
- *       The SYNC register can be used in two different ways by the guest:
+ *       The SYNC register can be used by the guest driver to signal to the
+ *       device that the guest driver is waiting for previously submitted
+ *       commands to complete.
  *
- *         1. If the guest wishes to fully sync (drain) the FIFO,
- *            it will write once to SYNC then poll on the BUSY
- *            register. The FIFO is sync'ed once BUSY is zero.
+ *       When the guest driver writes to the SYNC register, the device sets
+ *       the BUSY register to TRUE, and starts processing the submitted commands
+ *       (if it was not already doing so).  When all previously submitted
+ *       commands are finished and the device is idle again, it sets the BUSY
+ *       register back to FALSE.  (If the guest driver submits new commands
+ *       after writing the SYNC register, the new commands are not guaranteed
+ *       to have been processed.)
  *
- *         2. If the guest wants to asynchronously wake up the host,
- *            it will write once to SYNC without polling on BUSY.
- *            Ideally it will do this after some new commands have
- *            been placed in the FIFO, and after reading a zero
- *            from SVGA_FIFO_BUSY.
+ *       When guest drivers are submitting commands using the FIFO, the device
+ *       periodically polls to check for new FIFO commands when idle, which may
+ *       introduce a delay in command processing.  If the guest-driver wants
+ *       the commands to be processed quickly (which it typically does), it
+ *       should write SYNC after each batch of commands is committed to the
+ *       FIFO to immediately wake up the device.  For even better performance,
+ *       the guest can use the SVGA_FIFO_BUSY register to avoid these extra
+ *       SYNC writes if the device is already active, using the technique known
+ *       as "Ringing the Doorbell" (described below).  (Note that command
+ *       buffer submission implicitly wakes up the device, and so doesn't
+ *       suffer from this problem.)
  *
- *       (1) is the original behaviour that SYNC was designed to
- *       support.  Originally, a write to SYNC would implicitly
- *       trigger a read from BUSY. This causes us to synchronously
- *       process the FIFO.
- *
- *       This behaviour has since been changed so that writing SYNC
- *       will *not* implicitly cause a read from BUSY. Instead, it
- *       makes a channel call which asynchronously wakes up the MKS
- *       thread.
- *
- *       New guests can use this new behaviour to implement (2)
- *       efficiently. This lets guests get the host's attention
- *       without waiting for the MKS to poll, which gives us much
- *       better CPU utilization on SMP hosts and on UP hosts while
- *       we're blocked on the host GPU.
- *
- *       Old guests shouldn't notice the behaviour change. SYNC was
- *       never guaranteed to process the entire FIFO, since it was
- *       bounded to a particular number of CPU cycles. Old guests will
- *       still loop on the BUSY register until the FIFO is empty.
- *
- *       Writing to SYNC currently has the following side-effects:
- *
- *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
- *         - Asynchronously wakes up the MKS thread for FIFO processing
- *         - The value written to SYNC is recorded as a "reason", for
- *           stats purposes.
- *
- *       If SVGA_FIFO_BUSY is available, drivers are advised to only
- *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
- *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
- *       eventually set SVGA_FIFO_BUSY on its own, but this approach
- *       lets the driver avoid sending multiple asynchronous wakeup
- *       messages to the MKS thread.
+ *       The SYNC register can also be used in combination with BUSY to
+ *       synchronously ensure that all SVGA commands are processed (with both
+ *       the FIFO and command-buffers).  To do this, the guest driver should
+ *       write to SYNC, and then loop reading BUSY until BUSY returns FALSE.
+ *       This technique is known as a "Legacy Sync".
  *
  *  SVGA_REG_BUSY --
  *
  *       This register is set to TRUE when SVGA_REG_SYNC is written,
- *       and it reads as FALSE when the FIFO has been completely
- *       drained.
+ *       and is set back to FALSE when the device has finished processing
+ *       all commands and is idle again.
  *
- *       Every read from this register causes us to synchronously
- *       process FIFO commands. There is no guarantee as to how many
- *       commands each read will process.
+ *       Every read from the BUSY register will block for an undefined
+ *       amount of time, normally until the device finishes some interesting
+ *       unit of work or becomes idle.
  *
- *       CPU time spent processing FIFO commands will be billed to
- *       the guest.
- *
- *       New drivers should avoid using this register unless they
- *       need to guarantee that the FIFO is completely drained. It
- *       is overkill for performing a sync-to-fence. Older drivers
- *       will use this register for any type of synchronization.
+ *       Guest drivers can also do a partial Legacy Sync to check for some
+ *       particular condition, for instance by stopping early when a fence
+ *       passes before BUSY has been set back to FALSE.  This is particularly
+ *       useful if the guest-driver knows that it is blocked waiting on the
+ *       device, because it will yield CPU time back to the host.
  *
  *  SVGA_FIFO_BUSY --
  *
- *       This register is a fast way for the guest driver to check
- *       whether the FIFO is already being processed. It reads and
- *       writes at normal RAM speeds, with no monitor intervention.
+ *       The SVGA_FIFO_BUSY register is a fast way for the guest driver to check
+ *       whether the device is actively processing FIFO commands before writing
+ *       the more expensive SYNC register.
  *
- *       If this register reads as TRUE, the host is guaranteeing that
- *       any new commands written into the FIFO will be noticed before
- *       the MKS goes back to sleep.
+ *       If this register reads as TRUE, the device is actively processing
+ *       FIFO commands.
  *
- *       If this register reads as FALSE, no such guarantee can be
- *       made.
+ *       If this register reads as FALSE, the device may not be actively
+ *       processing commands, and the guest driver should try
+ *       "Ringing the Doorbell".
  *
- *       The guest should use this register to quickly determine
- *       whether or not it needs to wake up the host. If the guest
- *       just wrote a command or group of commands that it would like
- *       the host to begin processing, it should:
+ *       To Ring the Doorbell, the guest should:
  *
- *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
- *            action is necessary.
+ *       1. Have already written its batch of commands into the FIFO.
+ *       2. Check if the SVGA_FIFO_BUSY register is available by reading
+ *          SVGA_FIFO_MIN.
+ *       3. Read SVGA_FIFO_BUSY.  If it reads as TRUE, the device is actively
+ *          processing FIFO commands, and no further action is necessary.
+ *       4. If SVGA_FIFO_BUSY was FALSE, write TRUE to SVGA_REG_SYNC.
  *
- *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
- *            code that we've already sent a SYNC to the host and we
- *            don't need to send a duplicate.
- *
- *         3. Write a reason to SVGA_REG_SYNC. This will send an
- *            asynchronous wakeup to the MKS thread.
+ *       For maximum performance, this procedure should be followed after
+ *       every meaningful batch of commands has been written into the FIFO.
+ *       (Normally when the underlying application signals it's finished a
+ *       meaningful work unit by calling Flush.)
  */
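
Minimal sketches of the two idioms described above; read_reg(), write_reg() and fifo_busy() are hypothetical accessors (the last one reading the SVGA_FIFO_BUSY dword):

extern uint32 read_reg(uint32 reg);
extern void write_reg(uint32 reg, uint32 value);
extern Bool fifo_busy(void);

/* "Legacy Sync": drain all previously submitted work. */
static void legacy_sync(void)
{
   write_reg(SVGA_REG_SYNC, 1);
   while (read_reg(SVGA_REG_BUSY))
      ;   /* Each BUSY read may block while the device makes progress. */
}

/* "Ringing the Doorbell": wake the device only if it looks idle. */
static void ring_doorbell(void)
{
   if (!fifo_busy())
      write_reg(SVGA_REG_SYNC, 1);
}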
 
 
@@ -1164,9 +1255,6 @@ enum {
  *      Video -- SVGA Video overlay units are supported
  *      Escape -- Escape command is supported
  *
- * XXX: Add longer descriptions for each capability, including a list
- *      of the new features that each capability provides.
- *
  * SVGA_FIFO_CAP_SCREEN_OBJECT --
  *
  *    Provides dynamic multi-screen rendering, for improved Unity and
@@ -1279,6 +1367,15 @@ enum {
 
 
 /*
+ * ScreenDMA Register Values
+ */
+
+#define SVGA_SCREENDMA_REG_UNDEFINED    0
+#define SVGA_SCREENDMA_REG_NOT_PRESENT  1
+#define SVGA_SCREENDMA_REG_PRESENT      2
+#define SVGA_SCREENDMA_REG_MAX          3
+
+/*
  * Video overlay support
  */
 
@@ -1665,6 +1762,80 @@ SVGAFifoCmdDefineAlphaCursor;
 
 
 /*
+ *    Provide a new large cursor image, as an AND/XOR mask.
+ *
+ *    Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   uint32 andMaskDepth;
+   uint32 xorMaskDepth;
+   /*
+    * Followed by scanline data for AND mask, then XOR mask.
+    * Each scanline is padded to a 32-bit boundary.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAGBColorCursorHeader;
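
A sketch of the per-plane size implied by the padding rule above; purely illustrative:

static uint32 cursor_mask_bytes(uint32 width, uint32 height, uint32 depth)
{
   /* Each scanline is padded to a 32-bit boundary. */
   uint32 pitch = ((width * depth + 31) / 32) * 4;   /* bytes per scanline */

   return pitch * height;
}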
+
+
+/*
+ *    Provide a new large cursor image, in 32-bit BGRA format.
+ *
+ *    Should only be used for CursorMob functionality
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAGBAlphaCursorHeader;
+
+/*
+ * Define the SVGA guest backed cursor types
+ */
+
+typedef enum {
+   SVGA_COLOR_CURSOR       = 0,
+   SVGA_ALPHA_CURSOR       = 1,
+} SVGAGBCursorType;
+
+/*
+ *    Provide a new large cursor image.
+ *
+ *    Should only be used for CursorMob functionality.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGBCursorType type;
+   union {
+      SVGAGBColorCursorHeader colorHeader;
+      SVGAGBAlphaCursorHeader alphaHeader;
+   } header;
+   uint32 sizeInBytes;
+   /*
+    * Followed by the cursor data
+    */
+}
+#include "vmware_pack_end.h"
+SVGAGBCursorHeader;
+
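
The two cursor headers above determine the size of the payload that follows them; a hedged sketch of those computations (helper names are illustrative, not part of the device headers):

/* Alpha cursor payload: 32-bit BGRA, tightly packed scanlines. */
static inline uint32 svga_alpha_cursor_bytes(const SVGAGBAlphaCursorHeader *h)
{
	return h->width * h->height * 4;
}

/* AND or XOR mask payload: each scanline is padded to a 32-bit boundary. */
static inline uint32 svga_cursor_mask_bytes(uint32 width, uint32 height,
					    uint32 depth)
{
	const uint32 stride = ((width * depth + 31) / 32) * 4; /* bytes */

	return stride * height;
}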
+
+/*
  * SVGA_CMD_UPDATE_VERBOSE --
  *
  *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
@@ -2061,9 +2232,12 @@ SVGAFifoCmdRemapGMR2;
 #define SVGA_VRAM_MAX_SIZE         (128 * 1024 * 1024)
 #define SVGA_MEMORY_SIZE_MAX      (1024 * 1024 * 1024)
 #define SVGA_FIFO_SIZE_MAX           (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MIN       (32 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_MAX       (2 * 1024 * 1024)
-#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT   (256 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN     (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_2GB (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_3GB (3 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_4GB (4 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX_8GB (8 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT (256 * 1024)
 
 #define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
 
@@ -2086,4 +2260,6 @@ SVGAFifoCmdRemapGMR2;
 #define SVGA_FIFO_SIZE_GBOBJECTS          (256 * 1024)
 #define SVGA_VRAM_SIZE_GBOBJECTS     (4 * 1024 * 1024)
 
+#define SVGA_PCI_REGS_PAGES                        (1)
+
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
index 350bbc6..beddcce 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -37,6 +37,7 @@ typedef s8  int8;
 
 typedef uint64 PA;
 typedef uint32 PPN;
+typedef uint32 PPN32;
 typedef uint64 PPN64;
 
 typedef bool Bool;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 66e14e3..f415507 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -57,9 +57,11 @@
 
 #define VMW_BINDING_RT_BIT     0
 #define VMW_BINDING_PS_BIT     1
-#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_SO_T_BIT   2
 #define VMW_BINDING_VB_BIT     3
-#define VMW_BINDING_NUM_BITS   4
+#define VMW_BINDING_UAV_BIT    4
+#define VMW_BINDING_CS_UAV_BIT 5
+#define VMW_BINDING_NUM_BITS   6
 
 #define VMW_BINDING_PS_SR_BIT  0
 
@@ -75,6 +77,8 @@
  * @vertex_buffers: Vertex buffer bindings.
  * @index_buffer: Index buffer binding.
  * @per_shader: Per shader-type bindings.
+ * @ua_views: UAV bindings.
+ * @so_state: StreamOutput bindings.
  * @dirty: Bitmap tracking per binding-type changes that have not yet
  * been emitted to the device.
  * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
@@ -95,10 +99,12 @@ struct vmw_ctx_binding_state {
 	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
 	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
 	struct vmw_ctx_bindinfo_view ds_view;
-	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+	struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
 	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
 	struct vmw_ctx_bindinfo_ib index_buffer;
-	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
+	struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
+	struct vmw_ctx_bindinfo_so so_state;
 
 	unsigned long dirty;
 	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
@@ -115,12 +121,16 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
 static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
 				       bool rebind);
 static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
 static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+
 static void vmw_binding_build_asserts(void) __attribute__ ((unused));
 
 typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
@@ -151,6 +161,9 @@ static const size_t vmw_binding_shader_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
 	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
 	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
 };
 static const size_t vmw_binding_rt_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, render_targets),
@@ -162,6 +175,9 @@ static const size_t vmw_binding_cb_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
 	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
 	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
 };
 static const size_t vmw_binding_dx_ds_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, ds_view),
@@ -170,8 +186,11 @@ static const size_t vmw_binding_sr_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
 	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
 	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
 };
-static const size_t vmw_binding_so_offsets[] = {
+static const size_t vmw_binding_so_target_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, so_targets),
 };
 static const size_t vmw_binding_vb_offsets[] = {
@@ -180,6 +199,15 @@ static const size_t vmw_binding_vb_offsets[] = {
 static const size_t vmw_binding_ib_offsets[] = {
 	offsetof(struct vmw_ctx_binding_state, index_buffer),
 };
+static const size_t vmw_binding_uav_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
+};
+static const size_t vmw_binding_cs_uav_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
+};
+static const size_t vmw_binding_so_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, so_state),
+};
 
 static const struct vmw_binding_info vmw_binding_infos[] = {
 	[vmw_ctx_binding_shader] = {
@@ -214,10 +242,10 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
 		.size = sizeof(struct vmw_ctx_bindinfo_view),
 		.offsets = vmw_binding_dx_ds_offsets,
 		.scrub_func = vmw_binding_scrub_dx_rt},
-	[vmw_ctx_binding_so] = {
-		.size = sizeof(struct vmw_ctx_bindinfo_so),
-		.offsets = vmw_binding_so_offsets,
-		.scrub_func = vmw_binding_scrub_so},
+	[vmw_ctx_binding_so_target] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so_target),
+		.offsets = vmw_binding_so_target_offsets,
+		.scrub_func = vmw_binding_scrub_so_target},
 	[vmw_ctx_binding_vb] = {
 		.size = sizeof(struct vmw_ctx_bindinfo_vb),
 		.offsets = vmw_binding_vb_offsets,
@@ -226,6 +254,18 @@ static const struct vmw_binding_info vmw_binding_infos[] = {
 		.size = sizeof(struct vmw_ctx_bindinfo_ib),
 		.offsets = vmw_binding_ib_offsets,
 		.scrub_func = vmw_binding_scrub_ib},
+	[vmw_ctx_binding_uav] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_uav_offsets,
+		.scrub_func = vmw_binding_scrub_uav},
+	[vmw_ctx_binding_cs_uav] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_cs_uav_offsets,
+		.scrub_func = vmw_binding_scrub_cs_uav},
+	[vmw_ctx_binding_so] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so),
+		.offsets = vmw_binding_so_offsets,
+		.scrub_func = vmw_binding_scrub_so},
 };
 
 /**
@@ -312,6 +352,18 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 }
 
 /**
+ * vmw_binding_add_uav_index - Add UAV index for tracking.
+ * @cbs: Pointer to the context binding state tracker.
+ * @slot: UAV type to which this index is bound.
+ * @index: The splice index to track.
+ */
+void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
+			       uint32 index)
+{
+	cbs->ua_views[slot].index = index;
+}
+
+/**
  * vmw_binding_transfer: Transfer a context binding tracking entry.
  *
  * @cbs: Pointer to the persistent context binding state tracker.
@@ -450,6 +502,10 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 		vmw_binding_transfer(to, from, entry);
 		vmw_binding_drop(entry);
 	}
+
+	/* Also transfer uav splice indices */
+	to->ua_views[0].index = from->ua_views[0].index;
+	to->ua_views[1].index = from->ua_views[1].index;
 }
 
 /**
@@ -828,8 +884,8 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
 				   const struct vmw_ctx_bindinfo *bi,
 				   u32 max_num)
 {
-	const struct vmw_ctx_bindinfo_so *biso =
-		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+	const struct vmw_ctx_bindinfo_so_target *biso =
+		container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
 	unsigned long i;
 	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
 
@@ -854,11 +910,11 @@ static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
 }
 
 /**
- * vmw_binding_emit_set_so - Issue delayed streamout binding commands
+ * vmw_emit_set_so_target - Issue delayed streamout binding commands
  *
  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  */
-static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
 {
 	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
 	struct {
@@ -1005,6 +1061,66 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
 	return 0;
 }
 
+static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetUAViews body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+	view_id_size = cbs->bind_cmd_count * sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Splice index is specified by user-space. */
+	cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCSUAViews body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
+	view_id_size = cbs->bind_cmd_count * sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	/* Start index is specified by user-space. */
+	cmd->body.startIndex = cbs->ua_views[1].index;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
 /**
  * vmw_binding_emit_dirty - Issue delayed binding commands
  *
@@ -1030,12 +1146,18 @@ static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
 		case VMW_BINDING_PS_BIT:
 			ret = vmw_binding_emit_dirty_ps(cbs);
 			break;
-		case VMW_BINDING_SO_BIT:
-			ret = vmw_emit_set_so(cbs);
+		case VMW_BINDING_SO_T_BIT:
+			ret = vmw_emit_set_so_target(cbs);
 			break;
 		case VMW_BINDING_VB_BIT:
 			ret = vmw_emit_set_vb(cbs);
 			break;
+		case VMW_BINDING_UAV_BIT:
+			ret = vmw_emit_set_uav(cbs);
+			break;
+		case VMW_BINDING_CS_UAV_BIT:
+			ret = vmw_emit_set_cs_uav(cbs);
+			break;
 		default:
 			BUG();
 		}
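
For context, the switch above runs once per set bit in cbs->dirty; a hedged sketch of the surrounding dispatch pattern (the loop itself is outside this hunk, and vmw_binding_emit_one() is a hypothetical stand-in for the switch):

	unsigned long hit;
	int ret;

	for_each_set_bit(hit, &cbs->dirty, VMW_BINDING_NUM_BITS) {
		ret = vmw_binding_emit_one(cbs, hit); /* hypothetical */
		if (ret)
			return ret;
		__clear_bit(hit, &cbs->dirty);
	}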
@@ -1089,18 +1211,18 @@ static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
 }
 
 /**
- * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
  * scrub from a context
  *
  * @bi: single binding information.
  * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_ctx_binding_state *cbs =
 		vmw_context_binding_state(bi->ctx);
 
-	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+	__set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
 
 	return 0;
 }
@@ -1162,6 +1284,49 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
 	return 0;
 }
 
+static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
+	return 0;
+}
+
+static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
+ * @bi: Single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_so *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetStreamOutput body;
+	} *cmd;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
 /**
  * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
  * memory accounting.
@@ -1248,8 +1413,8 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
  * Each time a resource is put on the validation list as the result of a
  * context binding referencing it, we need to determine whether that resource
  * will be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently rendertarget-, depth-stencil-, and
- * stream-output-target bindings are capable of dirtying its resource.
+ * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target,
+ * and unordered access view bindings are capable of dirtying their resources.
  *
  * Return: Whether the binding type dirties the resource its binding points to.
  */
@@ -1259,11 +1424,13 @@ u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
 		[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
 		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
 		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
-		[vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
+		[vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
 	};
 
 	/* Review this function as new bindings are added. */
-	BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+	BUILD_BUG_ON(vmw_ctx_binding_max != 14);
 	return is_binding_dirtying[binding_type];
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index cd9805c..dcb71fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -33,6 +33,8 @@
 
 #define VMW_MAX_VIEW_BINDINGS 128
 
+#define VMW_MAX_UAV_BIND_TYPE 2
+
 struct vmw_private;
 struct vmw_ctx_binding_state;
 
@@ -48,9 +50,12 @@ enum vmw_ctx_binding_type {
 	vmw_ctx_binding_dx_rt,
 	vmw_ctx_binding_sr,
 	vmw_ctx_binding_ds,
-	vmw_ctx_binding_so,
+	vmw_ctx_binding_so_target,
 	vmw_ctx_binding_vb,
 	vmw_ctx_binding_ib,
+	vmw_ctx_binding_uav,
+	vmw_ctx_binding_cs_uav,
+	vmw_ctx_binding_so,
 	vmw_ctx_binding_max
 };
 
@@ -128,14 +133,14 @@ struct vmw_ctx_bindinfo_view {
 };
 
 /**
- * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ * struct vmw_ctx_bindinfo_so_target - StreamOutput binding metadata
  *
  * @bi: struct vmw_ctx_bindinfo we derive from.
  * @offset: Device data used to reconstruct binding command.
  * @size: Device data used to reconstruct binding command.
  * @slot: Device data used to reconstruct binding command.
  */
-struct vmw_ctx_bindinfo_so {
+struct vmw_ctx_bindinfo_so_target {
 	struct vmw_ctx_bindinfo bi;
 	uint32 offset;
 	uint32 size;
@@ -189,9 +194,31 @@ struct vmw_dx_shader_bindings {
 	unsigned long dirty;
 };
 
+/**
+ * struct vmw_ctx_bindinfo_uav - UAV context binding state.
+ * @views: UAV view bindings.
+ * @index: The device splice index set by user-space.
+ */
+struct vmw_ctx_bindinfo_uav {
+	struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS];
+	uint32 index;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - Stream output binding metadata.
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 slot;
+};
+
 extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 			    const struct vmw_ctx_bindinfo *ci,
 			    u32 shader_slot, u32 slot);
+extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs,
+				      uint32 slot, uint32 splice_index);
 extern void
 vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 			 struct vmw_ctx_binding_state *from);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 065015d..3b41cf6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 		 * actually call into the already enabled manager, when
 		 * binding the MOB.
 		 */
-		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+		    !dev_priv->has_mob)
 			return -ENOMEM;
 
 		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a56c9d8..61c24633 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -36,7 +36,7 @@ struct vmw_user_context {
 	struct vmw_resource res;
 	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
-	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
 	spinlock_t cotable_lock;
 	struct vmw_buffer_object *dx_query_mob;
 };
@@ -116,12 +116,15 @@ static const struct vmw_res_func vmw_dx_context_func = {
  * Context management:
  */
 
-static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
+				       struct vmw_user_context *uctx)
 {
 	struct vmw_resource *res;
 	int i;
+	u32 cotable_max = has_sm5_context(dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 
-	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+	for (i = 0; i < cotable_max; ++i) {
 		spin_lock(&uctx->cotable_lock);
 		res = uctx->cotables[i];
 		uctx->cotables[i] = NULL;
@@ -155,7 +158,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
-		vmw_context_cotables_unref(uctx);
+		vmw_context_cotables_unref(dev_priv, uctx);
 		return;
 	}
 
@@ -208,7 +211,9 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	spin_lock_init(&uctx->cotable_lock);
 
 	if (dx) {
-		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		u32 cotable_max = has_sm5_context(dev_priv) ?
+			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+		for (i = 0; i < cotable_max; ++i) {
 			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
 							      &uctx->res, i);
 			if (IS_ERR(uctx->cotables[i])) {
@@ -222,7 +227,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	return 0;
 
 out_cotables:
-	vmw_context_cotables_unref(uctx);
+	vmw_context_cotables_unref(dev_priv, uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -545,10 +550,12 @@ void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx, struct vmw_user_context, res);
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 	int i;
 
 	vmw_binding_state_scrub(uctx->cbs);
-	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+	for (i = 0; i < cotable_max; ++i) {
 		struct vmw_resource *res;
 
 		/* Avoid racing with ongoing cotable destruction. */
@@ -731,7 +738,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	};
 	int ret;
 
-	if (!dev_priv->has_dx && dx) {
+	if (!has_sm4_context(dev_priv) && dx) {
 		VMW_DEBUG_USER("DX contexts not supported by device.\n");
 		return -EINVAL;
 	}
@@ -839,7 +846,10 @@ struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
 					 SVGACOTableType cotable_type)
 {
-	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
+
+	if (cotable_type >= cotable_max)
 		return ERR_PTR(-EINVAL);
 
 	return container_of(ctx, struct vmw_user_context, res)->
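
The has_sm5_context() ? SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX selection now repeats in several functions here and in vmwgfx_execbuf.c; a hypothetical helper could collapse the duplication (sketch only, not part of this series):

/* Hypothetical: upper bound of valid cotable types for this device. */
static inline u32 vmw_cotable_max(const struct vmw_private *dev_priv)
{
	return has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
}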
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 3ca5cf3..65e8e7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -80,9 +80,10 @@ static const struct vmw_cotable_info co_info[] = {
 	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
 	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
 	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
-	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+	{1, sizeof(SVGACOTableDXStreamOutputEntry),
+	 &vmw_dx_streamoutput_cotable_list_scrub},
 	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
-	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
+	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
 };
 
 /*
@@ -102,6 +103,7 @@ const SVGACOTableType vmw_cotable_scrub_order[] = {
 	SVGA_COTABLE_SAMPLER,
 	SVGA_COTABLE_STREAMOUTPUT,
 	SVGA_COTABLE_DXQUERY,
+	SVGA_COTABLE_UAVIEW,
 };
 
 static int vmw_cotable_bind(struct vmw_resource *res,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 827458f..71e45b56 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,6 +29,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/mem_encrypt.h>
 
 #include <drm/drm_drv.h>
 #include <drm/drm_ioctl.h>
@@ -289,6 +290,8 @@ static void vmw_print_capabilities2(uint32_t capabilities2)
 		DRM_INFO("  Grow oTable.\n");
 	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
 		DRM_INFO("  IntraSurface copy.\n");
+	if (capabilities2 & SVGA_CAP2_DX3)
+		DRM_INFO("  DX3.\n");
 }
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -448,7 +451,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
 	if (IS_ERR(dev_priv->cman)) {
 		dev_priv->cman = NULL;
-		dev_priv->has_dx = false;
+		dev_priv->sm_type = VMW_SM_LEGACY;
 	}
 
 	ret = vmw_request_device_late(dev_priv);
@@ -575,6 +578,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_populate] = "Caching DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
+	/* TTM currently doesn't fully support SEV encryption. */
+	if (mem_encrypt_active())
+		return -EINVAL;
+
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 	else if (vmw_restrict_iommu)
@@ -682,8 +689,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	ret = vmw_dma_select_mode(dev_priv);
 	if (unlikely(ret != 0)) {
-		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		DRM_INFO("Restricting capabilities since DMA not available.\n");
 		refuse_dma = true;
+		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			DRM_INFO("Disabling 3D acceleration.\n");
 	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -711,9 +720,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->max_mob_pages = 0;
 	dev_priv->max_mob_size = 0;
 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-		uint64_t mem_size =
-			vmw_read(dev_priv,
-				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
+		uint64_t mem_size;
+
+		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
+			mem_size = vmw_read(dev_priv,
+					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
+		else
+			mem_size =
+				vmw_read(dev_priv,
+					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
 		/*
 		 * Workaround for low memory 2D VMs to compensate for the
@@ -866,7 +881,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->has_gmr = false;
 	}
 
-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+	if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && !refuse_dma) {
 		dev_priv->has_mob = true;
 		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
 				   VMW_PL_MOB) != 0) {
@@ -876,14 +891,32 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		}
 	}
 
-	if (dev_priv->has_mob) {
+	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
 		spin_lock(&dev_priv->cap_lock);
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
-		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4;
 		spin_unlock(&dev_priv->cap_lock);
 	}
 
 	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
+
+	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
+	if (has_sm4_context(dev_priv) &&
+	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);
+
+		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+			dev_priv->sm_type = VMW_SM_4_1;
+
+		if (has_sm4_1_context(dev_priv) &&
+		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
+			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
+			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
+				dev_priv->sm_type = VMW_SM_5;
+		}
+	}
+
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
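
The detection ladder above uses the device's write-index-then-read devcap protocol: write the capability index to SVGA_REG_DEV_CAP, then read the value back from the same register. A minimal sketch of that protocol as a helper (the wrapper name is hypothetical; some callers serialize the register pair with cap_lock):

/* Sketch: query a single device capability. */
static inline uint32 vmw_devcap_get(struct vmw_private *dev_priv, uint32 cap)
{
	vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap);
	return vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}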
@@ -893,23 +926,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (ret)
 		goto out_no_fifo;
 
-	if (dev_priv->has_dx) {
-		/*
-		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
-		 * support
-		 */
-		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
-			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
-					SVGA3D_DEVCAP_SM41);
-			dev_priv->has_sm4_1 = vmw_read(dev_priv,
-							SVGA_REG_DEV_CAP);
-		}
-	}
-
-	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
 	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
 		 ? "yes." : "no.");
-	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
+	if (dev_priv->sm_type == VMW_SM_5)
+		DRM_INFO("SM5 support available.\n");
+	if (dev_priv->sm_type == VMW_SM_4_1)
+		DRM_INFO("SM4_1 support available.\n");
+	if (dev_priv->sm_type == VMW_SM_4)
+		DRM_INFO("SM4 support available.\n");
 
 	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
 		VMWGFX_REPO, VMWGFX_GIT_VERSION);
@@ -1399,9 +1423,6 @@ static const struct file_operations vmwgfx_driver_fops = {
 static struct drm_driver driver = {
 	.driver_features =
 	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
-	.get_vblank_counter = vmw_get_vblank_counter,
-	.enable_vblank = vmw_enable_vblank,
-	.disable_vblank = vmw_disable_vblank,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
 	.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b6939..5ddbcb9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -58,7 +58,7 @@
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DATE "20200114"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 17
+#define VMWGFX_DRIVER_MINOR 18
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
@@ -202,6 +202,7 @@ enum vmw_res_type {
 	vmw_res_dx_context,
 	vmw_res_cotable,
 	vmw_res_view,
+	vmw_res_streamoutput,
 	vmw_res_max
 };
 
@@ -210,7 +211,8 @@ enum vmw_res_type {
  */
 enum vmw_cmdbuf_res_type {
 	vmw_cmdbuf_res_shader,
-	vmw_cmdbuf_res_view
+	vmw_cmdbuf_res_view,
+	vmw_cmdbuf_res_streamoutput
 };
 
 struct vmw_cmdbuf_res_manager;
@@ -223,24 +225,58 @@ struct vmw_cursor_snooper {
 struct vmw_framebuffer;
 struct vmw_surface_offset;
 
-struct vmw_surface {
-	struct vmw_resource res;
-	SVGA3dSurfaceAllFlags flags;
-	uint32_t format;
-	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+/**
+ * struct vmw_surface_metadata - Metadata describing a surface.
+ *
+ * @flags: Device flags.
+ * @format: Surface SVGA3D_x format.
+ * @mip_levels: Mip levels for each face. For GB surfaces only the first
+ *              index is used.
+ * @multisample_count: Sample count.
+ * @multisample_pattern: Sample patterns.
+ * @quality_level: Quality level.
+ * @autogen_filter: Filter for automatically generated mipmaps.
+ * @array_size: Number of array elements for a 1D/2D texture. For a cubemap
+ *              texture this is the number of faces * array_size. This should
+ *              be 0 for a pre-SM4 device.
+ * @buffer_byte_stride: Buffer byte stride.
+ * @num_sizes: Size of @sizes. For a GB surface this should always be 1.
+ * @base_size: Surface dimension.
+ * @sizes: Array representing mip sizes. Legacy only.
+ * @scanout: Whether this surface will be used for scanout.
+ *
+ * This tracks metadata for both legacy and guest backed surfaces.
+ */
+struct vmw_surface_metadata {
+	u64 flags;
+	u32 format;
+	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	u32 multisample_count;
+	u32 multisample_pattern;
+	u32 quality_level;
+	u32 autogen_filter;
+	u32 array_size;
+	u32 num_sizes;
+	u32 buffer_byte_stride;
 	struct drm_vmw_size base_size;
 	struct drm_vmw_size *sizes;
-	uint32_t num_sizes;
 	bool scanout;
-	uint32_t array_size;
-	/* TODO so far just a extra pointer */
+};
+
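
A hedged usage sketch for the new metadata structure, filling it for a simple 2D guest backed surface (the field values are illustrative only):

struct vmw_surface_metadata metadata = {
	.format = SVGA3D_R8G8B8A8_UNORM,  /* any SVGA3D_x format */
	.mip_levels[0] = 1,               /* GB: only the first index is used */
	.num_sizes = 1,                   /* always 1 for a GB surface */
	.base_size = { .width = 1024, .height = 768, .depth = 1 },
	.scanout = true,
};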
+/**
+ * struct vmw_surface - Resource structure for a surface.
+ *
+ * @res: The base resource for this surface.
+ * @metadata: Metadata for this surface resource.
+ * @snooper: Cursor data. Legacy surface only.
+ * @offsets: Legacy surface only.
+ * @view_list: List of views bound to this surface.
+ */
+struct vmw_surface {
+	struct vmw_resource res;
+	struct vmw_surface_metadata metadata;
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
-	SVGA3dTextureFilter autogen_filter;
-	uint32_t multisample_count;
 	struct list_head view_list;
-	SVGA3dMSPattern multisample_pattern;
-	SVGA3dMSQualityLevel quality_level;
 };
 
 struct vmw_marker_queue {
@@ -441,6 +477,22 @@ enum {
 	VMW_IRQTHREAD_MAX
 };
 
+/**
+ * enum vmw_sm_type - Graphics context capability supported by device.
+ * @VMW_SM_LEGACY: Pre DX context.
+ * @VMW_SM_4: Context support up to SM4.
+ * @VMW_SM_4_1: Context support up to SM4_1.
+ * @VMW_SM_5: Context support up to SM5.
+ * @VMW_SM_MAX: Should be the last entry.
+ */
+enum vmw_sm_type {
+	VMW_SM_LEGACY = 0,
+	VMW_SM_4,
+	VMW_SM_4_1,
+	VMW_SM_5,
+	VMW_SM_MAX
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 
@@ -475,22 +527,9 @@ struct vmw_private {
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
-	bool has_dx;
 	bool assume_16bpp;
-	bool has_sm4_1;
 
-	/*
-	 * VGA registers.
-	 */
-
-	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
-	uint32_t vga_width;
-	uint32_t vga_height;
-	uint32_t vga_bpp;
-	uint32_t vga_bpl;
-	uint32_t vga_pitchlock;
-
-	uint32_t num_displays;
+	enum vmw_sm_type sm_type;
 
 	/*
 	 * Framebuffer info.
@@ -661,6 +700,39 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	return val;
 }
 
+/**
+ * has_sm4_context - Does the device support SM4 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4 contexts, false otherwise.
+ */
+static inline bool has_sm4_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4);
+}
+
+/**
+ * has_sm4_1_context - Does the device support SM4_1 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM4_1 contexts, false otherwise.
+ */
+static inline bool has_sm4_1_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_4_1);
+}
+
+/**
+ * has_sm5_context - Does the device support SM5 context.
+ * @dev_priv: Device private.
+ *
+ * Return: True if the device supports SM5 contexts, false otherwise.
+ */
+static inline bool has_sm5_context(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->sm_type >= VMW_SM_5);
+}
+
 extern void vmw_svga_enable(struct vmw_private *dev_priv);
 extern void vmw_svga_disable(struct vmw_private *dev_priv);
 
@@ -900,7 +972,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
 extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -947,7 +1018,6 @@ extern struct ttm_placement vmw_mob_placement;
 extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_placement vmw_nonfixed_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
 extern const struct vmw_sg_table *
@@ -1085,8 +1155,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);
 
 int vmw_kms_init(struct vmw_private *dev_priv);
 int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
@@ -1100,9 +1168,9 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+int vmw_enable_vblank(struct drm_crtc *crtc);
+void vmw_disable_vblank(struct drm_crtc *crtc);
 int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_file *file_priv,
 		    struct vmw_framebuffer *vfb,
@@ -1139,7 +1207,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
 int vmw_overlay_close(struct vmw_private *dev_priv);
 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
 int vmw_overlay_resume_all(struct vmw_private *dev_priv);
 int vmw_overlay_pause_all(struct vmw_private *dev_priv);
 int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1186,10 +1253,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern int vmw_context_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     int id,
-			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1219,7 +1282,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 extern const struct vmw_user_resource_conv *user_surface_converter;
 
-extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1230,11 +1292,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file_priv);
 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 					  struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
-			     struct ttm_object_file *tfile,
-			     uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
-				struct vmw_surface *srf);
 int vmw_surface_gb_priv_define(struct drm_device *dev,
 			       uint32_t user_accounting_size,
 			       SVGA3dSurfaceAllFlags svga3d_flags,
@@ -1254,6 +1311,11 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
 					      void *data,
 					      struct drm_file *file_priv);
 
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+			  uint32_t user_accounting_size,
+			  const struct vmw_surface_metadata *req,
+			  struct vmw_surface **srf_out);
+
 /*
  * Shader management - vmwgfx_shader.c
  */
@@ -1287,6 +1349,24 @@ vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
 		  u32 user_key, SVGA3dShaderType shader_type);
 
 /*
+ * Streamoutput management
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key);
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+			    struct vmw_resource *ctx,
+			    SVGA3dStreamOutputId user_key,
+			    struct list_head *list);
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size);
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+			       SVGA3dStreamOutputId user_key,
+			       struct list_head *list);
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+					    struct list_head *list,
+					    bool readback);
+
+/*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
  */
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 73489a4..367d5b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -459,10 +459,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	int ret = 0;
 	struct vmw_resource *res;
 	u32 i;
+	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
+		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 
 	/* Add all cotables to the validation list. */
-	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+	if (has_sm4_context(dev_priv) &&
+	    vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < cotable_max; ++i) {
 			res = vmw_context_cotable(ctx, i);
 			if (IS_ERR(res))
 				continue;
@@ -489,7 +492,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 			break;
 	}
 
-	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+	if (has_sm4_context(dev_priv) &&
+	    vmw_res_type(ctx) == vmw_res_dx_context) {
 		struct vmw_buffer_object *dx_query_mob;
 
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
@@ -2116,6 +2120,9 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 				      SVGA3dCmdHeader *header)
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
+	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
+		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
+
 	struct vmw_resource *res = NULL;
 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 	struct vmw_ctx_bindinfo_cb binding;
@@ -2139,7 +2146,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 	binding.size = cmd->body.sizeInBytes;
 	binding.slot = cmd->body.slot;
 
-	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+	if (binding.shader_slot >= max_shader_num ||
 	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
 		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
 			       (unsigned int) cmd->body.type,
@@ -2167,12 +2174,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
 		container_of(header, typeof(*cmd), header);
+	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
+
 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
 		sizeof(SVGA3dShaderResourceViewId);
 
 	if ((u64) cmd->body.startView + (u64) num_sr_view >
 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
-	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+	    cmd->body.type >= max_allowed) {
 		VMW_DEBUG_USER("Invalid shader binding.\n");
 		return -EINVAL;
 	}
@@ -2196,6 +2206,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 				 SVGA3dCmdHeader *header)
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
+	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
+		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
 	struct vmw_resource *res = NULL;
 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 	struct vmw_ctx_bindinfo_shader binding;
@@ -2206,7 +2218,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 
 	cmd = container_of(header, typeof(*cmd), header);
 
-	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+	if (cmd->body.type >= max_allowed ||
 	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
 		VMW_DEBUG_USER("Illegal shader type %u.\n",
 			       (unsigned int) cmd->body.type);
@@ -2467,7 +2479,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
 				     SVGA3dCmdHeader *header)
 {
 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
-	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_ctx_bindinfo_so_target binding;
 	struct vmw_resource *res;
 	struct {
 		SVGA3dCmdHeader header;
@@ -2497,7 +2509,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
 
 		binding.bi.ctx = ctx_node->ctx;
 		binding.bi.res = res;
-		binding.bi.bt = vmw_ctx_binding_so,
+		binding.bi.bt = vmw_ctx_binding_so_target,
 		binding.offset = cmd->targets[i].offset;
 		binding.size = cmd->targets[i].sizeInBytes;
 		binding.slot = i;
@@ -2804,6 +2816,352 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
 				 &cmd->body.surface.sid, NULL);
 }
 
+static int vmw_cmd_sm5(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
+}
+
+static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearUAViewUint body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+				  cmd->body.uaViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearUAViewFloat body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	struct vmw_resource *ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
+				  cmd->body.uaViewId);
+
+	return PTR_ERR_OR_ZERO(ret);
+}
+
+static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
+			   struct vmw_sw_context *sw_context,
+			   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetUAViews body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dUAViewId);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+		VMW_DEBUG_USER("Invalid UAV binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
+				    num_uav, 0);
+	if (ret)
+		return ret;
+
+	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
+				  cmd->body.uavSpliceIndex);
+
+	return ret;
+}
+
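
Both this validator and vmw_cmd_set_cs_uav() below recover the view count from the command header, following the SVGA3D convention that header.size counts the fixed body plus a trailing array; a sketch of that convention (helper name illustrative):

/* Number of trailing array elements in a variable-length command. */
static inline u32 svga_trailing_count(const SVGA3dCmdHeader *header,
				      size_t body_size, size_t elem_size)
{
	return (header->size - body_size) / elem_size;
}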
+static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCSUAViews body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dUAViewId);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (num_uav > SVGA3D_MAX_UAVIEWS) {
+		VMW_DEBUG_USER("Invalid UAV binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
+				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
+				    num_uav, 0);
+	if (ret)
+		return ret;
+
+	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
+				  cmd->body.startIndex);
+
+	return ret;
+}
+
+static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
+					  struct vmw_sw_context *sw_context,
+					  SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineStreamOutputWithMob body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+	ret = vmw_cotable_notify(res, cmd->body.soid);
+	if (ret)
+		return ret;
+
+	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
+				       cmd->body.soid,
+				       &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyStreamOutput body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * When the device does not support SM5, the streamoutput-with-mob
+	 * command is not available to user-space. Simply return in this case.
+	 */
+	if (!has_sm5_context(dev_priv))
+		return 0;
+
+	/*
+	 * On an SM5-capable device, if the lookup fails then user-space
+	 * probably used the old streamoutput define command. Return without
+	 * an error.
+	 */
+	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+					 cmd->body.soid);
+	if (IS_ERR(res))
+		return 0;
+
+	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
+					  &sw_context->staged_cmd_res);
+}
+
+static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindStreamOutput body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+					 cmd->body.soid);
+	if (IS_ERR(res)) {
+		DRM_ERROR("Cound not find streamoutput to bind.\n");
+		return PTR_ERR(res);
+	}
+
+	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
+
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+					    VMW_RES_DIRTY_NONE);
+	if (ret) {
+		DRM_ERROR("Error creating resource validation node.\n");
+		return ret;
+	}
+
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+					 &cmd->body.mobid,
+					 cmd->body.offsetInBytes);
+}
+
+static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct vmw_ctx_bindinfo_so binding;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetStreamOutput body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	if (cmd->body.soid == SVGA3D_INVALID_ID)
+		return 0;
+
+	/*
+	 * When the device does not support SM5, the streamoutput-with-mob
+	 * command is not available to user-space. Simply return in this case.
+	 */
+	if (!has_sm5_context(dev_priv))
+		return 0;
+
+	/*
+	 * On an SM5-capable device, if the lookup fails then user-space
+	 * probably used the old streamoutput define command. Return without
+	 * an error.
+	 */
+	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
+					 cmd->body.soid);
+	if (IS_ERR(res))
+		return 0;
+
+	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+					    VMW_RES_DIRTY_NONE);
+	if (ret) {
+		DRM_ERROR("Error creating resource validation node.\n");
+		return ret;
+	}
+
+	binding.bi.ctx = ctx_node->ctx;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_so;
+	binding.slot = 0; /* Only one SO can be set on a context at a time. */
+
+	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
+			binding.slot);
+
+	return ret;
+}
+
+static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct vmw_draw_indexed_instanced_indirect_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct vmw_draw_instanced_indirect_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDrawInstancedIndirect body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.argsBufferSid, NULL);
+}
+
+static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_dispatch_indirect_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDispatchIndirect body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	if (!has_sm5_context(dev_priv))
+		return -EINVAL;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 VMW_RES_DIRTY_NONE, user_surface_converter,
+				 &cmd->body.argsBufferSid, NULL);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -2922,18 +3280,12 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    false, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
-		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
-		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
-		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
-		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
-		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
-		    false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
@@ -3141,9 +3493,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
 		    &vmw_cmd_dx_so_define, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
-		    &vmw_cmd_dx_cid_check, true, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
-		    true, false, true),
+		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
+		    &vmw_cmd_dx_set_streamoutput, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
 		    &vmw_cmd_dx_set_so_targets, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
@@ -3159,6 +3511,37 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
 		    true, false, true),
+
+	/*
+	 * SM5 commands
+	 */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
+		    &vmw_cmd_clear_uav_float, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
+		    false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
+		    true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
+		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
+		    &vmw_cmd_instanced_indirect, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
+		    &vmw_cmd_dispatch_indirect, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
+		    false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
+		    &vmw_cmd_sm5_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
+		    &vmw_cmd_dx_define_streamoutput, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
+		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
 };
 
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e5252ef..6941689 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
 	u32 *fifo_mem = dev_priv->mmio_virt;
 
-	preempt_disable();
 	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
-	preempt_enable();
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a15375e..f681b7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,10 +114,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			(dev_priv->active_display_unit == vmw_du_screen_target);
 		break;
 	case DRM_VMW_PARAM_DX:
-		param->value = dev_priv->has_dx;
+		param->value = has_sm4_context(dev_priv);
 		break;
 	case DRM_VMW_PARAM_SM4_1:
-		param->value = dev_priv->has_sm4_1;
+		param->value = has_sm4_1_context(dev_priv);
+		break;
+	case DRM_VMW_PARAM_SM5:
+		param->value = has_sm5_context(dev_priv);
 		break;
 	default:
 		return -EINVAL;
@@ -126,14 +129,17 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
+static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
 {
 	/*
 	 * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
 	 * to check the sample count supported by the virtual device. Since
 	 * there never was support for multisample counts on backing MOBs,
 	 * return 0.
+	 *
+	 * The MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by the
+	 * virtual device.
 	 */
-	if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+	if (cap == SVGA3D_DEVCAP_DEAD5)
 		return 0;
 
 	return fmt_value;
@@ -164,7 +170,7 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 	for (i = 0; i < max_size; ++i) {
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 		compat_cap->pairs[i][0] = i;
-		compat_cap->pairs[i][1] = vmw_mask_multisample
+		compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
 			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
 	}
 	spin_unlock(&dev_priv->cap_lock);
@@ -220,7 +226,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		spin_lock(&dev_priv->cap_lock);
 		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
-			*bounce32++ = vmw_mask_multisample
+			*bounce32++ = vmw_mask_legacy_multisample
 				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
 		}
 		spin_unlock(&dev_priv->cap_lock);
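[Editorial sketch] With DRM_VMW_PARAM_SM5 wired up, user space can probe the new capability through the existing GETPARAM path. A minimal libdrm sketch, assuming an open vmwgfx fd and eliding error handling:

	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_SM5 };

	if (drmCommandWriteRead(fd, DRM_VMW_GETPARAM, &arg, sizeof(arg)) == 0 &&
	    arg.value != 0)
		/* host exposes Shader Model 5 contexts */;
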
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f47d571..04d6659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -905,14 +905,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	 */
 
 	/* Surface must be marked as a scanout. */
-	if (unlikely(!surface->scanout))
+	if (unlikely(!surface->metadata.scanout))
 		return -EINVAL;
 
-	if (unlikely(surface->mip_levels[0] != 1 ||
-		     surface->num_sizes != 1 ||
-		     surface->base_size.width < mode_cmd->width ||
-		     surface->base_size.height < mode_cmd->height ||
-		     surface->base_size.depth != 1)) {
+	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
+		     surface->metadata.num_sizes != 1 ||
+		     surface->metadata.base_size.width < mode_cmd->width ||
+		     surface->metadata.base_size.height < mode_cmd->height ||
+		     surface->metadata.base_size.depth != 1)) {
 		DRM_ERROR("Incompatible surface dimensions "
 			  "for requested mode.\n");
 		return -EINVAL;
@@ -941,7 +941,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	 * For DX, surface format validation is done when surface->scanout
 	 * is set.
 	 */
-	if (!dev_priv->has_dx && format != surface->format) {
+	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
@@ -1144,8 +1144,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
 			       struct vmw_buffer_object *bo_mob,
 			       struct vmw_surface **srf_out)
 {
+	struct vmw_surface_metadata metadata = {0};
 	uint32_t format;
-	struct drm_vmw_size content_base_size = {0};
 	struct vmw_resource *res;
 	unsigned int bytes_pp;
 	struct drm_format_name_buf format_name;
@@ -1175,22 +1175,15 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
-	content_base_size.height = mode_cmd->height;
-	content_base_size.depth  = 1;
+	metadata.format = format;
+	metadata.mip_levels[0] = 1;
+	metadata.num_sizes = 1;
+	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
+	metadata.base_size.height = mode_cmd->height;
+	metadata.base_size.depth = 1;
+	metadata.scanout = true;
 
-	ret = vmw_surface_gb_priv_define(dev,
-					 0, /* kernel visible only */
-					 0, /* flags */
-					 format,
-					 true, /* can be a scanout buffer */
-					 1, /* num of mip levels */
-					 0,
-					 0,
-					 content_base_size,
-					 SVGA3D_MS_PATTERN_NONE,
-					 SVGA3D_MS_QUALITY_NONE,
-					 srf_out);
+	ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
 	if (ret) {
 		DRM_ERROR("Failed to allocate proxy content buffer\n");
 		return ret;
@@ -1897,87 +1890,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
 	return 0;
 }
 
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
-	struct vmw_vga_topology_state *save;
-	uint32_t i;
-
-	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
-	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
-	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
-	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
-		vmw_priv->vga_pitchlock =
-		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
-	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
-							SVGA_FIFO_PITCHLOCK);
-
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
-		return 0;
-
-	vmw_priv->num_displays = vmw_read(vmw_priv,
-					  SVGA_REG_NUM_GUEST_DISPLAYS);
-
-	if (vmw_priv->num_displays == 0)
-		vmw_priv->num_displays = 1;
-
-	for (i = 0; i < vmw_priv->num_displays; ++i) {
-		save = &vmw_priv->vga_save[i];
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
-		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
-		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
-		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
-		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
-		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-		if (i == 0 && vmw_priv->num_displays == 1 &&
-		    save->width == 0 && save->height == 0) {
-
-			/*
-			 * It should be fairly safe to assume that these
-			 * values are uninitialized.
-			 */
-
-			save->width = vmw_priv->vga_width - save->pos_x;
-			save->height = vmw_priv->vga_height - save->pos_y;
-		}
-	}
-
-	return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
-	struct vmw_vga_topology_state *save;
-	uint32_t i;
-
-	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
-	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
-	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
-		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
-			  vmw_priv->vga_pitchlock);
-	else if (vmw_fifo_have_pitchlock(vmw_priv))
-		vmw_mmio_write(vmw_priv->vga_pitchlock,
-			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
-		return 0;
-
-	for (i = 0; i < vmw_priv->num_displays; ++i) {
-		save = &vmw_priv->vga_save[i];
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
-	return 0;
-}
-
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height)
@@ -1991,7 +1903,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 /**
  * Function called by DRM code with vbl_lock held.
  */
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
 {
 	return 0;
 }
@@ -1999,7 +1911,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 /**
  * Function called by DRM code with vbl_lock held.
  */
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int vmw_enable_vblank(struct drm_crtc *crtc)
 {
 	return -EINVAL;
 }
@@ -2007,7 +1919,7 @@ int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 /**
  * Function called by DRM code with vbl_lock held.
  */
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void vmw_disable_vblank(struct drm_crtc *crtc)
 {
 }
 
@@ -2597,7 +2509,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
 			 int increment)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
-	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdUpdateGBImage body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5702219..16dafff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -236,6 +236,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
 	.set_config = drm_atomic_helper_set_config,
+	.get_vblank_counter = vmw_get_vblank_counter,
+	.enable_vblank = vmw_enable_vblank,
+	.disable_vblank = vmw_disable_vblank,
 };
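[Editorial sketch] These stubs follow the core's move of the vblank hooks from struct drm_driver into struct drm_crtc_funcs; since vmwgfx has no hardware vblank, the counter reads as zero and enabling reports -EINVAL. Presumably (not shown in this hunk) per-device vblank bookkeeping is still set up once at init time, along the lines of:

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (ret)
		return ret;
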
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 0a6bbac..e8eb429 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,7 +320,7 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
 	int ret;
 
-	if (dev_priv->has_dx) {
+	if (has_sm4_context(dev_priv)) {
 		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
 		if (!(*otables))
 			return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fdb52f6..cd7ed16 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -354,37 +354,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 }
 
 /**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
-	struct vmw_overlay *overlay = dev_priv->overlay_priv;
-	int i, ret;
-
-	if (!overlay)
-		return 0;
-
-	mutex_lock(&overlay->mutex);
-
-	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
-		struct vmw_stream *stream = &overlay->stream[i];
-		if (!stream->buf)
-			continue;
-
-		ret = vmw_overlay_stop(dev_priv, i, false, false);
-		WARN_ON(ret != 0);
-	}
-
-	mutex_unlock(&overlay->mutex);
-
-	return 0;
-}
-
-/**
  * Try to resume all paused streams.
  *
  * Used by the kms code after moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f07aa85..60cfbfa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -69,7 +69,7 @@ struct vmw_bo_dirty {
 	unsigned int ref_count;
 	unsigned long bitmap_size;
 	size_t size;
-	unsigned long bitmap[0];
+	unsigned long bitmap[];
 };
 
 /**
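[Editorial sketch] The bitmap[0] above becomes a C99 flexible array member; the struct can then be sized with struct_size() from <linux/overflow.h>, which also guards the size multiplication against overflow. A hedged sketch of an allocation site (the real one is outside this hunk; num_pages is illustrative):

	dirty = kvzalloc(struct_size(dirty, bitmap, BITS_TO_LONGS(num_pages)),
			 GFP_KERNEL);
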
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e5a2832..32a22e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -319,6 +319,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
+	.get_vblank_counter = vmw_get_vblank_counter,
+	.enable_vblank = vmw_enable_vblank,
+	.disable_vblank = vmw_disable_vblank,
 };
 
 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 6380736..3f97b61 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -319,7 +319,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
 	static const size_t vmw_view_define_sizes[] = {
 		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
 		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
-		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView),
+		[vmw_view_ua] = sizeof(SVGA3dCmdDXDefineUAView)
 	};
 
 	struct vmw_private *dev_priv = ctx->dev_priv;
@@ -499,8 +500,8 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
  * Each time a resource is put on the validation list as the result of a
  * view pointing to it, we need to determine whether that resource will
  * be dirtied (written to by the GPU) as a result of the corresponding
- * GPU operation. Currently only rendertarget- and depth-stencil views are
- * capable of dirtying its resource.
+ * GPU operation. Currently only rendertarget-, depth-stencil- and unordered-
+ * access views are capable of dirtying their resource.
  *
  * Return: Whether the view type of @res dirties the resource it points to.
  */
@@ -509,10 +510,11 @@ u32 vmw_view_dirtying(struct vmw_resource *res)
 	static u32 view_is_dirtying[vmw_view_max] = {
 		[vmw_view_rt] = VMW_RES_DIRTY_SET,
 		[vmw_view_ds] = VMW_RES_DIRTY_SET,
+		[vmw_view_ua] = VMW_RES_DIRTY_SET,
 	};
 
 	/* Update this function as we add more view types */
-	BUILD_BUG_ON(vmw_view_max != 3);
+	BUILD_BUG_ON(vmw_view_max != 4);
 	return view_is_dirtying[vmw_view(res)->view_type];
 }
 
@@ -520,12 +522,14 @@ const u32 vmw_view_destroy_cmds[] = {
 	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
 	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
 	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+	[vmw_view_ua] = SVGA_3D_CMD_DX_DESTROY_UA_VIEW,
 };
 
 const SVGACOTableType vmw_view_cotables[] = {
 	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
 	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
 	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+	[vmw_view_ua] = SVGA_COTABLE_UAVIEW,
 };
 
 const SVGACOTableType vmw_so_cotables[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index 1256504..f48b84b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -30,6 +30,7 @@ enum vmw_view_type {
 	vmw_view_sr,
 	vmw_view_rt,
 	vmw_view_ds,
+	vmw_view_ua,
 	vmw_view_max,
 };
 
@@ -61,6 +62,7 @@ union vmw_view_destroy {
 	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
 	struct SVGA3dCmdDXDestroyShaderResourceView srv;
 	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	struct SVGA3dCmdDXDestroyUAView uav;
 	u32 view_id;
 };
 
@@ -87,6 +89,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
 {
 	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
 
+	if (id == SVGA_3D_CMD_DX_DEFINE_UA_VIEW ||
+	    id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW)
+		return vmw_view_ua;
+
 	if (tmp > (u32)vmw_view_max)
 		return vmw_view_max;
 
@@ -123,6 +129,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
 	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
 		return vmw_so_ss;
 	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB:
 	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
 		return vmw_so_so;
 	default:
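[Editorial sketch] vmw_view_cmd_to_type() leans on the SVGA opcode layout: the define/destroy opcodes of each original view type are adjacent, so subtracting the first define opcode and dividing by two collapses each pair onto one enum value, while the new UA view opcodes live elsewhere in the command space and are matched explicitly first. Roughly:

	/* Paired opcodes map to the same type... */
	vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW);  /* vmw_view_rt */
	vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW); /* vmw_view_rt */
	/* ...while the UA view is special-cased. */
	vmw_view_cmd_to_type(SVGA_3D_CMD_DX_DEFINE_UA_VIEW);            /* vmw_view_ua */
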
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41a96fb..9ffa9c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -590,7 +590,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 		return;
 
 	/* Assume we are blitting from Guest (bo) to Host (display_srf) */
-	dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+	dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
 	dst_bo = &stdu->display_srf->res.backup->base;
 	dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
 
@@ -916,6 +916,9 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
+	.get_vblank_counter = vmw_get_vblank_counter,
+	.enable_vblank = vmw_enable_vblank,
+	.disable_vblank = vmw_disable_vblank,
 };
 
 
@@ -1038,7 +1041,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
 	enum stdu_content_type new_content_type;
 	struct vmw_framebuffer_surface *new_vfbs;
-	struct drm_crtc *crtc = new_state->crtc;
 	uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
 	int ret;
 
@@ -1055,8 +1057,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 	vfb = vmw_framebuffer_to_vfb(new_fb);
 	new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
 
-	if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
-	    new_vfbs->surface->base_size.height == vdisplay)
+	if (new_vfbs &&
+	    new_vfbs->surface->metadata.base_size.width == hdisplay &&
+	    new_vfbs->surface->metadata.base_size.height == vdisplay)
 		new_content_type = SAME_AS_DISPLAY;
 	else if (vfb->bo)
 		new_content_type = SEPARATE_BO;
@@ -1064,12 +1067,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 		new_content_type = SEPARATE_SURFACE;
 
 	if (new_content_type != SAME_AS_DISPLAY) {
-		struct vmw_surface content_srf;
-		struct drm_vmw_size display_base_size = {0};
+		struct vmw_surface_metadata metadata = {0};
 
-		display_base_size.width  = hdisplay;
-		display_base_size.height = vdisplay;
-		display_base_size.depth  = 1;
+		metadata.base_size.width = hdisplay;
+		metadata.base_size.height = vdisplay;
+		metadata.base_size.depth = 1;
 
 		/*
 		 * If content buffer is a buffer object, then we have to
@@ -1079,15 +1081,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 
 			switch (new_fb->format->cpp[0]*8) {
 			case 32:
-				content_srf.format = SVGA3D_X8R8G8B8;
+				metadata.format = SVGA3D_X8R8G8B8;
 				break;
 
 			case 16:
-				content_srf.format = SVGA3D_R5G6B5;
+				metadata.format = SVGA3D_R5G6B5;
 				break;
 
 			case 8:
-				content_srf.format = SVGA3D_P8;
+				metadata.format = SVGA3D_P8;
 				break;
 
 			default:
@@ -1095,22 +1097,20 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 				return -EINVAL;
 			}
 
-			content_srf.flags             = 0;
-			content_srf.mip_levels[0]     = 1;
-			content_srf.multisample_count = 0;
-			content_srf.multisample_pattern =
-				SVGA3D_MS_PATTERN_NONE;
-			content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
+			metadata.mip_levels[0] = 1;
+			metadata.num_sizes = 1;
+			metadata.scanout = true;
 		} else {
-			content_srf = *new_vfbs->surface;
+			metadata = new_vfbs->surface->metadata;
 		}
 
 		if (vps->surf) {
-			struct drm_vmw_size cur_base_size = vps->surf->base_size;
+			struct drm_vmw_size cur_base_size =
+				vps->surf->metadata.base_size;
 
-			if (cur_base_size.width != display_base_size.width ||
-			    cur_base_size.height != display_base_size.height ||
-			    vps->surf->format != content_srf.format) {
+			if (cur_base_size.width != metadata.base_size.width ||
+			    cur_base_size.height != metadata.base_size.height ||
+			    vps->surf->metadata.format != metadata.format) {
 				WARN_ON(vps->pinned != 0);
 				vmw_surface_unreference(&vps->surf);
 			}
@@ -1118,20 +1118,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
 		}
 
 		if (!vps->surf) {
-			ret = vmw_surface_gb_priv_define
-				(crtc->dev,
-				 /* Kernel visible only */
-				 0,
-				 content_srf.flags,
-				 content_srf.format,
-				 true,  /* a scanout buffer */
-				 content_srf.mip_levels[0],
-				 content_srf.multisample_count,
-				 0,
-				 display_base_size,
-				 content_srf.multisample_pattern,
-				 content_srf.quality_level,
-				 &vps->surf);
+			ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+						    &vps->surf);
 			if (ret != 0) {
 				DRM_ERROR("Couldn't allocate STDU surface.\n");
 				return ret;
@@ -1308,7 +1296,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane  *update, void *cmd,
 	diff.cpp = stdu->cpp;
 
 	dst_bo = &stdu->display_srf->res.backup->base;
-	dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+	dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
 	dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
 
 	src_bo = &vfbbo->buffer->base;
@@ -1885,7 +1873,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 
 
 	/* Do nothing if Screen Target support is turned off */
-	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
 		return -ENOSYS;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
new file mode 100644
index 0000000..1931924
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
+
+/**
+ * struct vmw_dx_streamoutput - Streamoutput resource metadata.
+ * @res: Base resource struct.
+ * @ctx: Non-refcounted context to which @res belongs.
+ * @cotable: Refcounted cotable holding this Streamoutput.
+ * @cotable_head: List head for cotable-so_res list.
+ * @id: User-space provided identifier.
+ * @size: User-space provided mob size.
+ * @committed: Whether streamoutput is actually created or pending creation.
+ */
+struct vmw_dx_streamoutput {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	struct list_head cotable_head;
+	u32 id;
+	u32 size;
+	bool committed;
+};
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res);
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf);
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+				      struct ttm_validate_buffer *val_buf);
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+					      enum vmw_cmdbuf_res_state state);
+
+static size_t vmw_streamoutput_size;
+
+static const struct vmw_res_func vmw_dx_streamoutput_func = {
+	.res_type = vmw_res_streamoutput,
+	.needs_backup = true,
+	.may_evict = false,
+	.type_name = "DX streamoutput",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_streamoutput_create,
+	.destroy = NULL, /* Command buffer managed resource. */
+	.bind = vmw_dx_streamoutput_bind,
+	.unbind = vmw_dx_streamoutput_unbind,
+	.commit_notify = vmw_dx_streamoutput_commit_notify,
+};
+
+static inline struct vmw_dx_streamoutput *
+vmw_res_to_dx_streamoutput(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_streamoutput, res);
+}
+
+/**
+ * vmw_dx_streamoutput_unscrub - Reattach the MOB to streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindStreamOutput body;
+	} *cmd;
+
+	if (!list_empty(&so->cotable_head) || !so->committed)
+		return 0;
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.soid = so->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	cmd->body.sizeInBytes = so->size;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+
+	return 0;
+}
+
+static int vmw_dx_streamoutput_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!so->committed);
+
+	if (vmw_resource_mob_attached(res)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_streamoutput_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = so->id;
+
+	return ret;
+}
+
+static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	int ret;
+
+	if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
+		return -EINVAL;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_streamoutput_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_scrub - Unbind the MOB from streamoutput.
+ * @res: The streamoutput resource.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindStreamOutput body;
+	} *cmd;
+
+	if (list_empty(&so->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!so->committed);
+
+	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.soid = res->id;
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	cmd->body.sizeInBytes = so->size;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	res->id = -1;
+	list_del_init(&so->cotable_head);
+
+	return 0;
+}
+
+static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
+				      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
+		return -EINVAL;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_streamoutput_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_bo_fence_single(val_buf->bo, fence);
+
+	if (fence != NULL)
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
+					      enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(so->cotable, &so->cotable_head);
+		so->committed = true;
+		res->id = so->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&so->cotable_head);
+		so->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_streamoutput_lookup - Do a streamoutput resource lookup by user key.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: User-space identifier for lookup.
+ *
+ * Return: Valid refcounted vmw_resource on success, error pointer on failure.
+ */
+struct vmw_resource *
+vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
+				     user_key);
+}
+
+static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+	vmw_resource_unreference(&so->cotable);
+	kfree(so);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
+}
+
+static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
+{
+	/* Destroyed by user-space cmd buf or as part of context takedown. */
+	res->id = -1;
+}
+
+/**
+ * vmw_dx_streamoutput_add - Add a streamoutput as a cmd buf managed resource.
+ * @man: Command buffer managed resource manager for current context.
+ * @ctx: Pointer to context resource.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
+			    struct vmw_resource *ctx, u32 user_key,
+			    struct list_head *list)
+{
+	struct vmw_dx_streamoutput *so;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct ttm_operation_ctx ttm_opt_ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	int ret;
+
+	if (!vmw_streamoutput_size)
+		vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_streamoutput_size, &ttm_opt_ctx);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for streamout.\n");
+		return ret;
+	}
+
+	so = kmalloc(sizeof(*so), GFP_KERNEL);
+	if (!so) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_streamoutput_size);
+		return -ENOMEM;
+	}
+
+	res = &so->res;
+	so->ctx = ctx;
+	so->cotable = vmw_resource_reference
+		(vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
+	so->id = user_key;
+	so->committed = false;
+	INIT_LIST_HEAD(&so->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_streamoutput_res_free,
+				&vmw_dx_streamoutput_func);
+	if (ret)
+		goto out_resource_init;
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = so->id;
+	res->hw_destroy = vmw_dx_streamoutput_hw_destroy;
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_dx_streamoutput_set_size - Sets streamoutput mob size in res struct.
+ * @res: The streamoutput resource for which to set the size.
+ * @size: The mob size provided by user-space.
+ */
+void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
+{
+	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
+
+	so->size = size;
+}
+
+/**
+ * vmw_dx_streamoutput_remove - Stage streamoutput for removal.
+ * @man: Command buffer managed resource manager for current context.
+ * @user_key: The identifier for this streamoutput.
+ * @list: The list of staged command buffer managed resources.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
+			       u32 user_key,
+			       struct list_head *list)
+{
+	struct vmw_resource *r;
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
+				     (u32)user_key, list, &r);
+}
+
+/**
+ * vmw_dx_streamoutput_cotable_list_scrub - cotable unbind_func callback.
+ * @dev_priv: Device private.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ */
+void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
+					    struct list_head *list,
+					    bool readback)
+{
+	struct vmw_dx_streamoutput *entry, *next;
+
+	lockdep_assert_held_once(&dev_priv->binding_mutex);
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
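[Editorial sketch] The new resource type follows the driver's command-buffer-managed lifecycle: the execbuf verifier stages a define with vmw_dx_streamoutput_add(), records the user-supplied mob size, and the bind/scrub pair above attaches and detaches the MOB as the resource is validated and evicted. A hedged sketch of the staging sequence a caller would use (man, ctx, user_key and mob_size stand in for values parsed from the command stream):

	struct list_head staged;
	struct vmw_resource *res;
	int ret;

	INIT_LIST_HEAD(&staged);
	ret = vmw_dx_streamoutput_add(man, ctx, user_key, &staged);
	if (ret)
		return ret;

	res = vmw_dx_streamoutput_lookup(man, user_key);
	if (!IS_ERR(res)) {
		vmw_dx_streamoutput_set_size(res, mob_size);
		vmw_resource_unreference(&res);
	}
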
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3ce630a..7ef51fa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
 	struct svga3dsurface_cache cache;
 	size_t size;
 	u32 num_subres;
-	SVGA3dBox boxes[0];
+	SVGA3dBox boxes[];
 };
 
 static void vmw_user_surface_free(struct vmw_resource *res);
@@ -199,7 +199,7 @@ struct vmw_surface_destroy {
  */
 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
 {
-	return srf->num_sizes * sizeof(struct vmw_surface_dma);
+	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
 }
 
 
@@ -213,7 +213,7 @@ static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
  */
 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
 {
-	return sizeof(struct vmw_surface_define) + srf->num_sizes *
+	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
 		sizeof(SVGA3dSize);
 }
 
@@ -262,7 +262,8 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	uint32_t cmd_len;
 	int i;
 
-	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
+		sizeof(SVGA3dSize);
 
 	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
 	cmd->header.size = cmd_len;
@@ -272,16 +273,16 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
 	 * since driver internally stores as 64 bit.
 	 * For legacy surface define only 32 bit flag is supported.
 	 */
-	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
-	cmd->body.format = srf->format;
+	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
+	cmd->body.format = srf->metadata.format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
 
 	cmd += 1;
 	cmd_size = (SVGA3dSize *) cmd;
-	src_size = srf->sizes;
+	src_size = srf->metadata.sizes;
 
-	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
 		cmd_size->width = src_size->width;
 		cmd_size->height = src_size->height;
 		cmd_size->depth = src_size->depth;
@@ -305,15 +306,15 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
 	uint32_t i;
 	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
 	const struct svga3d_surface_desc *desc =
-		svga3dsurface_get_desc(srf->format);
+		svga3dsurface_get_desc(srf->metadata.format);
 
-	for (i = 0; i < srf->num_sizes; ++i) {
+	for (i = 0; i < srf->metadata.num_sizes; ++i) {
 		SVGA3dCmdHeader *header = &cmd->header;
 		SVGA3dCmdSurfaceDMA *body = &cmd->body;
 		SVGA3dCopyBox *cb = &cmd->cb;
 		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
 		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
-		const struct drm_vmw_size *cur_size = &srf->sizes[i];
+		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];
 
 		header->id = SVGA_3D_CMD_SURFACE_DMA;
 		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
@@ -669,7 +670,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	if (user_srf->master)
 		drm_master_put(&user_srf->master);
 	kfree(srf->offsets);
-	kfree(srf->sizes);
+	kfree(srf->metadata.sizes);
 	kfree(srf->snooper.image);
 	ttm_prime_object_kfree(user_srf, prime);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
@@ -728,6 +729,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_surface *user_srf;
 	struct vmw_surface *srf;
+	struct vmw_surface_metadata *metadata;
 	struct vmw_resource *res;
 	struct vmw_resource *tmp;
 	union drm_vmw_surface_create_arg *arg =
@@ -793,43 +795,45 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 
 	srf = &user_srf->srf;
+	metadata = &srf->metadata;
 	res = &srf->res;
 
 	/* Driver internally stores as 64-bit flags */
-	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
-	srf->format = req->format;
-	srf->scanout = req->scanout;
+	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
+	metadata->format = req->format;
+	metadata->scanout = req->scanout;
 
-	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-	srf->num_sizes = num_sizes;
+	memcpy(metadata->mip_levels, req->mip_levels,
+	       sizeof(metadata->mip_levels));
+	metadata->num_sizes = num_sizes;
 	user_srf->size = size;
-	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
-				 req->size_addr,
-				 sizeof(*srf->sizes) * srf->num_sizes);
-	if (IS_ERR(srf->sizes)) {
-		ret = PTR_ERR(srf->sizes);
+	metadata->sizes =
+		memdup_user((struct drm_vmw_size __user *)(unsigned long)
+			    req->size_addr,
+			    sizeof(*metadata->sizes) * metadata->num_sizes);
+	if (IS_ERR(metadata->sizes)) {
+		ret = PTR_ERR(metadata->sizes);
 		goto out_no_sizes;
 	}
-	srf->offsets = kmalloc_array(srf->num_sizes,
-				     sizeof(*srf->offsets),
+	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
 				     GFP_KERNEL);
 	if (unlikely(!srf->offsets)) {
 		ret = -ENOMEM;
 		goto out_no_offsets;
 	}
 
-	srf->base_size = *srf->sizes;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = 0;
-	srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
-	srf->quality_level = SVGA3D_MS_QUALITY_NONE;
+	metadata->base_size = *srf->metadata.sizes;
+	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	metadata->multisample_count = 0;
+	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;
 
 	cur_bo_offset = 0;
 	cur_offset = srf->offsets;
-	cur_size = srf->sizes;
+	cur_size = metadata->sizes;
 
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
-		for (j = 0; j < srf->mip_levels[i]; ++j) {
+		for (j = 0; j < metadata->mip_levels[i]; ++j) {
 			uint32_t stride = svga3dsurface_calculate_pitch
 				(desc, cur_size);
 
@@ -843,11 +847,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 	res->backup_size = cur_bo_offset;
-	if (srf->scanout &&
-	    srf->num_sizes == 1 &&
-	    srf->sizes[0].width == 64 &&
-	    srf->sizes[0].height == 64 &&
-	    srf->format == SVGA3D_A8R8G8B8) {
+	if (metadata->scanout &&
+	    metadata->num_sizes == 1 &&
+	    metadata->sizes[0].width == 64 &&
+	    metadata->sizes[0].height == 64 &&
+	    metadata->format == SVGA3D_A8R8G8B8) {
 
 		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
 		if (!srf->snooper.image) {
@@ -911,7 +915,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 out_no_copy:
 	kfree(srf->offsets);
 out_no_offsets:
-	kfree(srf->sizes);
+	kfree(metadata->sizes);
 out_no_sizes:
 	ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
@@ -1031,18 +1035,19 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	srf = &user_srf->srf;
 
 	/* Downcast of flags when sending back to user space */
-	rep->flags = (uint32_t)srf->flags;
-	rep->format = srf->format;
-	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+	rep->flags = (uint32_t)srf->metadata.flags;
+	rep->format = srf->metadata.format;
+	memcpy(rep->mip_levels, srf->metadata.mip_levels,
+	       sizeof(srf->metadata.mip_levels));
 	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
 	    rep->size_addr;
 
 	if (user_sizes)
-		ret = copy_to_user(user_sizes, &srf->base_size,
-				   sizeof(srf->base_size));
+		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
+				   sizeof(srf->metadata.base_size));
 	if (unlikely(ret != 0)) {
 		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
-			       srf->num_sizes);
+			       srf->metadata.num_sizes);
 		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
 		ret = -EFAULT;
 	}
@@ -1062,6 +1067,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct vmw_surface_metadata *metadata = &srf->metadata;
 	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
@@ -1076,6 +1082,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface_v3 body;
 	} *cmd3;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v4 body;
+	} *cmd4;
 
 	if (likely(res->id != -1))
 		return 0;
@@ -1092,12 +1102,16 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		goto out_no_fifo;
 	}
 
-	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
+		cmd_len = sizeof(cmd4->body);
+		submit_len = sizeof(*cmd4);
+	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
 		cmd_len = sizeof(cmd3->body);
 		submit_len = sizeof(*cmd3);
-	} else if (srf->array_size > 0) {
-		/* has_dx checked on creation time. */
+	} else if (metadata->array_size > 0) {
+		/* VMW_SM_4 support verified at creation time. */
 		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
 		cmd_len = sizeof(cmd2->body);
 		submit_len = sizeof(*cmd2);
@@ -1110,51 +1124,68 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
 	cmd2 = (typeof(cmd2))cmd;
 	cmd3 = (typeof(cmd3))cmd;
+	cmd4 = (typeof(cmd4))cmd;
 	if (unlikely(!cmd)) {
 		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
-	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
+		cmd4->header.id = cmd_id;
+		cmd4->header.size = cmd_len;
+		cmd4->body.sid = srf->res.id;
+		cmd4->body.surfaceFlags = metadata->flags;
+		cmd4->body.format = metadata->format;
+		cmd4->body.numMipLevels = metadata->mip_levels[0];
+		cmd4->body.multisampleCount = metadata->multisample_count;
+		cmd4->body.multisamplePattern = metadata->multisample_pattern;
+		cmd4->body.qualityLevel = metadata->quality_level;
+		cmd4->body.autogenFilter = metadata->autogen_filter;
+		cmd4->body.size.width = metadata->base_size.width;
+		cmd4->body.size.height = metadata->base_size.height;
+		cmd4->body.size.depth = metadata->base_size.depth;
+		cmd4->body.arraySize = metadata->array_size;
+		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
+	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
 		cmd3->header.id = cmd_id;
 		cmd3->header.size = cmd_len;
 		cmd3->body.sid = srf->res.id;
-		cmd3->body.surfaceFlags = srf->flags;
-		cmd3->body.format = srf->format;
-		cmd3->body.numMipLevels = srf->mip_levels[0];
-		cmd3->body.multisampleCount = srf->multisample_count;
-		cmd3->body.multisamplePattern = srf->multisample_pattern;
-		cmd3->body.qualityLevel = srf->quality_level;
-		cmd3->body.autogenFilter = srf->autogen_filter;
-		cmd3->body.size.width = srf->base_size.width;
-		cmd3->body.size.height = srf->base_size.height;
-		cmd3->body.size.depth = srf->base_size.depth;
-		cmd3->body.arraySize = srf->array_size;
-	} else if (srf->array_size > 0) {
+		cmd3->body.surfaceFlags = metadata->flags;
+		cmd3->body.format = metadata->format;
+		cmd3->body.numMipLevels = metadata->mip_levels[0];
+		cmd3->body.multisampleCount = metadata->multisample_count;
+		cmd3->body.multisamplePattern = metadata->multisample_pattern;
+		cmd3->body.qualityLevel = metadata->quality_level;
+		cmd3->body.autogenFilter = metadata->autogen_filter;
+		cmd3->body.size.width = metadata->base_size.width;
+		cmd3->body.size.height = metadata->base_size.height;
+		cmd3->body.size.depth = metadata->base_size.depth;
+		cmd3->body.arraySize = metadata->array_size;
+	} else if (metadata->array_size > 0) {
 		cmd2->header.id = cmd_id;
 		cmd2->header.size = cmd_len;
 		cmd2->body.sid = srf->res.id;
-		cmd2->body.surfaceFlags = srf->flags;
-		cmd2->body.format = srf->format;
-		cmd2->body.numMipLevels = srf->mip_levels[0];
-		cmd2->body.multisampleCount = srf->multisample_count;
-		cmd2->body.autogenFilter = srf->autogen_filter;
-		cmd2->body.size.width = srf->base_size.width;
-		cmd2->body.size.height = srf->base_size.height;
-		cmd2->body.size.depth = srf->base_size.depth;
-		cmd2->body.arraySize = srf->array_size;
+		cmd2->body.surfaceFlags = metadata->flags;
+		cmd2->body.format = metadata->format;
+		cmd2->body.numMipLevels = metadata->mip_levels[0];
+		cmd2->body.multisampleCount = metadata->multisample_count;
+		cmd2->body.autogenFilter = metadata->autogen_filter;
+		cmd2->body.size.width = metadata->base_size.width;
+		cmd2->body.size.height = metadata->base_size.height;
+		cmd2->body.size.depth = metadata->base_size.depth;
+		cmd2->body.arraySize = metadata->array_size;
 	} else {
 		cmd->header.id = cmd_id;
 		cmd->header.size = cmd_len;
 		cmd->body.sid = srf->res.id;
-		cmd->body.surfaceFlags = srf->flags;
-		cmd->body.format = srf->format;
-		cmd->body.numMipLevels = srf->mip_levels[0];
-		cmd->body.multisampleCount = srf->multisample_count;
-		cmd->body.autogenFilter = srf->autogen_filter;
-		cmd->body.size.width = srf->base_size.width;
-		cmd->body.size.height = srf->base_size.height;
-		cmd->body.size.depth = srf->base_size.depth;
+		cmd->body.surfaceFlags = metadata->flags;
+		cmd->body.format = metadata->format;
+		cmd->body.numMipLevels = metadata->mip_levels[0];
+		cmd->body.multisampleCount = metadata->multisample_count;
+		cmd->body.autogenFilter = metadata->autogen_filter;
+		cmd->body.size.width = metadata->base_size.width;
+		cmd->body.size.height = metadata->base_size.height;
+		cmd->body.size.depth = metadata->base_size.depth;
 	}
 
 	vmw_fifo_commit(dev_priv, submit_len);
@@ -1314,7 +1345,6 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	return 0;
 }
 
-
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  * the user surface define functionality.
@@ -1336,6 +1366,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	req_ext.svga3d_flags_upper_32_bits = 0;
 	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
 	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+	req_ext.buffer_byte_stride = 0;
 	req_ext.must_be_zero = 0;
 
 	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
@@ -1371,171 +1402,6 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
- * vmw_surface_gb_priv_define - Define a private GB surface
- *
- * @dev:  Pointer to a struct drm_device
- * @user_accounting_size:  Used to track user-space memory usage, set
- *                         to 0 for kernel mode only memory
- * @svga3d_flags: SVGA3d surface flags for the device
- * @format: requested surface format
- * @for_scanout: true if inteded to be used for scanout buffer
- * @num_mip_levels:  number of MIP levels
- * @multisample_count:
- * @array_size: Surface array size.
- * @size: width, heigh, depth of the surface requested
- * @multisample_pattern: Multisampling pattern when msaa is supported
- * @quality_level: Precision settings
- * @user_srf_out: allocated user_srf.  Set to NULL on failure.
- *
- * GB surfaces allocated by this function will not have a user mode handle, and
- * thus will only be visible to vmwgfx.  For optimization reasons the
- * surface may later be given a user mode handle by another function to make
- * it available to user mode drivers.
- */
-int vmw_surface_gb_priv_define(struct drm_device *dev,
-			       uint32_t user_accounting_size,
-			       SVGA3dSurfaceAllFlags svga3d_flags,
-			       SVGA3dSurfaceFormat format,
-			       bool for_scanout,
-			       uint32_t num_mip_levels,
-			       uint32_t multisample_count,
-			       uint32_t array_size,
-			       struct drm_vmw_size size,
-			       SVGA3dMSPattern multisample_pattern,
-			       SVGA3dMSQualityLevel quality_level,
-			       struct vmw_surface **srf_out)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_surface *user_srf;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
-	struct vmw_surface *srf;
-	int ret;
-	u32 num_layers = 1;
-	u32 sample_count = 1;
-
-	*srf_out = NULL;
-
-	if (for_scanout) {
-		if (!svga3dsurface_is_screen_target_format(format)) {
-			VMW_DEBUG_USER("Invalid Screen Target surface format.");
-			return -EINVAL;
-		}
-
-		if (size.width > dev_priv->texture_max_width ||
-		    size.height > dev_priv->texture_max_height) {
-			VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
-				       size.width, size.height,
-				       dev_priv->texture_max_width,
-				       dev_priv->texture_max_height);
-			return -EINVAL;
-		}
-	} else {
-		const struct svga3d_surface_desc *desc;
-
-		desc = svga3dsurface_get_desc(format);
-		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-			VMW_DEBUG_USER("Invalid surface format.\n");
-			return -EINVAL;
-		}
-	}
-
-	/* array_size must be null for non-GL3 host. */
-	if (array_size > 0 && !dev_priv->has_dx) {
-		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
-		return -EINVAL;
-	}
-
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   user_accounting_size, &ctx);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
-
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(!user_srf)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	*srf_out  = &user_srf->srf;
-	user_srf->size = user_accounting_size;
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile     = NULL;
-
-	srf = &user_srf->srf;
-	srf->flags             = svga3d_flags;
-	srf->format            = format;
-	srf->scanout           = for_scanout;
-	srf->mip_levels[0]     = num_mip_levels;
-	srf->num_sizes         = 1;
-	srf->sizes             = NULL;
-	srf->offsets           = NULL;
-	srf->base_size         = size;
-	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
-	srf->array_size        = array_size;
-	srf->multisample_count = multisample_count;
-	srf->multisample_pattern = multisample_pattern;
-	srf->quality_level = quality_level;
-
-	if (array_size)
-		num_layers = array_size;
-	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
-		num_layers = SVGA3D_MAX_SURFACE_FACES;
-
-	if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
-		sample_count = srf->multisample_count;
-
-	srf->res.backup_size   =
-		svga3dsurface_get_serialized_size_extended(srf->format,
-							   srf->base_size,
-							   srf->mip_levels[0],
-							   num_layers,
-							   sample_count);
-
-	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
-		srf->res.backup_size += sizeof(SVGA3dDXSOState);
-
-	/*
-	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
-	 * size greater than STDU max width/height. This is really a workaround
-	 * to support creation of big framebuffer requested by some user-space
-	 * for whole topology. That big framebuffer won't really be used for
-	 * binding with screen target as during prepare_fb a separate surface is
-	 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
-	 */
-	if (dev_priv->active_display_unit == vmw_du_screen_target &&
-	    for_scanout && size.width <= dev_priv->stdu_max_width &&
-	    size.height <= dev_priv->stdu_max_height)
-		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
-
-	/*
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
-
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
-}
-
-/**
  * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
  * the user surface define functionality.
  *
@@ -1588,43 +1454,60 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 			       struct drm_vmw_gb_surface_create_rep *rep,
 			       struct drm_file *file_priv)
 {
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_surface *user_srf;
+	struct vmw_surface_metadata metadata = {0};
 	struct vmw_surface *srf;
 	struct vmw_resource *res;
 	struct vmw_resource *tmp;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	int ret;
+	int ret = 0;
 	uint32_t size;
 	uint32_t backup_handle = 0;
 	SVGA3dSurfaceAllFlags svga3d_flags_64 =
 		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
 				req->base.svga3d_flags);
 
-	if (!dev_priv->has_sm4_1) {
-		/*
-		 * If SM4_1 is not support then cannot send 64-bit flag to
-		 * device.
-		 */
+	/* array_size must be zero for non-GL3 hosts. */
+	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
+		VMW_DEBUG_USER("SM4 surface not supported.\n");
+		return -EINVAL;
+	}
+
+	if (!has_sm4_1_context(dev_priv)) {
 		if (req->svga3d_flags_upper_32_bits != 0)
-			return -EINVAL;
+			ret = -EINVAL;
 
 		if (req->base.multisample_count != 0)
-			return -EINVAL;
+			ret = -EINVAL;
 
 		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
-			return -EINVAL;
+			ret = -EINVAL;
 
 		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
-			return -EINVAL;
+			ret = -EINVAL;
+
+		if (ret) {
+			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
+			return ret;
+		}
+	}
+
+	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
+		VMW_DEBUG_USER("SM5 surface not supported.\n");
+		return -EINVAL;
 	}
 
 	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
-	    req->base.multisample_count == 0)
+	    req->base.multisample_count == 0) {
+		VMW_DEBUG_USER("Invalid sample count.\n");
 		return -EINVAL;
+	}
 
-	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
+		VMW_DEBUG_USER("Invalid mip level.\n");
 		return -EINVAL;
+	}
 
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
@@ -1632,22 +1515,25 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 
 	size = vmw_user_surface_size;
 
+	metadata.flags = svga3d_flags_64;
+	metadata.format = req->base.format;
+	metadata.mip_levels[0] = req->base.mip_levels;
+	metadata.multisample_count = req->base.multisample_count;
+	metadata.multisample_pattern = req->multisample_pattern;
+	metadata.quality_level = req->quality_level;
+	metadata.array_size = req->base.array_size;
+	metadata.buffer_byte_stride = req->buffer_byte_stride;
+	metadata.num_sizes = 1;
+	metadata.base_size = req->base.base_size;
+	metadata.scanout = req->base.drm_surface_flags &
+		drm_vmw_surface_flag_scanout;
+
 	/* Define a surface based on the parameters. */
-	ret = vmw_surface_gb_priv_define(dev,
-					 size,
-					 svga3d_flags_64,
-					 req->base.format,
-					 req->base.drm_surface_flags &
-					 drm_vmw_surface_flag_scanout,
-					 req->base.mip_levels,
-					 req->base.multisample_count,
-					 req->base.array_size,
-					 req->base.base_size,
-					 req->multisample_pattern,
-					 req->quality_level,
-					 &srf);
-	if (unlikely(ret != 0))
+	ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+	if (ret != 0) {
+		VMW_DEBUG_USER("Failed to define surface.\n");
 		return ret;
+	}
 
 	user_srf = container_of(srf, struct vmw_user_surface, srf);
 	if (drm_is_primary_client(file_priv))
@@ -1762,6 +1648,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_surface *srf;
 	struct vmw_user_surface *user_srf;
+	struct vmw_surface_metadata *metadata;
 	struct ttm_base_object *base;
 	uint32_t backup_handle;
 	int ret = -EINVAL;
@@ -1777,6 +1664,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
 		goto out_bad_resource;
 	}
+	metadata = &srf->metadata;
 
 	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
 	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
@@ -1790,15 +1678,15 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 		goto out_bad_resource;
 	}
 
-	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
-	rep->creq.base.format = srf->format;
-	rep->creq.base.mip_levels = srf->mip_levels[0];
+	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
+	rep->creq.base.format = metadata->format;
+	rep->creq.base.mip_levels = metadata->mip_levels[0];
 	rep->creq.base.drm_surface_flags = 0;
-	rep->creq.base.multisample_count = srf->multisample_count;
-	rep->creq.base.autogen_filter = srf->autogen_filter;
-	rep->creq.base.array_size = srf->array_size;
+	rep->creq.base.multisample_count = metadata->multisample_count;
+	rep->creq.base.autogen_filter = metadata->autogen_filter;
+	rep->creq.base.array_size = metadata->array_size;
 	rep->creq.base.buffer_handle = backup_handle;
-	rep->creq.base.base_size = srf->base_size;
+	rep->creq.base.base_size = metadata->base_size;
 	rep->crep.handle = user_srf->prime.base.handle;
 	rep->crep.backup_size = srf->res.backup_size;
 	rep->crep.buffer_handle = backup_handle;
@@ -1808,9 +1696,9 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 
 	rep->creq.version = drm_vmw_gb_surface_v1;
 	rep->creq.svga3d_flags_upper_32_bits =
-		SVGA3D_FLAGS_UPPER_32(srf->flags);
-	rep->creq.multisample_pattern = srf->multisample_pattern;
-	rep->creq.quality_level = srf->quality_level;
+		SVGA3D_FLAGS_UPPER_32(metadata->flags);
+	rep->creq.multisample_pattern = metadata->multisample_pattern;
+	rep->creq.quality_level = metadata->quality_level;
 	rep->creq.must_be_zero = 0;
 
 out_bad_resource:
@@ -1968,7 +1856,7 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
 		    start >= res->backup_offset + res->backup_size))
 		return;
 
-	if (srf->format == SVGA3D_BUFFER)
+	if (srf->metadata.format == SVGA3D_BUFFER)
 		vmw_surface_buf_dirty_range_add(res, start, end);
 	else
 		vmw_surface_tex_dirty_range_add(res, start, end);
@@ -2058,6 +1946,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res)
 static int vmw_surface_dirty_alloc(struct vmw_resource *res)
 {
 	struct vmw_surface *srf = vmw_res_to_srf(res);
+	const struct vmw_surface_metadata *metadata = &srf->metadata;
 	struct vmw_surface_dirty *dirty;
 	u32 num_layers = 1;
 	u32 num_mip;
@@ -2070,12 +1959,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
 	};
 	int ret;
 
-	if (srf->array_size)
-		num_layers = srf->array_size;
-	else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+	if (metadata->array_size)
+		num_layers = metadata->array_size;
+	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
 		num_layers *= SVGA3D_MAX_SURFACE_FACES;
 
-	num_mip = srf->mip_levels[0];
+	num_mip = metadata->mip_levels[0];
 	if (!num_mip)
 		num_mip = 1;
 
@@ -2096,9 +1985,10 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
 		goto out_no_dirty;
 	}
 
-	num_samples = max_t(u32, 1, srf->multisample_count);
-	ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
-					num_layers, num_samples, &dirty->cache);
+	num_samples = max_t(u32, 1, metadata->multisample_count);
+	ret = svga3dsurface_setup_cache(&metadata->base_size, metadata->format,
+					num_mip, num_layers, num_samples,
+					&dirty->cache);
 	if (ret)
 		goto out_no_cache;
 
@@ -2153,3 +2043,147 @@ static int vmw_surface_clean(struct vmw_resource *res)
 
 	return 0;
 }
+
+/*
+ * vmw_gb_surface_define - Define a private GB surface
+ *
+ * @dev_priv: Pointer to a device private.
+ * @user_accounting_size: Used to track user-space memory usage; set
+ *                        to 0 for kernel-mode-only memory.
+ * @req: Metadata describing the surface to create.
+ * @srf_out: Allocated surface; set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_gb_surface_define(struct vmw_private *dev_priv,
+			  uint32_t user_accounting_size,
+			  const struct vmw_surface_metadata *req,
+			  struct vmw_surface **srf_out)
+{
+	struct vmw_surface_metadata *metadata;
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = true,
+		.no_wait_gpu = false
+	};
+	u32 sample_count = 1;
+	u32 num_layers = 1;
+	int ret;
+
+	*srf_out = NULL;
+
+	if (req->scanout) {
+		if (!svga3dsurface_is_screen_target_format(req->format)) {
+			VMW_DEBUG_USER("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+
+		if (req->base_size.width > dev_priv->texture_max_width ||
+		    req->base_size.height > dev_priv->texture_max_height) {
+			VMW_DEBUG_USER("%ux%u\n, exceed max surface size %ux%u",
+				       req->base_size.width,
+				       req->base_size.height,
+				       dev_priv->texture_max_width,
+				       dev_priv->texture_max_height);
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc =
+			svga3dsurface_get_desc(req->format);
+
+		if (desc->block_desc == SVGA3DBLOCKDESC_NONE) {
+			VMW_DEBUG_USER("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
+		return -EINVAL;
+
+	if (req->num_sizes != 1)
+		return -EINVAL;
+
+	if (req->sizes != NULL)
+		return -EINVAL;
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, &ctx);
+	if (ret != 0) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(!user_srf)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile = NULL;
+
+	srf = &user_srf->srf;
+	srf->metadata = *req;
+	srf->offsets = NULL;
+
+	metadata = &srf->metadata;
+
+	if (metadata->array_size)
+		num_layers = req->array_size;
+	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+
+	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
+		sample_count = metadata->multisample_count;
+
+	srf->res.backup_size =
+		svga3dsurface_get_serialized_size_extended(metadata->format,
+							   metadata->base_size,
+							   metadata->mip_levels[0],
+							   num_layers,
+							   sample_count);
+
+	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	/*
+	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout
+	 * surface whose size exceeds the STDU max width/height. This is
+	 * really a workaround to support the creation of a big framebuffer
+	 * requested by some user space for the whole topology. That big
+	 * framebuffer won't really be used for binding with a screen
+	 * target, as a separate surface is created during prepare_fb, so
+	 * it's safe to ignore the SVGA3D_SURFACE_SCREENTARGET flag here.
+	 */
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    metadata->scanout &&
+	    metadata->base_size.width <= dev_priv->stdu_max_width &&
+	    metadata->base_size.height <= dev_priv->stdu_max_height)
+		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
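
The metadata-based entry point replaces the long positional parameter list of vmw_surface_gb_priv_define(). A rough sketch of a kernel-internal caller (field values are illustrative, not taken from this patch):

	struct vmw_surface_metadata metadata = {0};
	struct vmw_surface *srf;
	int ret;

	metadata.format = SVGA3D_R8G8B8A8_UNORM;	/* illustrative format */
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = 640;
	metadata.base_size.height = 480;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	/* Accounting size 0: kernel-mode-only surface, no user handle yet. */
	ret = vmw_gb_surface_define(dev_priv, 0, &metadata, &srf);
	if (ret)
		return ret;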
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd..3f3b2c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -736,11 +736,6 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 	return NULL;
 }
 
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-	return 0;
-}
-
 static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
@@ -866,7 +861,6 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
 	.ttm_tt_populate = &vmw_ttm_populate,
 	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
-	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 4f34c52..78096bb 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -220,6 +220,24 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
 	return false;
 }
 
+static int display_check(struct drm_simple_display_pipe *pipe,
+			 struct drm_plane_state *plane_state,
+			 struct drm_crtc_state *crtc_state)
+{
+	/*
+	 * Xen doesn't initialize vblanking via drm_vblank_init(), so
+	 * DRM helpers assume that it doesn't handle vblanking and start
+	 * sending out fake VBLANK events automatically.
+	 *
+	 * As xen contains its own logic for sending out VBLANK events
+	 * in send_pending_event(), disable no_vblank (i.e., the xen
+	 * driver has vblanking support).
+	 */
+	crtc_state->no_vblank = false;
+
+	return 0;
+}
+
 static void display_update(struct drm_simple_display_pipe *pipe,
 			   struct drm_plane_state *old_plane_state)
 {
@@ -284,6 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
 	.enable = display_enable,
 	.disable = display_disable,
 	.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+	.check = display_check,
 	.update = display_update,
 };
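
The new .check hook takes effect once the funcs table is registered with the simple display pipe helper; a minimal sketch of the registration call (identifiers abridged from the xen driver):

	ret = drm_simple_display_pipe_init(dev, &pipeline->pipe,
					   &display_funcs, formats,
					   ARRAY_SIZE(formats), NULL,
					   connector);
	if (ret)
		return ret;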
 
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 086c50f..c8f7b21 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -54,7 +54,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
 	int min_scale = FRAC_16_16(1, 8);
 	int max_scale = FRAC_16_16(8, 1);
 
-	if (!crtc || !fb)
+	if (!crtc || WARN_ON(!fb))
 		return 0;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
@@ -281,7 +281,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
 	struct drm_crtc *crtc = plane_state->crtc;
 	struct drm_crtc_state *crtc_state;
 
-	if (!crtc || !fb)
+	if (!crtc || WARN_ON(!fb))
 		return 0;
 
 	crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0093bbd..7d22d73 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -9,7 +9,7 @@
 # LCD
 #
 config LCD_CLASS_DEVICE
-        tristate "Lowlevel LCD controls"
+	tristate "Lowlevel LCD controls"
 	help
 	  This framework adds support for low-level control of LCD.
 	  Some framebuffer devices connect to platform-specific LCD modules
@@ -141,10 +141,10 @@
 # Backlight
 #
 config BACKLIGHT_CLASS_DEVICE
-        tristate "Lowlevel Backlight controls"
+	tristate "Lowlevel Backlight controls"
 	help
 	  This framework adds support for low-level control of the LCD
-          backlight. This includes support for brightness and power.
+	  backlight. This includes support for brightness and power.
 
 	  To have support for your specific LCD panel you will have to
 	  select the proper drivers which depend on this option.
@@ -272,7 +272,7 @@
        tristate "Apple Backlight Driver"
        depends on X86 && ACPI
        help
-         If you have an Intel-based Apple say Y to enable a driver for its
+	 If you have an Intel-based Apple, say Y to enable a driver for its
 	 backlight.
 
 config BACKLIGHT_TOSA
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 70c10ea..3c01b0d 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -27,7 +27,7 @@
        depends on VGA_CONSOLE
        default n
        help
-         The scrollback buffer of the standard VGA console is located in
+	 The scrollback buffer of the standard VGA console is located in
 	 the VGA RAM.  The size of this RAM is fixed and is quite small.
 	 If you require a larger scrollback buffer, this can be placed in
 	 System RAM which is dynamically allocated during initialization.
@@ -84,36 +84,36 @@
 	  If unsure, say N.
 
 config SGI_NEWPORT_CONSOLE
-        tristate "SGI Newport Console support"
+	tristate "SGI Newport Console support"
 	depends on SGI_IP22 && HAS_IOMEM
-        select FONT_SUPPORT
-        help
-          Say Y here if you want the console on the Newport aka XL graphics
-          card of your Indy.  Most people say Y here.
+	select FONT_SUPPORT
+	help
+	  Say Y here if you want the console on the Newport aka XL graphics
+	  card of your Indy.  Most people say Y here.
 
 config DUMMY_CONSOLE
 	bool
 	default y
 
 config DUMMY_CONSOLE_COLUMNS
-        int "Initial number of console screen columns"
-        depends on DUMMY_CONSOLE && !ARM
-        default 160 if PARISC
-        default 80
-        help
-          On PA-RISC, the default value is 160, which should fit a 1280x1024
-          monitor.
-          Select 80 if you use a 640x480 resolution by default.
+	int "Initial number of console screen columns"
+	depends on DUMMY_CONSOLE && !ARM
+	default 160 if PARISC
+	default 80
+	help
+	  On PA-RISC, the default value is 160, which should fit a 1280x1024
+	  monitor.
+	  Select 80 if you use a 640x480 resolution by default.
 
 config DUMMY_CONSOLE_ROWS
-        int "Initial number of console screen rows"
-        depends on DUMMY_CONSOLE && !ARM
-        default 64 if PARISC
-        default 25
-        help
-          On PA-RISC, the default value is 64, which should fit a 1280x1024
-          monitor.
-          Select 25 if you use a 640x480 resolution by default.
+	int "Initial number of console screen rows"
+	depends on DUMMY_CONSOLE && !ARM
+	default 64 if PARISC
+	default 25
+	help
+	  On PA-RISC, the default value is 64, which should fit a 1280x1024
+	  monitor.
+	  Select 25 if you use a 640x480 resolution by default.
 
 config FRAMEBUFFER_CONSOLE
 	bool "Framebuffer Console support"
@@ -129,11 +129,11 @@
        depends on FRAMEBUFFER_CONSOLE
        default n
        ---help---
-         If this option is selected, the framebuffer console will
-         automatically select the primary display device (if the architecture
+	 If this option is selected, the framebuffer console will
+	 automatically select the primary display device (if the architecture
 	 supports this feature).  Otherwise, the framebuffer console will
-         always select the first framebuffer driver that is loaded. The latter
-         is the default behavior.
+	 always select the first framebuffer driver that is loaded. The latter
+	 is the default behavior.
 
 	 You can always override the automatic selection of the primary device
 	 by using the fbcon=map: boot option.
@@ -144,11 +144,11 @@
        bool "Framebuffer Console Rotation"
        depends on FRAMEBUFFER_CONSOLE
        help
-         Enable display rotation for the framebuffer console.  This is done
-         in software and may be significantly slower than a normally oriented
-         display.  Note that the rotation is done at the console level only
-         such that other users of the framebuffer will remain normally
-         oriented.
+	 Enable display rotation for the framebuffer console.  This is done
+	 in software and may be significantly slower than a normally oriented
+	 display.  Note that the rotation is done at the console level only
+	 such that other users of the framebuffer will remain normally
+	 oriented.
 
 config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
 	bool "Framebuffer Console Deferred Takeover"
@@ -162,14 +162,14 @@
 	  black screen as soon as fbcon loads.
 
 config STI_CONSOLE
-        bool "STI text console"
+	bool "STI text console"
 	depends on PARISC && HAS_IOMEM
-        select FONT_SUPPORT
-        default y
-        help
-          The STI console is the builtin display/keyboard on HP-PARISC
-          machines.  Say Y here to build support for it into your kernel.
-          The alternative is to use your primary serial port as a console.
+	select FONT_SUPPORT
+	default y
+	help
+	  The STI console is the builtin display/keyboard on HP-PARISC
+	  machines.  Say Y here to build support for it into your kernel.
+	  The alternative is to use your primary serial port as a console.
 
 endmenu
 
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f65991a..91b0a71 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -435,7 +435,7 @@
 
 config FB_ARC
 	tristate "Arc Monochrome LCD board support"
-	depends on FB && X86
+	depends on FB && (X86 || COMPILE_TEST)
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -1639,7 +1639,7 @@
 
 config FB_WM8505
 	bool "Wondermedia WM8xxx-series frame buffer support"
-	depends on (FB = y) && ARM && ARCH_VT8500
+	depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST)
 	select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
 	select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
 	select FB_SYS_IMAGEBLIT
@@ -1827,7 +1827,7 @@
 
 config FB_W100
 	tristate "W100 frame buffer support"
-	depends on FB && ARCH_PXA
+	depends on FB && HAS_IOMEM && (ARCH_PXA || COMPILE_TEST)
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -1844,7 +1844,8 @@
 
 config FB_SH_MOBILE_LCDC
 	tristate "SuperH Mobile LCDC framebuffer support"
-	depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
+	depends on FB && HAVE_CLK && HAS_IOMEM
+	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 27cb65f..9c37e28 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -618,14 +618,13 @@ static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
 	u32 mhz100;		/* in 0.01 MHz */
 	u32 program_bits;
 	/* u32 post_divider; */
-	u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
+	u32 mach64MinFreq, mach64MaxFreq;
 	u16 m, n, k = 0, save_m, save_n, twoToKth;
 
 	/* Calculate the programming word */
 	mhz100 = 100000000 / vclk_per;
 	mach64MinFreq = MIN_FREQ_2595;
 	mach64MaxFreq = MAX_FREQ_2595;
-	mach64RefFreq = REF_FREQ_2595;	/* 14.32 MHz */
 
 	save_m = 0;
 	save_n = 0;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 3af00e3..e116a3f 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -849,12 +849,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
 		case 9 ... 16:
 			v.bits_per_pixel = 16;
 			break;
-		case 17 ... 24:
-#if 0 /* Doesn't seem to work */
-			v.bits_per_pixel = 24;
-			break;
-#endif			
-			return -EINVAL;
 		case 25 ... 32:
 			v.bits_per_pixel = 32;
 			break;
@@ -1650,14 +1644,14 @@ static int radeonfb_set_par(struct fb_info *info)
 	struct fb_var_screeninfo *mode = &info->var;
 	struct radeon_regs *newmode;
 	int hTotal, vTotal, hSyncStart, hSyncEnd,
-	    hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
+	    vSyncStart, vSyncEnd;
 	u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
 	u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
 	u32 sync, h_sync_pol, v_sync_pol, dotClock, pixClock;
 	int i, freq;
 	int format = 0;
 	int nopllcalc = 0;
-	int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
+	int hsync_start, hsync_fudge, hsync_wid, vsync_wid;
 	int primary_mon = PRIMARY_MONITOR(rinfo);
 	int depth = var_to_depth(mode);
 	int use_rmx = 0;
@@ -1730,13 +1724,7 @@ static int radeonfb_set_par(struct fb_info *info)
 	else if (vsync_wid > 0x1f)	/* max */
 		vsync_wid = 0x1f;
 
-	hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
-	vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
-	cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
-
 	format = radeon_get_dstbpp(depth);
-	bytpp = mode->bits_per_pixel >> 3;
 
 	if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
 		hsync_fudge = hsync_fudge_fp[format-1];
@@ -2548,16 +2536,6 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
 	if (rinfo->mon2_EDID)
 		sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr);
 
-#if 0
-	/* restore original state
-	 * 
-	 * Doesn't quite work yet, I suspect if we come from a legacy
-	 * VGA mode (or worse, text mode), we need to do some VGA black
-	 * magic here that I know nothing about. --BenH
-	 */
-        radeon_write_mode (rinfo, &rinfo->init_state, 1);
- #endif
-
 	del_timer_sync(&rinfo->lvds_timer);
 	arch_phys_wc_del(rinfo->wc_cookie);
         unregister_framebuffer(info);
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index a620b51..6a745eb 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -509,8 +509,7 @@ static int cg14_probe(struct platform_device *op)
 	if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
 		goto out_unmap_regs;
 
-	is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
-		  (8 * 1024 * 1024));
+	is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
 
 	BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
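
The resource_size() conversions here and in s1d13xxxfb below are behavior-preserving; the helper, as defined in <linux/ioport.h>, encapsulates the off-by-one-prone open-coded form:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}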
 		
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 3771031..26cbc96 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -16,7 +16,6 @@
 				     fbcon_ccw.o
 endif
 endif
-fb-objs                           := $(fb-y)
 
 obj-$(CONFIG_FB_CFB_FILLRECT)  += cfbfillrect.o
 obj-$(CONFIG_FB_CFB_COPYAREA)  += cfbcopyarea.o
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bb6ae99..2833578 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -873,7 +873,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
 	int oldidx = con2fb_map[unit];
 	struct fb_info *info = registered_fb[newidx];
 	struct fb_info *oldinfo = NULL;
- 	int found, err = 0;
+	int found, err = 0;
 
 	WARN_CONSOLE_UNLOCKED();
 
@@ -895,31 +895,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
 
 	con2fb_map[unit] = newidx;
 	if (!err && !found)
- 		err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
-
+		err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
 
 	/*
 	 * If old fb is not mapped to any of the consoles,
 	 * fbcon should release it.
 	 */
- 	if (!err && oldinfo && !search_fb_in_map(oldidx))
- 		err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- 					     found);
+	if (!err && oldinfo && !search_fb_in_map(oldidx))
+		err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
+					     found);
 
- 	if (!err) {
- 		int show_logo = (fg_console == 0 && !user &&
- 				 logo_shown != FBCON_LOGO_DONTSHOW);
+	if (!err) {
+		int show_logo = (fg_console == 0 && !user &&
+				 logo_shown != FBCON_LOGO_DONTSHOW);
 
- 		if (!found)
- 			fbcon_add_cursor_timer(info);
- 		con2fb_map_boot[unit] = newidx;
- 		con2fb_init_display(vc, info, unit, show_logo);
+		if (!found)
+			fbcon_add_cursor_timer(info);
+		con2fb_map_boot[unit] = newidx;
+		con2fb_init_display(vc, info, unit, show_logo);
 	}
 
 	if (!search_fb_in_map(info_idx))
 		info_idx = newidx;
 
- 	return err;
+	return err;
 }
 
 /*
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index d045549..30e73ec 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -663,20 +663,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
 		fb_logo.depth = 1;
 
 
- 	if (fb_logo.depth > 4 && depth > 4) {
- 		switch (info->fix.visual) {
- 		case FB_VISUAL_TRUECOLOR:
- 			fb_logo.needs_truepalette = 1;
- 			break;
- 		case FB_VISUAL_DIRECTCOLOR:
- 			fb_logo.needs_directpalette = 1;
- 			fb_logo.needs_cmapreset = 1;
- 			break;
- 		case FB_VISUAL_PSEUDOCOLOR:
- 			fb_logo.needs_cmapreset = 1;
- 			break;
- 		}
- 	}
+	if (fb_logo.depth > 4 && depth > 4) {
+		switch (info->fix.visual) {
+		case FB_VISUAL_TRUECOLOR:
+			fb_logo.needs_truepalette = 1;
+			break;
+		case FB_VISUAL_DIRECTCOLOR:
+			fb_logo.needs_directpalette = 1;
+			fb_logo.needs_cmapreset = 1;
+			break;
+		case FB_VISUAL_PSEUDOCOLOR:
+			fb_logo.needs_cmapreset = 1;
+			break;
+		}
+	}
 
 	height = fb_logo.logo->height;
 	if (fb_center_logo)
@@ -1065,19 +1065,19 @@ fb_blank(struct fb_info *info, int blank)
 	struct fb_event event;
 	int ret = -EINVAL;
 
- 	if (blank > FB_BLANK_POWERDOWN)
- 		blank = FB_BLANK_POWERDOWN;
+	if (blank > FB_BLANK_POWERDOWN)
+		blank = FB_BLANK_POWERDOWN;
 
 	event.info = info;
 	event.data = &blank;
 
 	if (info->fbops->fb_blank)
- 		ret = info->fbops->fb_blank(blank, info);
+		ret = info->fbops->fb_blank(blank, info);
 
 	if (!ret)
 		fb_notifier_call_chain(FB_EVENT_BLANK, &event);
 
- 	return ret;
+	return ret;
 }
 EXPORT_SYMBOL(fb_blank);
 
@@ -1115,7 +1115,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 		break;
 	case FBIOGET_FSCREENINFO:
 		lock_fb_info(info);
-		fix = info->fix;
+		memcpy(&fix, &info->fix, sizeof(fix));
 		if (info->flags & FBINFO_HIDE_SMEM_START)
 			fix.smem_start = 0;
 		unlock_fb_info(info);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index f47d50e..e4c3c8b 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -594,8 +594,8 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
 	if (!t) {
 		pr_err("Time out on waiting resolution response\n");
-			ret = -ETIMEDOUT;
-			goto out;
+		ret = -ETIMEDOUT;
+		goto out;
 	}
 
 	if (msg->resolution_resp.resolution_count == 0) {
diff --git a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
index 0aeeaa1..9fde0e3 100644
--- a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
@@ -331,7 +331,7 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
 	u32 ulScale;
 	u32 ulLeft, ulRight;
 	u32 ulSrcLeft, ulSrcRight;
-	u32 ulScaleLeft, ulScaleRight;
+	u32 ulScaleLeft;
 	u32 ulhDecim;
 	u32 ulsVal;
 	u32 ulVertDecFactor;
@@ -470,7 +470,6 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
 		 * round down the pixel pos to the nearest 8 pixels.
 		 */
 		ulScaleLeft = ulSrcLeft;
-		ulScaleRight = ulSrcRight;
 
 		/* shift fxscale until it is in the range of the scaler */
 		ulhDecim = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 36cc718..570439b 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1376,6 +1376,12 @@ static struct video_board vbG200 = {
 	.accelID = FB_ACCEL_MATROX_MGAG200,
 	.lowlevel = &matrox_G100
 };
+static struct video_board vbG200eW = {
+	.maxvram = 0x800000,
+	.maxdisplayable = 0x800000,
+	.accelID = FB_ACCEL_MATROX_MGAG200,
+	.lowlevel = &matrox_G100
+};
 /* from doc it looks like that accelerator can draw only to low 16MB :-( Direct accesses & displaying are OK for
    whole 32MB */
 static struct video_board vbG400 = {
@@ -1494,6 +1500,13 @@ static struct board {
 		MGA_G200,
 		&vbG200,
 		"MGA-G200 (PCI)"},
+	{PCI_VENDOR_ID_MATROX,	0x0532,	0xFF,
+		0,			0,
+		DEVF_G200,
+		250000,
+		MGA_G200,
+		&vbG200eW,
+		"MGA-G200eW (PCI)"},
 	{PCI_VENDOR_ID_MATROX,	PCI_DEVICE_ID_MATROX_G200_AGP,	0xFF,
 		PCI_SS_VENDOR_ID_MATROX,	PCI_SS_ID_MATROX_GENERIC,
 		DEVF_G200,
@@ -2136,6 +2149,8 @@ static const struct pci_device_id matroxfb_devices[] = {
 		PCI_ANY_ID,	PCI_ANY_ID,	0, 0, 0},
 	{PCI_VENDOR_ID_MATROX,	PCI_DEVICE_ID_MATROX_G200_PCI,
 		PCI_ANY_ID,	PCI_ANY_ID,	0, 0, 0},
+	{PCI_VENDOR_ID_MATROX,	0x0532,
+		PCI_ANY_ID,	PCI_ANY_ID,	0, 0, 0},
 	{PCI_VENDOR_ID_MATROX,	PCI_DEVICE_ID_MATROX_G200_AGP,
 		PCI_ANY_ID,	PCI_ANY_ID,	0, 0, 0},
 	{PCI_VENDOR_ID_MATROX,	PCI_DEVICE_ID_MATROX_G400,
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 335d498..167585a 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
 
 	/*pathes*/
 	int path_num;
-	struct mmphw_path_plat path_plats[0];
+	struct mmphw_path_plat path_plats[];
 };
 
 static inline int overlay_is_vid(struct mmp_overlay *overlay)
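
The [0] to [] conversions (here, in mhl.h and in ssd1307fb) replace GCC zero-length arrays with C99 flexible array members, part of the tree-wide removal noted in the summary. Allocations then typically size the trailing array with struct_size() from <linux/overflow.h>; a sketch, not from this patch:

	struct mmphw_ctrl *ctrl;

	/* sizeof(*ctrl) plus path_num trailing elements, overflow-checked */
	ctrl = devm_kzalloc(dev, struct_size(ctrl, path_plats, path_num),
			    GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;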
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index c583c01..c24de91 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -168,27 +168,26 @@ static int nvidia_panel_tweak(struct nvidia_par *par,
 {
 	int tweak = 0;
 
-   if (par->paneltweak) {
-	   tweak = par->paneltweak;
-   } else {
-	   /* begin flat panel hacks */
-	   /* This is unfortunate, but some chips need this register
-	      tweaked or else you get artifacts where adjacent pixels are
-	      swapped.  There are no hard rules for what to set here so all
-	      we can do is experiment and apply hacks. */
+	if (par->paneltweak) {
+		tweak = par->paneltweak;
+	} else {
+		/* Begin flat panel hacks.
+		 * This is unfortunate, but some chips need this register
+		 * tweaked or else you get artifacts where adjacent pixels are
+		 * swapped.  There are no hard rules for what to set here so all
+		 * we can do is experiment and apply hacks.
+		 */
+		if (((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
+			/* At least one NV34 laptop needs this workaround. */
+			tweak = -1;
+		}
 
-	   if(((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
-		   /* At least one NV34 laptop needs this workaround. */
-		   tweak = -1;
-	   }
+		if ((par->Chipset & 0xfff0) == 0x0310)
+			tweak = 1;
+		/* end flat panel hacks */
+	}
 
-	   if((par->Chipset & 0xfff0) == 0x0310) {
-		   tweak = 1;
-	   }
-	   /* end flat panel hacks */
-   }
-
-   return tweak;
+	return tweak;
 }
 
 static void nvidia_screen_off(struct nvidia_par *par, int on)
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 8dfa915..836e7b1 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1154,16 +1154,12 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
 		   r = fbdev->ctrl->setcolreg(regno, red, green, blue,
 		   transp, update_hw_pal);
 		   */
-		/* Fallthrough */
 		r = -EINVAL;
 		break;
 	case OMAPFB_COLOR_RGB565:
 	case OMAPFB_COLOR_RGB444:
 	case OMAPFB_COLOR_RGB24P:
 	case OMAPFB_COLOR_RGB24U:
-		if (r != 0)
-			break;
-
 		if (regno < 16) {
 			u32 pal;
 			pal = ((red >> (16 - var->red.length)) <<
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 9b9ec14..aef8a30 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -769,7 +769,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 	dma_free_wc(fbi->dev, info->fix.smem_len,
 		    info->screen_base, fbi->fb_start_dma);
 failed_free_info:
-	kfree(info);
+	framebuffer_release(info);
 
 	dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
 	return ret;
@@ -779,7 +779,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
 {
 	struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
 	struct fb_info *info;
-	int irq;
 	unsigned int data;
 
 	if (!fbi)
@@ -799,8 +798,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
 	if (info->cmap.len)
 		fb_dealloc_cmap(&info->cmap);
 
-	irq = platform_get_irq(pdev, 0);
-
 	dma_free_wc(fbi->dev, info->fix.smem_len,
 		    info->screen_base, info->fix.smem_start);
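
The kfree() to framebuffer_release() fix matters because the fb_info was obtained from framebuffer_alloc(), which allocates the driver-private area together with the structure; framebuffer_release() also frees auxiliary allocations (such as info->apertures) that a bare kfree() would leak. The canonical pairing, sketched:

	info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
	if (!info)
		return -ENOMEM;
	/* ... setup and registration ... */
	framebuffer_release(info);	/* never a plain kfree(info) */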
 
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 8048499..eaea8c3 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -746,9 +746,9 @@ s1d13xxxfb_remove(struct platform_device *pdev)
 	}
 
 	release_mem_region(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start +1);
+			   resource_size(&pdev->resource[0]));
 	release_mem_region(pdev->resource[1].start,
-			pdev->resource[1].end - pdev->resource[1].start +1);
+			   resource_size(&pdev->resource[1]));
 	return 0;
 }
 
@@ -788,14 +788,14 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 	}
 
 	if (!request_mem_region(pdev->resource[0].start,
-		pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) {
+		resource_size(&pdev->resource[0]), "s1d13xxxfb mem")) {
 		dev_dbg(&pdev->dev, "request_mem_region failed\n");
 		ret = -EBUSY;
 		goto bail;
 	}
 
 	if (!request_mem_region(pdev->resource[1].start,
-		pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) {
+		resource_size(&pdev->resource[1]), "s1d13xxxfb regs")) {
 		dev_dbg(&pdev->dev, "request_mem_region failed\n");
 		ret = -EBUSY;
 		goto bail;
@@ -810,7 +810,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, info);
 	default_par = info->par;
 	default_par->regs = ioremap(pdev->resource[1].start,
-			pdev->resource[1].end - pdev->resource[1].start +1);
+				    resource_size(&pdev->resource[1]));
 	if (!default_par->regs) {
 		printk(KERN_ERR PFX "unable to map registers\n");
 		ret = -ENOMEM;
@@ -819,7 +819,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 	info->pseudo_palette = default_par->pseudo_palette;
 
 	info->screen_base = ioremap(pdev->resource[0].start,
-			pdev->resource[0].end - pdev->resource[0].start +1);
+				    resource_size(&pdev->resource[0]));
 
 	if (!info->screen_base) {
 		printk(KERN_ERR PFX "unable to map framebuffer\n");
@@ -857,9 +857,9 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
 
 	info->fix = s1d13xxxfb_fix;
 	info->fix.mmio_start = pdev->resource[1].start;
-	info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
+	info->fix.mmio_len = resource_size(&pdev->resource[1]);
 	info->fix.smem_start = pdev->resource[0].start;
-	info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+	info->fix.smem_len = resource_size(&pdev->resource[0]);
 
 	printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
 	       default_par->regs, info->fix.smem_len / 1024, info->screen_base);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 5bb653d..2d285cc 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1053,7 +1053,7 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
 }
 
 /* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs = {
+static const struct fb_monspecs monspecs = {
 	.hfmin	= 30000,
 	.hfmax	= 70000,
 	.vfmin	= 50,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 4ea6f93..8a27d12 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1572,7 +1572,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &sh_mobile_lcdc_overlay_ops;
 	info->device = priv->dev;
-	info->screen_base = ovl->fb_mem;
+	info->screen_buffer = ovl->fb_mem;
 	info->par = ovl;
 
 	/* Initialize fixed screen information. Restrict pan to 2 lines steps
@@ -2056,7 +2056,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &sh_mobile_lcdc_ops;
 	info->device = priv->dev;
-	info->screen_base = ch->fb_mem;
+	info->screen_buffer = ch->fb_mem;
 	info->pseudo_palette = &ch->pseudo_palette;
 	info->par = ch;
 
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 1425352..12fa105 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -89,7 +89,7 @@ struct ssd1307fb_par {
 
 struct ssd1307fb_array {
 	u8	type;
-	u8	data[0];
+	u8	data[];
 };
 
 static const struct fb_fix_screeninfo ssd1307fb_fix = {
@@ -791,6 +791,8 @@ static int ssd1307fb_remove(struct i2c_client *client)
 		pwm_disable(par->pwm);
 		pwm_put(par->pwm);
 	}
+	if (par->vbat_reg)
+		regulator_disable(par->vbat_reg);
 	fb_deferred_io_cleanup(info);
 	__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
 	framebuffer_release(info);
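
The added regulator_disable() balances the regulator_enable() performed at probe time; the regulator core complains when an enabled regulator is released. The expected pairing, sketched:

	/* probe: */
	ret = regulator_enable(par->vbat_reg);
	if (ret < 0)
		return ret;

	/* remove: balance every successful regulator_enable() */
	regulator_disable(par->vbat_reg);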
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index ad26cbf..2d6e273 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -61,9 +61,9 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
 #define BITS_PER_PIXEL    16
 
 /* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
-static void *remapped_base;
-static void *remapped_regs;
-static void *remapped_fbuf;
+static void __iomem *remapped_base;
+static void __iomem *remapped_regs;
+static void __iomem *remapped_fbuf;
 
 #define REMAPPED_FB_LEN   0x15ffff
 
@@ -635,7 +635,7 @@ static int w100fb_resume(struct platform_device *dev)
 #endif
 
 
-int w100fb_probe(struct platform_device *pdev)
+static int w100fb_probe(struct platform_device *pdev)
 {
 	int err = -EIO;
 	struct w100fb_mach_info *inf;
@@ -807,10 +807,11 @@ static int w100fb_remove(struct platform_device *pdev)
 
 static void w100_soft_reset(void)
 {
-	u16 val = readw((u16 *) remapped_base + cfgSTATUS);
-	writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
+	u16 val = readw((u16 __iomem *)remapped_base + cfgSTATUS);
+
+	writew(val | 0x08, (u16 __iomem *)remapped_base + cfgSTATUS);
 	udelay(100);
-	writew(0x00, (u16 *) remapped_base + cfgSTATUS);
+	writew(0x00, (u16 __iomem *)remapped_base + cfgSTATUS);
 	udelay(100);
 }
 
@@ -1022,7 +1023,8 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
 			return pll_entry->pll_table;
 		pll_entry++;
 	} while (pll_entry->xtal_freq);
-	return 0;
+
+	return NULL;
 }
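
The __iomem annotations let sparse verify that MMIO mappings are only accessed through the I/O accessors. A sketch of the enforced pattern (REG_OFFSET is a placeholder):

	void __iomem *regs;
	u16 val;

	regs = ioremap(res->start, resource_size(res));
	val = readw(regs + REG_OFFSET);		/* not *(u16 *)(regs + ...) */
	writew(val | 0x08, regs + REG_OFFSET);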
 
 
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index b656eff..8f4d674 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -339,7 +339,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
 
 	fbi->fb.fix.smem_start		= fb_mem_phys;
 	fbi->fb.fix.smem_len		= fb_mem_len;
-	fbi->fb.screen_base		= fb_mem_virt;
+	fbi->fb.screen_buffer		= fb_mem_virt;
 	fbi->fb.screen_size		= fb_mem_len;
 
 	fbi->contrast = 0x10;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 9c82e2a..856a8c4 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -53,18 +53,14 @@ static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
 /**
  * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
  * @frame: HDMI AVI infoframe
- *
- * Returns 0 on success or a negative error code on failure.
  */
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
 {
 	memset(frame, 0, sizeof(*frame));
 
 	frame->type = HDMI_INFOFRAME_TYPE_AVI;
 	frame->version = 2;
 	frame->length = HDMI_AVI_INFOFRAME_SIZE;
-
-	return 0;
 }
 EXPORT_SYMBOL(hdmi_avi_infoframe_init);
 
@@ -1553,7 +1549,6 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
 				     const void *buffer, size_t size)
 {
 	const u8 *ptr = buffer;
-	int ret;
 
 	if (size < HDMI_INFOFRAME_SIZE(AVI))
 		return -EINVAL;
@@ -1566,9 +1561,7 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
 	if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
 		return -EINVAL;
 
-	ret = hdmi_avi_infoframe_init(frame);
-	if (ret)
-		return ret;
+	hdmi_avi_infoframe_init(frame);
 
 	ptr += HDMI_INFOFRAME_HEADER_SIZE;
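
With hdmi_avi_infoframe_init() now returning void, callers can drop the dead error check; the updated pattern, sketched with an illustrative field assignment:

	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
	ssize_t len;

	hdmi_avi_infoframe_init(&frame);	/* cannot fail */
	frame.colorspace = HDMI_COLORSPACE_RGB;	/* illustrative */
	len = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));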
 
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 9d4d5cc..0b34a12 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -129,6 +129,7 @@ struct dw_hdmi_plat_data {
 	unsigned long input_bus_format;
 	unsigned long input_bus_encoding;
 	bool use_drm_infoframe;
+	bool ycbcr_420_allowed;
 
 	/* Vendor PHY support */
 	const struct dw_hdmi_phy_ops *phy_ops;
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 1cc77bf..d96626a 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -327,13 +327,13 @@ struct mhl_burst_bits_per_pixel_fmt {
 	struct {
 		u8 stream_id;
 		u8 pixel_format;
-	} __packed desc[0];
+	} __packed desc[];
 } __packed;
 
 struct mhl_burst_emsc_support {
 	struct mhl3_burst_header hdr;
 	u8 num_entries;
-	__be16 burst_id[0];
+	__be16 burst_id[];
 } __packed;
 
 struct mhl_burst_audio_descr {
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 951dfb1..7b6cb477 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -670,6 +670,9 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
 }
 
 int __must_check
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+			       struct drm_encoder *encoder);
+int __must_check
 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
 				   struct drm_crtc *crtc);
 int __must_check
@@ -992,4 +995,77 @@ drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
 	return state->active || state->self_refresh_active;
 }
 
+/**
+ * struct drm_bus_cfg - bus configuration
+ *
+ * This structure stores the configuration of a physical bus between two
+ * components in an output pipeline, usually between two bridges, an encoder
+ * and a bridge, or a bridge and a connector.
+ *
+ * The bus configuration is stored in &drm_bridge_state separately for the
+ * input and output buses, as seen from the point of view of each bridge. The
+ * bus configuration of a bridge output is usually identical to the
+ * configuration of the next bridge's input, but may differ if the signals are
+ * modified between the two bridges, for instance by an inverter on the board.
+ * The input and output configurations of a bridge may differ if the bridge
+ * modifies the signals internally, for instance by performing format
+ * conversion, or by modifying signal polarities.
+ */
+struct drm_bus_cfg {
+	/**
+	 * @format: format used on this bus (one of the MEDIA_BUS_FMT_* formats)
+	 *
+	 * This field should not be directly modified by drivers
+	 * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
+	 * format negotiation).
+	 */
+	u32 format;
+
+	/**
+	 * @flags: DRM_BUS_* flags used on this bus
+	 */
+	u32 flags;
+};
+
+/**
+ * struct drm_bridge_state - Atomic bridge state object
+ */
+struct drm_bridge_state {
+	/**
+	 * @base: inherit from &drm_private_state
+	 */
+	struct drm_private_state base;
+
+	/**
+	 * @bridge: the bridge this state refers to
+	 */
+	struct drm_bridge *bridge;
+
+	/**
+	 * @input_bus_cfg: input bus configuration
+	 */
+	struct drm_bus_cfg input_bus_cfg;
+
+	/**
+	 * @output_bus_cfg: output bus configuration
+	 */
+	struct drm_bus_cfg output_bus_cfg;
+};
+
+static inline struct drm_bridge_state *
+drm_priv_to_bridge_state(struct drm_private_state *priv)
+{
+	return container_of(priv, struct drm_bridge_state, base);
+}
+
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+			    struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+				struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+				struct drm_bridge *bridge);
+
 #endif /* DRM_ATOMIC_H_ */
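
A converted bridge driver running inside an atomic hook can retrieve its state through the accessors declared above; a minimal sketch:

	struct drm_bridge_state *bridge_state;
	u32 fmt;

	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!bridge_state))
		return -EINVAL;

	/* Filled in by the bus-format negotiation during atomic check. */
	fmt = bridge_state->input_bus_cfg.format;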
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9db3cac..b268180 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -224,4 +224,12 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
 	return old_plane_state->crtc && !new_plane_state->crtc;
 }
 
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts);
+
 #endif /* DRM_ATOMIC_HELPER_H_ */
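
For a bridge that forwards its input unmodified, the propagate helper declared above can directly implement the input-format hook; a sketch with a hypothetical driver prefix:

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
		.atomic_reset = drm_atomic_helper_bridge_reset,
		/* pass-through: report the output format as the only input format */
		.atomic_get_input_bus_fmts =
			drm_atomic_helper_bridge_propagate_bus_fmt,
		/* ... enable/disable hooks elided ... */
	};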
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 8171dea..3f8f1d6 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,8 @@
 
 #include <linux/types.h>
 
+struct drm_bridge;
+struct drm_bridge_state;
 struct drm_crtc;
 struct drm_crtc_state;
 struct drm_plane;
@@ -80,3 +82,14 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
 					  struct drm_connector_state *state);
 void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
 						     struct drm_private_state *state);
+
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+						struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge);
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+					    struct drm_bridge_state *state);
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+				      struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 694e153..ea2aa5e 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,15 +23,32 @@
 #ifndef __DRM_BRIDGE_H__
 #define __DRM_BRIDGE_H__
 
-#include <linux/list.h>
 #include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_atomic.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_mode_object.h>
 #include <drm/drm_modes.h>
 
 struct drm_bridge;
 struct drm_bridge_timings;
+struct drm_connector;
 struct drm_panel;
+struct edid;
+struct i2c_adapter;
+
+/**
+ * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach
+ */
+enum drm_bridge_attach_flags {
+	/**
+	 * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge
+	 * shall not create a drm_connector.
+	 */
+	DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0),
+};
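
Display drivers opt into creating the connector themselves by passing this flag at attach time; a sketch, assuming the flags-taking drm_bridge_attach() signature introduced alongside this enum:

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;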
 
 /**
  * struct drm_bridge_funcs - drm_bridge control functions
@@ -41,7 +58,8 @@ struct drm_bridge_funcs {
 	 * @attach:
 	 *
 	 * This callback is invoked whenever our bridge is being attached to a
-	 * &drm_encoder.
+	 * &drm_encoder. The flags argument tunes the behaviour of the attach
+	 * operation (see DRM_BRIDGE_ATTACH_*).
 	 *
 	 * The @attach callback is optional.
 	 *
@@ -49,7 +67,8 @@ struct drm_bridge_funcs {
 	 *
 	 * Zero on success, error code on failure.
 	 */
-	int (*attach)(struct drm_bridge *bridge);
+	int (*attach)(struct drm_bridge *bridge,
+		      enum drm_bridge_attach_flags flags);
 
 	/**
 	 * @detach:
@@ -109,7 +128,9 @@ struct drm_bridge_funcs {
 	 * this function passes all other callbacks must succeed for this
 	 * configuration.
 	 *
-	 * The @mode_fixup callback is optional.
+	 * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup()
+	 * is not called when &drm_bridge_funcs.atomic_check() is implemented,
+	 * so only one of them should be provided.
 	 *
 	 * NOTE:
 	 *
@@ -263,7 +284,7 @@ struct drm_bridge_funcs {
 	 * The @atomic_pre_enable callback is optional.
 	 */
 	void (*atomic_pre_enable)(struct drm_bridge *bridge,
-				  struct drm_atomic_state *old_state);
+				  struct drm_bridge_state *old_bridge_state);
 
 	/**
 	 * @atomic_enable:
@@ -288,7 +309,7 @@ struct drm_bridge_funcs {
 	 * The @atomic_enable callback is optional.
 	 */
 	void (*atomic_enable)(struct drm_bridge *bridge,
-			      struct drm_atomic_state *old_state);
+			      struct drm_bridge_state *old_bridge_state);
 	/**
 	 * @atomic_disable:
 	 *
@@ -311,7 +332,7 @@ struct drm_bridge_funcs {
 	 * The @atomic_disable callback is optional.
 	 */
 	void (*atomic_disable)(struct drm_bridge *bridge,
-			       struct drm_atomic_state *old_state);
+			       struct drm_bridge_state *old_bridge_state);
 
 	/**
 	 * @atomic_post_disable:
@@ -337,7 +358,275 @@ struct drm_bridge_funcs {
 	 * The @atomic_post_disable callback is optional.
 	 */
 	void (*atomic_post_disable)(struct drm_bridge *bridge,
-				    struct drm_atomic_state *old_state);
+				    struct drm_bridge_state *old_bridge_state);
+
+	/**
+	 * @atomic_duplicate_state:
+	 *
+	 * Duplicate the current bridge state object (which is guaranteed to be
+	 * non-NULL).
+	 *
+	 * The atomic_duplicate_state hook is mandatory if the bridge
+	 * implements any of the atomic hooks, and should be left unassigned
+	 * otherwise. For bridges that don't subclass &drm_bridge_state, the
+	 * drm_atomic_helper_bridge_duplicate_state() helper function shall be
+	 * used to implement this hook.
+	 *
+	 * RETURNS:
+	 * A valid drm_bridge_state object or NULL if the allocation fails.
+	 */
+	struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge);
+
+	/**
+	 * @atomic_destroy_state:
+	 *
+	 * Destroy a bridge state object previously allocated by
+	 * &drm_bridge_funcs.atomic_duplicate_state().
+	 *
+	 * The atomic_destroy_state hook is mandatory if the bridge implements
+	 * any of the atomic hooks, and should be left unassigned otherwise.
+	 * For bridges that don't subclass &drm_bridge_state, the
+	 * drm_atomic_helper_bridge_destroy_state() helper function shall be
+	 * used to implement this hook.
+	 */
+	void (*atomic_destroy_state)(struct drm_bridge *bridge,
+				     struct drm_bridge_state *state);
+
+	/**
+	 * @atomic_get_output_bus_fmts:
+	 *
+	 * Return the supported bus formats on the output end of a bridge.
+	 * The returned array must be allocated with kmalloc() and will be
+	 * freed by the caller. If the allocation fails, NULL should be
+	 * returned. num_output_fmts must be set to the returned array size.
+	 * Formats listed in the returned array should be listed in decreasing
+	 * preference order (the core will try all formats until it finds one
+	 * that works).
+	 *
+	 * This method is only called on the last element of the bridge chain
+	 * as part of the bus format negotiation process that happens in
+	 * &drm_atomic_bridge_chain_select_bus_fmts().
+	 * This method is optional. When not implemented, the core will
+	 * fall back to &drm_connector.display_info.bus_formats[0] if
+	 * &drm_connector.display_info.num_bus_formats > 0,
+	 * or to MEDIA_BUS_FMT_FIXED otherwise.
+	 */
+	u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge,
+					   struct drm_bridge_state *bridge_state,
+					   struct drm_crtc_state *crtc_state,
+					   struct drm_connector_state *conn_state,
+					   unsigned int *num_output_fmts);
+
+	/**
+	 * @atomic_get_input_bus_fmts:
+	 *
+	 * Return the supported bus formats on the input end of a bridge for
+	 * a specific output bus format.
+	 *
+	 * The returned array must be allocated with kmalloc() and will be
+	 * freed by the caller. If the allocation fails, NULL should be
+	 * returned. num_input_fmts must be set to the returned array size.
+	 * Formats listed in the returned array should be listed in decreasing
+	 * preference order (the core will try all formats until it finds one
+	 * that works). When the format is not supported, NULL should be
+	 * returned and num_input_fmts should be set to 0.
+	 *
+	 * This method is called on all elements of the bridge chain as part of
+	 * the bus format negotiation process that happens in
+	 * drm_atomic_bridge_chain_select_bus_fmts().
+	 * This method is optional. When not implemented, the core will bypass
+	 * bus format negotiation on this element of the bridge without
+	 * failing, and the previous element in the chain will be passed
+	 * MEDIA_BUS_FMT_FIXED as its output bus format.
+	 *
+	 * Bridge drivers that need to support being linked to bridges that are
+	 * not supporting bus format negotiation should handle the
+	 * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a
+	 * sensible default value or extracting this information from somewhere
+	 * else (FW property, &drm_display_mode, &drm_display_info, ...)
+	 *
+	 * Note: Even if input format selection on the first bridge has no
+	 * impact on the negotiation process (bus format negotiation stops once
+	 * we reach the first element of the chain), drivers are expected to
+	 * return accurate input formats as the input format may be used to
+	 * configure the CRTC output appropriately.
+	 */
+	u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge,
+					  struct drm_bridge_state *bridge_state,
+					  struct drm_crtc_state *crtc_state,
+					  struct drm_connector_state *conn_state,
+					  u32 output_fmt,
+					  unsigned int *num_input_fmts);
+
+	/**
+	 * @atomic_check:
+	 *
+	 * This method is responsible for checking bridge state correctness.
+	 * It can also check the state of the surrounding components in chain
+	 * to make sure the whole pipeline can work properly.
+	 *
+	 * &drm_bridge_funcs.atomic_check() hooks are called in reverse
+	 * order (from the last to the first bridge).
+	 *
+	 * This method is optional. &drm_bridge_funcs.mode_fixup() is not
+	 * called when &drm_bridge_funcs.atomic_check() is implemented, so only
+	 * one of them should be provided.
+	 *
+	 * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or
+	 * &drm_bridge_state.output_bus_cfg.flags, this should happen in
+	 * this function. By default the &drm_bridge_state.output_bus_cfg.flags
+	 * field is set to the next bridge
+	 * &drm_bridge_state.input_bus_cfg.flags value or
+	 * &drm_connector.display_info.bus_flags if the bridge is the last
+	 * element in the chain.
+	 *
+	 * RETURNS:
+	 * zero if the check passed, a negative error code otherwise.
+	 */
+	int (*atomic_check)(struct drm_bridge *bridge,
+			    struct drm_bridge_state *bridge_state,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+
+	/**
+	 * @atomic_reset:
+	 *
+	 * Reset the bridge to a predefined state (or retrieve its current
+	 * state) and return a &drm_bridge_state object matching this state.
+	 * This function is called at attach time.
+	 *
+	 * The atomic_reset hook is mandatory if the bridge implements any of
+	 * the atomic hooks, and should be left unassigned otherwise. For
+	 * bridges that don't subclass &drm_bridge_state, the
+	 * drm_atomic_helper_bridge_reset() helper function shall be used to
+	 * implement this hook.
+	 *
+	 * Note that the atomic_reset() semantics do not exactly match the
+	 * reset() semantics found on other components (connector, plane, ...):
+	 *
+	 * 1. The reset operation happens when the bridge is attached, not when
+	 *    drm_mode_config_reset() is called
+	 * 2. It's meant to be used exclusively on bridges that have been
+	 *    converted to the ATOMIC API
+	 *
+	 * RETURNS:
+	 * A valid drm_bridge_state object in case of success, an ERR_PTR()
+	 * giving the reason for the failure otherwise.
+	 */
+	struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge);
+
+	/**
+	 * @detect:
+	 *
+	 * Check if anything is attached to the bridge output.
+	 *
+	 * This callback is optional; if not implemented, the bridge will be
+	 * considered as always having a component attached to its output.
+	 * Bridges that implement this callback shall set the
+	 * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops.
+	 *
+	 * RETURNS:
+	 *
+	 * drm_connector_status indicating the bridge output status.
+	 */
+	enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+
+	/**
+	 * @get_modes:
+	 *
+	 * Fill all modes currently valid for the sink into the &drm_connector
+	 * with drm_mode_probed_add().
+	 *
+	 * The @get_modes callback is mostly intended to support non-probeable
+	 * displays such as many fixed panels. Bridges that support reading
+	 * EDID shall leave @get_modes unimplemented and implement the
+	 * &drm_bridge_funcs->get_edid callback instead.
+	 *
+	 * This callback is optional. Bridges that implement it shall set the
+	 * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
+	 *
+	 * The connector parameter shall be used for the sole purpose of
+	 * filling modes, and shall not be stored internally by bridge drivers
+	 * for future usage.
+	 *
+	 * RETURNS:
+	 *
+	 * The number of modes added by calling drm_mode_probed_add().
+	 */
+	int (*get_modes)(struct drm_bridge *bridge,
+			 struct drm_connector *connector);
+
+	/**
+	 * @get_edid:
+	 *
+	 * Read and parse the EDID data of the connected display.
+	 *
+	 * The @get_edid callback is the preferred way of reporting mode
+	 * information for a display connected to the bridge output. Bridges
+	 * that support reading EDID shall implement this callback and leave
+	 * the @get_modes callback unimplemented.
+	 *
+	 * The caller of this operation shall first verify the output
+	 * connection status and refrain from reading EDID from a disconnected
+	 * output.
+	 *
+	 * This callback is optional. Bridges that implement it shall set the
+	 * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+	 *
+	 * The connector parameter shall be used for the sole purpose of EDID
+	 * retrieval and parsing, and shall not be stored internally by bridge
+	 * drivers for future usage.
+	 *
+	 * RETURNS:
+	 *
+	 * An edid structure newly allocated with kmalloc() (or similar) on
+	 * success, or NULL otherwise. The caller is responsible for freeing
+	 * the returned edid structure with kfree().
+	 */
+	struct edid *(*get_edid)(struct drm_bridge *bridge,
+				 struct drm_connector *connector);
+
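For bridges with an associated DDC adapter (see the @ddc field below), a plausible sketch of this hook simply defers to drm_get_edid():

static struct edid *mybridge_get_edid(struct drm_bridge *bridge,
				      struct drm_connector *connector)
{
	/* Fetch the EDID over the bridge's associated DDC adapter. */
	return drm_get_edid(connector, bridge->ddc);
}
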
+	/**
+	 * @hpd_notify:
+	 *
+	 * Notify the bridge of hot plug detection.
+	 *
+	 * This callback is optional; it may be implemented by bridges that
+	 * need to be notified of display connection or disconnection for
+	 * internal reasons. One use case is to reset the internal state of CEC
+	 * controllers for HDMI bridges.
+	 */
+	void (*hpd_notify)(struct drm_bridge *bridge,
+			   enum drm_connector_status status);
+
+	/**
+	 * @hpd_enable:
+	 *
+	 * Enable hot plug detection. From now on the bridge shall call
+	 * drm_bridge_hpd_notify() each time a change is detected in the output
+	 * connection status, until hot plug detection gets disabled with
+	 * @hpd_disable.
+	 *
+	 * This callback is optional and shall only be implemented by bridges
+	 * that support hot-plug notification without polling. Bridges that
+	 * implement it shall also implement the @hpd_disable callback and set
+	 * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+	 */
+	void (*hpd_enable)(struct drm_bridge *bridge);
+
+	/**
+	 * @hpd_disable:
+	 *
+	 * Disable hot plug detection. Once this function returns the bridge
+	 * shall not call drm_bridge_hpd_notify() when a change in the output
+	 * connection status occurs.
+	 *
+	 * This callback is optional and shall only be implemented by bridges
+	 * that support hot-plug notification without polling. Bridges that
+	 * implement it shall also implement the @hpd_enable callback and set
+	 * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+	 */
+	void (*hpd_disable)(struct drm_bridge *bridge);
 };
 
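As a hedged illustration of the HPD contract above (mybridge_read_hpd() is a hypothetical register read, not part of the patch), a bridge's interrupt handler would report status changes via drm_bridge_hpd_notify():

static irqreturn_t mybridge_hpd_irq(int irq, void *dev_id)
{
	struct drm_bridge *bridge = dev_id;
	bool connected = mybridge_read_hpd(bridge); /* hypothetical */

	drm_bridge_hpd_notify(bridge, connected ?
			      connector_status_connected :
			      connector_status_disconnected);

	return IRQ_HANDLED;
}
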
 /**
@@ -377,9 +666,44 @@ struct drm_bridge_timings {
 };
 
 /**
+ * enum drm_bridge_ops - Bitmask of operations supported by the bridge
+ */
+enum drm_bridge_ops {
+	/**
+	 * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to
+	 * its output. Bridges that set this flag shall implement the
+	 * &drm_bridge_funcs->detect callback.
+	 */
+	DRM_BRIDGE_OP_DETECT = BIT(0),
+	/**
+	 * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
+	 * connected to its output. Bridges that set this flag shall implement
+	 * the &drm_bridge_funcs->get_edid callback.
+	 */
+	DRM_BRIDGE_OP_EDID = BIT(1),
+	/**
+	 * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug
+	 * without requiring polling. Bridges that set this flag shall
+	 * implement the &drm_bridge_funcs->hpd_enable and
+	 * &drm_bridge_funcs->hpd_disable callbacks if they support enabling
+	 * and disabling hot-plug detection dynamically.
+	 */
+	DRM_BRIDGE_OP_HPD = BIT(2),
+	/**
+	 * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported
+	 * by the display at its output. This does not include reading EDID
+	 * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set
+	 * this flag shall implement the &drm_bridge_funcs->get_modes callback.
+	 */
+	DRM_BRIDGE_OP_MODES = BIT(3),
+};
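A sketch of how a bridge driver might advertise these operations (all mybridge_* symbols are illustrative assumptions, not from the patch):

struct mybridge {
	struct drm_bridge bridge;
};

static enum drm_connector_status mybridge_detect(struct drm_bridge *bridge)
{
	/* Illustrative only: a real driver would poll an HPD status bit. */
	return connector_status_connected;
}

static const struct drm_bridge_funcs mybridge_funcs = {
	.detect = mybridge_detect,
};

static void mybridge_register(struct mybridge *ctx)
{
	ctx->bridge.funcs = &mybridge_funcs;
	/* Only advertise the operations that are actually implemented. */
	ctx->bridge.ops = DRM_BRIDGE_OP_DETECT;
	ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
	drm_bridge_add(&ctx->bridge);
}
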
+
+/**
  * struct drm_bridge - central DRM bridge control structure
  */
 struct drm_bridge {
+	/** @base: inherit from &drm_private_object */
+	struct drm_private_obj base;
 	/** @dev: DRM device this bridge belongs to */
 	struct drm_device *dev;
 	/** @encoder: encoder to which this bridge is connected */
@@ -402,13 +726,52 @@ struct drm_bridge {
 	const struct drm_bridge_funcs *funcs;
 	/** @driver_private: pointer to the bridge driver's internal context */
 	void *driver_private;
+	/** @ops: bitmask of operations supported by the bridge */
+	enum drm_bridge_ops ops;
+	/**
+	 * @type: Type of the connection at the bridge output
+	 * (DRM_MODE_CONNECTOR_*). For bridges at the end of the chain, this
+	 * identifies the type of the connected display.
+	 */
+	int type;
+	/**
+	 * @interlace_allowed: Indicates whether the bridge can handle
+	 * interlaced modes.
+	 */
+	bool interlace_allowed;
+	/**
+	 * @ddc: Associated I2C adapter for DDC access, if any.
+	 */
+	struct i2c_adapter *ddc;
+	/** private: */
+	/**
+	 * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+	 */
+	struct mutex hpd_mutex;
+	/**
+	 * @hpd_cb: Hot plug detection callback, registered with
+	 * drm_bridge_hpd_enable().
+	 */
+	void (*hpd_cb)(void *data, enum drm_connector_status status);
+	/**
+	 * @hpd_data: Private data passed to the hot plug detection callback
+	 * @hpd_cb.
+	 */
+	void *hpd_data;
 };
 
+static inline struct drm_bridge *
+drm_priv_to_bridge(struct drm_private_obj *priv)
+{
+	return container_of(priv, struct drm_bridge, base);
+}
+
 void drm_bridge_add(struct drm_bridge *bridge);
 void drm_bridge_remove(struct drm_bridge *bridge);
 struct drm_bridge *of_drm_find_bridge(struct device_node *np);
 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
-		      struct drm_bridge *previous);
+		      struct drm_bridge *previous,
+		      enum drm_bridge_attach_flags flags);
 
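A hedged usage sketch of the new signature: an encoder driver that creates the connector itself (for instance through the drm_bridge_connector helper added below) would attach the chain with the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag:

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;
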
 /**
  * drm_bridge_get_next_bridge() - Get the next bridge in the chain
@@ -482,6 +845,9 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
 void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
 void drm_bridge_chain_enable(struct drm_bridge *bridge);
 
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+				  struct drm_crtc_state *crtc_state,
+				  struct drm_connector_state *conn_state);
 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
 				     struct drm_atomic_state *state);
 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -491,6 +857,27 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
 				    struct drm_atomic_state *state);
 
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+					struct drm_bridge_state *bridge_state,
+					struct drm_crtc_state *crtc_state,
+					struct drm_connector_state *conn_state,
+					u32 output_fmt,
+					unsigned int *num_input_fmts);
+
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+			 struct drm_connector *connector);
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+				 struct drm_connector *connector);
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+			   void (*cb)(void *data,
+				      enum drm_connector_status status),
+			   void *data);
+void drm_bridge_hpd_disable(struct drm_bridge *bridge);
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+			   enum drm_connector_status status);
+
 #ifdef CONFIG_DRM_PANEL_BRIDGE
 struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
 struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
new file mode 100644
index 0000000..33f6c3b
--- /dev/null
+++ b/include/drm/drm_bridge_connector.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DRM_BRIDGE_CONNECTOR_H__
+#define __DRM_BRIDGE_CONNECTOR_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+						struct drm_encoder *encoder);
+
+#endif /* __DRM_BRIDGE_CONNECTOR_H__ */
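A hedged sketch of how a display driver might use the new helper after attaching its bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR (drm and encoder are assumed locals):

	struct drm_connector *connector;

	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	drm_connector_attach_encoder(connector, encoder);
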
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 5cf2c5d..3ed5dee 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -44,6 +44,11 @@ struct drm_client_funcs {
 	 * returns zero gets the privilege to restore and no more clients are
 	 * called. This callback is not called after @unregister has been called.
 	 *
+	 * Note that the core does not guarantee exclusion against concurrent
+	 * drm_open(). Clients need to ensure this themselves, for example by
+	 * using drm_master_internal_acquire() and
+	 * drm_master_internal_release().
+	 *
 	 * This callback is optional.
 	 */
 	int (*restore)(struct drm_client_dev *client);
@@ -156,7 +161,7 @@ int drm_client_modeset_create(struct drm_client_dev *client);
 void drm_client_modeset_free(struct drm_client_dev *client);
 int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
 bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
-int drm_client_modeset_commit_force(struct drm_client_dev *client);
+int drm_client_modeset_commit_locked(struct drm_client_dev *client);
 int drm_client_modeset_commit(struct drm_client_dev *client);
 int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
 
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 2219109..19ae6bb 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -254,6 +254,23 @@ enum drm_panel_orientation {
 	DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+/**
+ * struct drm_monitor_range_info - Panel's Monitor range in EDID for
+ * &drm_display_info
+ *
+ * This struct is used to store a frequency range supported by the panel
+ * as parsed from EDID's detailed monitor range descriptor block.
+ *
+ * @min_vfreq: This is the min supported refresh rate in Hz from
+ *             EDID's detailed monitor range.
+ * @max_vfreq: This is the max supported refresh rate in Hz from
+ *             EDID's detailed monitor range.
+ */
+struct drm_monitor_range_info {
+	u8 min_vfreq;
+	u8 max_vfreq;
+};
+
 /*
  * This is a consolidated colorimetry list supported by HDMI and
  * DP protocol standard. The respective connectors will register
@@ -435,6 +452,14 @@ struct drm_display_info {
 	bool dvi_dual;
 
 	/**
+	 * @is_hdmi: True if the sink is an HDMI device.
+	 *
+	 * This field shall be used instead of calling
+	 * drm_detect_hdmi_monitor() when possible.
+	 */
+	bool is_hdmi;
+
+	/**
 	 * @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
 	 */
 	bool has_hdmi_infoframe;
@@ -465,6 +490,11 @@ struct drm_display_info {
 	 * @non_desktop: Non desktop display (HMD).
 	 */
 	bool non_desktop;
+
+	/**
+	 * @monitor_range: Frequency range supported by monitor range descriptor
+	 */
+	struct drm_monitor_range_info monitor_range;
 };
 
 int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -1357,6 +1387,12 @@ struct drm_connector {
 	 * rev1.1 4.2.2.6
 	 */
 	bool edid_corrupt;
+	/**
+	 * @real_edid_checksum: real edid checksum for corrupted edid block.
+	 * Required in DisplayPort 1.4 compliance testing
+	 * rev1.1 4.2.2.6
+	 */
+	u8 real_edid_checksum;
 
 	/** @debugfs_entry: debugfs directory for this connector */
 	struct dentry *debugfs_entry;
@@ -1512,6 +1548,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
 		DRM_CONNECTOR_UNREGISTERED;
 }
 
+const char *drm_get_connector_type_name(unsigned int connector_type);
 const char *drm_get_connector_status_name(enum drm_connector_status status);
 const char *drm_get_subpixel_order_name(enum subpixel_order order);
 const char *drm_get_dpms_name(int val);
@@ -1552,8 +1589,13 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
 					    uint64_t link_status);
 void drm_connector_set_vrr_capable_property(
 		struct drm_connector *connector, bool capable);
-int drm_connector_init_panel_orientation_property(
-	struct drm_connector *connector, int width, int height);
+int drm_connector_set_panel_orientation(
+	struct drm_connector *connector,
+	enum drm_panel_orientation panel_orientation);
+int drm_connector_set_panel_orientation_with_quirk(
+	struct drm_connector *connector,
+	enum drm_panel_orientation panel_orientation,
+	int width, int height);
 int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
 					  int min, int max);
 
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5e9b15a..59b51a0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -174,12 +174,25 @@ struct drm_crtc_state {
 	 * @no_vblank:
 	 *
 	 * Reflects the ability of a CRTC to send VBLANK events. This state
-	 * usually depends on the pipeline configuration, and the main usuage
-	 * is CRTCs feeding a writeback connector operating in oneshot mode.
-	 * In this case the VBLANK event is only generated when a job is queued
-	 * to the writeback connector, and we want the core to fake VBLANK
-	 * events when this part of the pipeline hasn't changed but others had
-	 * or when the CRTC and connectors are being disabled.
+	 * usually depends on the pipeline configuration. If set to true, DRM
+	 * atomic helpers will send out a fake VBLANK event during display
+	 * updates after all hardware changes have been committed. This is
+	 * implemented in drm_atomic_helper_fake_vblank().
+	 *
+	 * One usage is for drivers and/or hardware without support for VBLANK
+	 * interrupts. Such drivers typically do not initialize vblanking
+	 * (i.e., they never call drm_vblank_init() with the number of
+	 * CRTCs). For CRTCs without initialized vblanking, this field is set
+	 * to true in drm_atomic_helper_check_modeset(), and a fake VBLANK
+	 * event will be sent out on each update of the display pipeline by
+	 * drm_atomic_helper_fake_vblank().
+	 *
+	 * Another usage is CRTCs feeding a writeback connector operating in
+	 * oneshot mode. In this case the fake VBLANK event is only generated
+	 * when a job is queued to the writeback connector, and we want the
+	 * core to fake VBLANK events when this part of the pipeline hasn't
+	 * changed but others had or when the CRTC and connectors are being
+	 * disabled.
 	 *
 	 * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
 	 * from the current state, the CRTC driver is then responsible for
@@ -335,7 +348,14 @@ struct drm_crtc_state {
 	 *  - Events for disabled CRTCs are not allowed, and drivers can ignore
 	 *    that case.
 	 *
-	 * This can be handled by the drm_crtc_send_vblank_event() function,
+	 * For very simple hardware without VBLANK interrupt, enabling
+	 * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers
+	 * send a fake VBLANK event at the end of the display update after all
+	 * hardware changes have been applied. See
+	 * drm_atomic_helper_fake_vblank().
+	 *
+	 * For more complex hardware, this can be handled by the
+	 * drm_crtc_send_vblank_event() function,
 	 * which the driver should call on the provided event upon completion of
 	 * the atomic commit. Note that if the driver supports vblank signalling
 	 * and timestamping the vblank counters and timestamps must agree with
@@ -867,6 +887,47 @@ struct drm_crtc_funcs {
 	 * new drivers as the replacement of &drm_driver.disable_vblank hook.
 	 */
 	void (*disable_vblank)(struct drm_crtc *crtc);
+
+	/**
+	 * @get_vblank_timestamp:
+	 *
+	 * Called by drm_get_last_vbltimestamp(). Should return a precise
+	 * timestamp when the most recent vblank interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of vblank will start scanning out,
+	 * the time immediately after end of the vblank interval. If the
+	 * @crtc is currently inside vblank, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * Parameters:
+	 *
+	 * crtc:
+	 *     CRTC for which timestamp should be returned.
+	 * max_error:
+	 *     Maximum allowable timestamp error in nanoseconds.
+	 *     Implementation should strive to provide timestamp
+	 *     with an error of at most max_error nanoseconds.
+	 *     On return, contains a true upper bound on the timestamp error.
+	 * vblank_time:
+	 *     Target location for returned vblank timestamp.
+	 * in_vblank_irq:
+	 *     True when called from drm_crtc_handle_vblank().  Some drivers
+	 *     need to apply some workarounds for gpu-specific vblank irq quirks
+	 *     if the flag is set.
+	 *
+	 * Returns:
+	 *
+	 * True on success, false on failure, which means the core should
+	 * fall back to a simple timestamp taken in drm_crtc_handle_vblank().
+	 */
+	bool (*get_vblank_timestamp)(struct drm_crtc *crtc,
+				     int *max_error,
+				     ktime_t *vblank_time,
+				     bool in_vblank_irq);
 };
 
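For most drivers this hook can presumably be serviced by the generic helper declared later in this series, paired with a @get_scanout_position implementation in &drm_crtc_helper_funcs; a sketch with assumed mydrv_* naming:

static const struct drm_crtc_funcs mydrv_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	/* Generic timestamping built on the helper's scanout position. */
	.get_vblank_timestamp	= drm_crtc_vblank_helper_get_vblank_timestamp,
};
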
 /**
@@ -974,11 +1035,12 @@ struct drm_crtc {
 	 * Programmed mode in hw, after adjustments for encoders, crtc, panel
 	 * scaling etc. Should only be used by legacy drivers, for high
 	 * precision vblank timestamps in
-	 * drm_calc_vbltimestamp_from_scanoutpos().
+	 * drm_crtc_vblank_helper_get_vblank_timestamp().
 	 *
 	 * Note that atomic drivers should not use this, but instead use
 	 * &drm_crtc_state.adjusted_mode. And for high-precision timestamps
-	 * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode,
+	 * drm_crtc_vblank_helper_get_vblank_timestamp() uses
+	 * &drm_vblank_crtc.hwmode,
 	 * which is filled out by calling drm_calc_timestamping_constants().
 	 */
 	struct drm_display_mode hwmode;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 1acfc3b..bb60a94 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -144,7 +144,7 @@ struct drm_device {
 	 * Usage counter for outstanding files open,
 	 * protected by drm_global_mutex
 	 */
-	int open_count;
+	atomic_t open_count;
 
 	/** @filelist_mutex: Protects @filelist. */
 	struct mutex filelist_mutex;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index bc04467..c6119e4 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -392,6 +392,8 @@
 # define DP_DS_12BPC		            2
 # define DP_DS_16BPC		            3
 
+#define DP_MAX_DOWNSTREAM_PORTS		    0x10
+
 /* DP Forward error Correction Registers */
 #define DP_FEC_CAPABILITY		    0x090    /* 1.4 */
 # define DP_FEC_CAPABLE			    (1 << 0)
@@ -1457,6 +1459,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
 int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 				 u8 status[DP_LINK_STATUS_SIZE]);
 
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+				    u8 real_edid_checksum);
+
 int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 				const u8 port_cap[4]);
 int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1493,13 +1498,16 @@ struct drm_dp_desc {
 
 int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
 		     bool is_branch);
+u32 drm_dp_get_edid_quirks(const struct edid *edid);
 
 /**
  * enum drm_dp_quirk - Display Port sink/branch device specific quirks
  *
  * Display Port sink and branch devices in the wild have a variety of bugs, try
  * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
+ * implement workarounds for them. Note that because some devices have
+ * unreliable OUIs, the EDID of sinks should also be checked for quirks
+ * drm_dp_get_edid_quirks().
  */
 enum drm_dp_quirk {
 	/**
@@ -1530,19 +1538,31 @@ enum drm_dp_quirk {
 	 * The DSC caps can be read from the physical aux instead.
 	 */
 	DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+	/**
+	 * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
+	 *
+	 * The device is telling the truth when it says that it uses DPCD
+	 * backlight controls, even if the system's firmware disagrees. This
+	 * quirk should be checked against both the ident and panel EDID.
+	 * When present, the driver should honor the DPCD backlight
+	 * capabilities advertised.
+	 */
+	DP_QUIRK_FORCE_DPCD_BACKLIGHT,
 };
 
 /**
  * drm_dp_has_quirk() - does the DP device have a specific quirk
  * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
  * @quirk: Quirk to query for
  *
  * Return true if DP device identified by @desc has @quirk.
  */
 static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
+		 enum drm_dp_quirk quirk)
 {
-	return desc->quirks & BIT(quirk);
+	return (desc->quirks | edid_quirks) & BIT(quirk);
 }
 
 #ifdef CONFIG_DRM_DP_CEC
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 41725d8..3cde42b 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -479,7 +479,6 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
 	/* create a connector for a port */
 	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
-	void (*register_connector)(struct drm_connector *connector);
 	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
 				  struct drm_connector *connector);
 };
@@ -591,6 +590,11 @@ struct drm_dp_mst_topology_mgr {
 	bool payload_id_table_cleared : 1;
 
 	/**
+	 * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
+	 */
+	bool is_waiting_for_dwn_reply : 1;
+
+	/**
 	 * @mst_primary: Pointer to the primary/first branch device.
 	 */
 	struct drm_dp_mst_branch *mst_primary;
@@ -620,11 +624,6 @@ struct drm_dp_mst_topology_mgr {
 	struct mutex qlock;
 
 	/**
-	 * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
-	 */
-	bool is_waiting_for_dwn_reply;
-
-	/**
 	 * @tx_msg_downq: List of pending down replies.
 	 */
 	struct list_head tx_msg_downq;
@@ -635,11 +634,13 @@ struct drm_dp_mst_topology_mgr {
 	struct mutex payload_lock;
 	/**
 	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
-	 * VCPI structure itself is &drm_dp_mst_port.vcpi.
+	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
+	 * this array is determined by @max_payloads.
 	 */
 	struct drm_dp_vcpi **proposed_vcpis;
 	/**
-	 * @payloads: Array of payloads.
+	 * @payloads: Array of payloads. The size of this array is determined
+	 * by @max_payloads.
 	 */
 	struct drm_dp_payload *payloads;
 	/**
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cf13470..97109df 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -269,159 +269,6 @@ struct drm_driver {
 	void (*release) (struct drm_device *);
 
 	/**
-	 * @get_vblank_counter:
-	 *
-	 * Driver callback for fetching a raw hardware vblank counter for the
-	 * CRTC specified with the pipe argument.  If a device doesn't have a
-	 * hardware counter, the driver can simply leave the hook as NULL.
-	 * The DRM core will account for missed vblank events while interrupts
-	 * where disabled based on system timestamps.
-	 *
-	 * Wraparound handling and loss of events due to modesetting is dealt
-	 * with in the DRM core code, as long as drivers call
-	 * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
-	 * enabling a CRTC.
-	 *
-	 * This is deprecated and should not be used by new drivers.
-	 * Use &drm_crtc_funcs.get_vblank_counter instead.
-	 *
-	 * Returns:
-	 *
-	 * Raw vblank counter value.
-	 */
-	u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
-	/**
-	 * @enable_vblank:
-	 *
-	 * Enable vblank interrupts for the CRTC specified with the pipe
-	 * argument.
-	 *
-	 * This is deprecated and should not be used by new drivers.
-	 * Use &drm_crtc_funcs.enable_vblank instead.
-	 *
-	 * Returns:
-	 *
-	 * Zero on success, appropriate errno if the given @crtc's vblank
-	 * interrupt cannot be enabled.
-	 */
-	int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
-	/**
-	 * @disable_vblank:
-	 *
-	 * Disable vblank interrupts for the CRTC specified with the pipe
-	 * argument.
-	 *
-	 * This is deprecated and should not be used by new drivers.
-	 * Use &drm_crtc_funcs.disable_vblank instead.
-	 */
-	void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
-	/**
-	 * @get_scanout_position:
-	 *
-	 * Called by vblank timestamping code.
-	 *
-	 * Returns the current display scanout position from a crtc, and an
-	 * optional accurate ktime_get() timestamp of when position was
-	 * measured. Note that this is a helper callback which is only used if a
-	 * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
-	 * @get_vblank_timestamp callback.
-	 *
-	 * Parameters:
-	 *
-	 * dev:
-	 *     DRM device.
-	 * pipe:
-	 *     Id of the crtc to query.
-	 * in_vblank_irq:
-	 *     True when called from drm_crtc_handle_vblank().  Some drivers
-	 *     need to apply some workarounds for gpu-specific vblank irq quirks
-	 *     if flag is set.
-	 * vpos:
-	 *     Target location for current vertical scanout position.
-	 * hpos:
-	 *     Target location for current horizontal scanout position.
-	 * stime:
-	 *     Target location for timestamp taken immediately before
-	 *     scanout position query. Can be NULL to skip timestamp.
-	 * etime:
-	 *     Target location for timestamp taken immediately after
-	 *     scanout position query. Can be NULL to skip timestamp.
-	 * mode:
-	 *     Current display timings.
-	 *
-	 * Returns vpos as a positive number while in active scanout area.
-	 * Returns vpos as a negative number inside vblank, counting the number
-	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
-	 * until start of active scanout / end of vblank."
-	 *
-	 * Returns:
-	 *
-	 * True on success, false if a reliable scanout position counter could
-	 * not be read out.
-	 *
-	 * FIXME:
-	 *
-	 * Since this is a helper to implement @get_vblank_timestamp, we should
-	 * move it to &struct drm_crtc_helper_funcs, like all the other
-	 * helper-internal hooks.
-	 */
-	bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
-				      bool in_vblank_irq, int *vpos, int *hpos,
-				      ktime_t *stime, ktime_t *etime,
-				      const struct drm_display_mode *mode);
-
-	/**
-	 * @get_vblank_timestamp:
-	 *
-	 * Called by drm_get_last_vbltimestamp(). Should return a precise
-	 * timestamp when the most recent VBLANK interval ended or will end.
-	 *
-	 * Specifically, the timestamp in @vblank_time should correspond as
-	 * closely as possible to the time when the first video scanline of
-	 * the video frame after the end of VBLANK will start scanning out,
-	 * the time immediately after end of the VBLANK interval. If the
-	 * @crtc is currently inside VBLANK, this will be a time in the future.
-	 * If the @crtc is currently scanning out a frame, this will be the
-	 * past start time of the current scanout. This is meant to adhere
-	 * to the OpenML OML_sync_control extension specification.
-	 *
-	 * Paramters:
-	 *
-	 * dev:
-	 *     dev DRM device handle.
-	 * pipe:
-	 *     crtc for which timestamp should be returned.
-	 * max_error:
-	 *     Maximum allowable timestamp error in nanoseconds.
-	 *     Implementation should strive to provide timestamp
-	 *     with an error of at most max_error nanoseconds.
-	 *     Returns true upper bound on error for timestamp.
-	 * vblank_time:
-	 *     Target location for returned vblank timestamp.
-	 * in_vblank_irq:
-	 *     True when called from drm_crtc_handle_vblank().  Some drivers
-	 *     need to apply some workarounds for gpu-specific vblank irq quirks
-	 *     if flag is set.
-	 *
-	 * Returns:
-	 *
-	 * True on success, false on failure, which means the core should
-	 * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
-	 *
-	 * FIXME:
-	 *
-	 * We should move this hook to &struct drm_crtc_funcs like all the other
-	 * vblank hooks.
-	 */
-	bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
-				     int *max_error,
-				     ktime_t *vblank_time,
-				     bool in_vblank_irq);
-
-	/**
 	 * @irq_handler:
 	 *
 	 * Interrupt handler called when using drm_irq_install(). Not used by
@@ -458,20 +305,6 @@ struct drm_driver {
 	void (*irq_uninstall) (struct drm_device *dev);
 
 	/**
-	 * @master_create:
-	 *
-	 * Called whenever a new master is created. Only used by vmwgfx.
-	 */
-	int (*master_create)(struct drm_device *dev, struct drm_master *master);
-
-	/**
-	 * @master_destroy:
-	 *
-	 * Called whenever a master is destroyed. Only used by vmwgfx.
-	 */
-	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
-
-	/**
 	 * @master_set:
 	 *
 	 * Called whenever the minor master is set. Only used by vmwgfx.
@@ -775,6 +608,9 @@ struct drm_driver {
 	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 	int (*dma_quiescent) (struct drm_device *);
 	int (*context_dtor) (struct drm_device *dev, int context);
+	u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
+	int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
+	void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
 	int dev_priv_size;
 };
 
@@ -824,6 +660,25 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
 }
 
 /**
+ * drm_core_check_all_features - check driver feature flags mask
+ * @dev: DRM device to check
+ * @features: feature flag(s) mask
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if @features is non-zero and all features in the @features
+ * mask are supported, false otherwise.
+ */
+static inline bool drm_core_check_all_features(const struct drm_device *dev,
+					       u32 features)
+{
+	u32 supported = dev->driver->driver_features & dev->driver_features;
+
+	return features && (supported & features) == features;
+}
+
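A hedged usage sketch (dev is an assumed &struct drm_device pointer):

	/* Bail out unless the driver supports both modesetting and atomic. */
	if (!drm_core_check_all_features(dev, DRIVER_MODESET | DRIVER_ATOMIC))
		return -EOPNOTSUPP;
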
+/**
  * drm_core_check_feature - check driver feature flags
  * @dev: DRM device to check
  * @feature: feature flag
@@ -833,9 +688,10 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
  *
  * Returns true if the @feature is supported, false otherwise.
  */
-static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev,
+					  enum drm_driver_feature feature)
 {
-	return dev->driver->driver_features & dev->driver_features & feature;
+	return drm_core_check_all_features(dev, feature);
 }
 
 /**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index f0b03d4..34b15e3 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -91,6 +91,11 @@ struct detailed_data_string {
 	u8 str[13];
 } __attribute__((packed));
 
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG   0x00
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG     0x01
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
+#define DRM_EDID_CVT_SUPPORT_FLAG           0x04
+
 struct detailed_data_monitor_range {
 	u8 min_vfreq;
 	u8 max_vfreq;
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5623994..4370e03 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -174,7 +174,8 @@ struct drm_encoder {
 	struct drm_crtc *crtc;
 
 	/**
-	 * @bridge_chain: Bridges attached to this encoder.
+	 * @bridge_chain: Bridges attached to this encoder. Drivers shall not
+	 * access this field directly.
 	 */
 	struct list_head bridge_chain;
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 1c6633d..208dbf8 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -213,8 +213,7 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
-int drm_fb_helper_init(struct drm_device *dev,
-		       struct drm_fb_helper *helper, int max_conn);
+int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
 void drm_fb_helper_fini(struct drm_fb_helper *helper);
 int drm_fb_helper_blank(int blank, struct fb_info *info);
 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -279,8 +278,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
 }
 
 static inline int drm_fb_helper_init(struct drm_device *dev,
-		       struct drm_fb_helper *helper,
-		       int max_conn)
+		       struct drm_fb_helper *helper)
 {
 	/* So drivers can use it to free the struct */
 	helper->dev = dev;
@@ -453,27 +451,6 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 
 #endif
 
-/* TODO: There's a todo entry to remove these three */
-static inline int
-drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
-	return 0;
-}
-
-static inline int
-drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
-				struct drm_connector *connector)
-{
-	return 0;
-}
-
-static inline int
-drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
-				   struct drm_connector *connector)
-{
-	return 0;
-}
-
 /**
  * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
  * @a: memory range, users of which are to be removed
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8b099b3..19df802 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -374,6 +374,7 @@ int drm_open(struct inode *inode, struct file *filp);
 ssize_t drm_read(struct file *filp, char __user *buffer,
 		 size_t count, loff_t *offset);
 int drm_release(struct inode *inode, struct file *filp);
+int drm_release_noglobal(struct inode *inode, struct file *filp);
 __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
 int drm_event_reserve_init_locked(struct drm_device *dev,
 				  struct drm_file *file_priv,
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 573e9fd..0f6e472 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -6,6 +6,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_modes.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
@@ -205,4 +206,12 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
 	struct drm_device *dev, uint64_t vram_base, size_t vram_size);
 void drm_vram_helper_release_mm(struct drm_device *dev);
 
+/*
+ * Mode-config helpers
+ */
+
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+			   const struct drm_display_mode *mode);
+
 #endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 06a1120..c6bab49 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -276,7 +276,7 @@ void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
 #define DRM_HDCP_2_VRL_LENGTH_SIZE		3
 #define DRM_HDCP_2_DCP_SIG_SIZE			384
 #define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ	4
-#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte)	(((byte) & 0xC) >> 6)
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte)	(((byte) & 0xC0) >> 6)
 
 struct hdcp_srm_header {
 	u8 srm_id;
@@ -288,8 +288,8 @@ struct hdcp_srm_header {
 struct drm_device;
 struct drm_connector;
 
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
-				 u8 *ksvs, u32 ksv_count);
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
+				u8 *ksvs, u32 ksv_count);
 int drm_connector_attach_content_protection_property(
 		struct drm_connector *connector, bool hdcp_content_type);
 void drm_hdcp_update_content_protection(struct drm_connector *connector,
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 5745710..dcef359 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,17 +194,11 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
 
 #ifdef CONFIG_PCI
 
-void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
 
 #else
 
-static inline void __drm_legacy_pci_free(struct drm_device *dev,
-					 drm_dma_handle_t *dmah)
-{
-}
-
 static inline int drm_legacy_pci_init(struct drm_driver *driver,
 				      struct pci_driver *pdriver)
 {
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 67c66f5..33f325f 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -110,6 +110,18 @@ struct mipi_dbi_dev {
 	unsigned int rotation;
 
 	/**
+	 * @left_offset: Horizontal offset of the display relative to the
+	 *               controller's driver array
+	 */
+	unsigned int left_offset;
+
+	/**
+	 * @top_offset: Vertical offset of the display relative to the
+	 *              controller's driver array
+	 */
+	unsigned int top_offset;
+
+	/**
 	 * @backlight: backlight device (optional)
 	 */
 	struct backlight_device *backlight;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d7939c0..ee8b0e8 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -272,7 +272,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
  */
 static inline bool drm_mm_initialized(const struct drm_mm *mm)
 {
-	return mm->hole_stack.next;
+	return READ_ONCE(mm->hole_stack.next);
 }
 
 /**
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index e946e20..99134d4 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -371,20 +371,13 @@ struct drm_display_mode {
 	int crtc_vtotal;
 
 	/**
-	 * @private:
+	 * @private_flags:
 	 *
-	 * Pointer for driver private data. This can only be used for mode
+	 * Driver private flags. private_flags can only be used for mode
 	 * objects passed to drivers in modeset operations. It shouldn't be used
 	 * by atomic drivers since they can store any additional data by
 	 * subclassing state structures.
 	 */
-	int *private;
-
-	/**
-	 * @private_flags:
-	 *
-	 * Similar to @private, but just an integer.
-	 */
 	int private_flags;
 
 	/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 5a87f1b..7c20b1c 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -450,6 +450,53 @@ struct drm_crtc_helper_funcs {
 	 */
 	void (*atomic_disable)(struct drm_crtc *crtc,
 			       struct drm_crtc_state *old_crtc_state);
+
+	/**
+	 * @get_scanout_position:
+	 *
+	 * Called by vblank timestamping code.
+	 *
+	 * Returns the current display scanout position from a CRTC and an
+	 * optional accurate ktime_get() timestamp of when the position was
+	 * measured. Note that this is a helper callback which is only used
+	 * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp()
+	 * for the @drm_crtc_funcs.get_vblank_timestamp callback.
+	 *
+	 * Parameters:
+	 *
+	 * crtc:
+	 *     The CRTC.
+	 * in_vblank_irq:
+	 *     True when called from drm_crtc_handle_vblank(). Some drivers
+	 *     need to apply some workarounds for gpu-specific vblank irq
+	 *     quirks if the flag is set.
+	 * vpos:
+	 *     Target location for current vertical scanout position.
+	 * hpos:
+	 *     Target location for current horizontal scanout position.
+	 * stime:
+	 *     Target location for timestamp taken immediately before
+	 *     scanout position query. Can be NULL to skip timestamp.
+	 * etime:
+	 *     Target location for timestamp taken immediately after
+	 *     scanout position query. Can be NULL to skip timestamp.
+	 * mode:
+	 *     Current display timings.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * Returns:
+	 *
+	 * True on success, false if a reliable scanout position counter could
+	 * not be read out.
+	 */
+	bool (*get_scanout_position)(struct drm_crtc *crtc,
+				     bool in_vblank_irq, int *vpos, int *hpos,
+				     ktime_t *stime, ktime_t *etime,
+				     const struct drm_display_mode *mode);
 };
 
 /**
@@ -646,22 +693,6 @@ struct drm_encoder_helper_funcs {
 				struct drm_connector_state *conn_state);
 
 	/**
-	 * @get_crtc:
-	 *
-	 * This callback is used by the legacy CRTC helpers to work around
-	 * deficiencies in its own book-keeping.
-	 *
-	 * Do not use, use atomic helpers instead, which get the book keeping
-	 * right.
-	 *
-	 * FIXME:
-	 *
-	 * Currently only nouveau is using this, and as soon as nouveau is
-	 * atomic we can ditch this hook.
-	 */
-	struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
-
-	/**
 	 * @detect:
 	 *
 	 * This callback can be used by drivers who want to do detection on the
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 121f7aa..6193cb5 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -198,7 +198,8 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
 }
 #endif
 
-#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+	(IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
 int drm_panel_of_backlight(struct drm_panel *panel);
 #else
 static inline int drm_panel_of_backlight(struct drm_panel *panel)
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 9031e21..3941b02 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -45,10 +45,6 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
 				     size_t align);
 void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
 
-int drm_get_pci_dev(struct pci_dev *pdev,
-		    const struct pci_device_id *ent,
-		    struct drm_driver *driver);
-
 #else
 
 static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
@@ -62,13 +58,6 @@ static inline void drm_pci_free(struct drm_device *dev,
 {
 }
 
-static inline int drm_get_pci_dev(struct pci_dev *pdev,
-				  const struct pci_device_id *ent,
-				  struct drm_driver *driver)
-{
-	return -ENOSYS;
-}
-
 #endif
 
 #endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 8f99d38..ca7cee8 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -382,42 +382,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
 	drm_dev_dbg(dev, DRM_UT_DRIVER,	fmt, ##__VA_ARGS__)
 #define DRM_DEV_DEBUG_KMS(dev, fmt, ...)				\
 	drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_ATOMIC,	fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, ...)				\
-	drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define	DRM_DEV_DEBUG_DP(dev, fmt, ...)					\
-	drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...)	\
-({									\
-	static DEFINE_RATELIMIT_STATE(_rs,				\
-				      DEFAULT_RATELIMIT_INTERVAL,	\
-				      DEFAULT_RATELIMIT_BURST);		\
-	if (__ratelimit(&_rs))						\
-		drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__);		\
-})
-
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...)			\
-	_DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE,		\
-					  fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER,		\
-					  fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS,		\
-					  fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...)			\
-	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME,		\
-					  fmt, ##__VA_ARGS__)
 
 /*
  * struct drm_device based logging
@@ -541,16 +505,42 @@ void __drm_err(const char *format, ...);
 	__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
 
 
-#define DRM_DEBUG_RATELIMITED(fmt, ...)					\
-	DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...)				\
-	DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
 #define DRM_DEBUG_KMS_RATELIMITED(fmt, ...)				\
-	DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+({									\
+	static DEFINE_RATELIMIT_STATE(_rs,				\
+				      DEFAULT_RATELIMIT_INTERVAL,       \
+				      DEFAULT_RATELIMIT_BURST);         \
+	if (__ratelimit(&_rs))						\
+		drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__);	\
+})
 
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...)				\
-	DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/*
+ * struct drm_device based WARNs
+ *
+ * drm_WARN*() acts like WARN*(), but with the key difference of
+ * using device specific information so that we know which device the
+ * warning originated from.
+ *
+ * Prefer drm_device based drm_WARN* over regular WARN*
+ */
+
+/* Helper for struct drm_device based WARNs */
+#define drm_WARN(drm, condition, format, arg...)			\
+	WARN(condition, "%s %s: " format,				\
+			dev_driver_string((drm)->dev),			\
+			dev_name((drm)->dev), ## arg)
+
+#define drm_WARN_ONCE(drm, condition, format, arg...)			\
+	WARN_ONCE(condition, "%s %s: " format,				\
+			dev_driver_string((drm)->dev),			\
+			dev_name((drm)->dev), ## arg)
+
+#define drm_WARN_ON(drm, x)						\
+	drm_WARN((drm), (x), "%s",					\
+		 "drm_WARN_ON(" __stringify(x) ")")
+
+#define drm_WARN_ON_ONCE(drm, x)					\
+	drm_WARN_ONCE((drm), (x), "%s",					\
+		      "drm_WARN_ON_ONCE(" __stringify(x) ")")
 
 #endif /* DRM_PRINT_H_ */
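A hedged usage sketch of the new macros (drm, crtc and ret are assumed locals):

	/* Unlike a bare WARN_ON(), this names the device in the splat. */
	drm_WARN_ON(drm, crtc->state == NULL);
	drm_WARN_ONCE(drm, ret < 0, "commit failed with %d\n", ret);
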
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 15afee9..a026375 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs {
 	 * This is the function drivers should submit the
 	 * &drm_pending_vblank_event from. Using either
 	 * drm_crtc_arm_vblank_event(), when the driver supports vblank
-	 * interrupt handling, or drm_crtc_send_vblank_event() directly in case
-	 * the hardware lacks vblank support entirely.
+	 * interrupt handling, or drm_crtc_send_vblank_event() for more
+	 * complex cases. In case the hardware lacks vblank support entirely,
+	 * drivers can set &struct drm_crtc_state.no_vblank in
+	 * &struct drm_simple_display_pipe_funcs.check and let DRM's
+	 * atomic helper fake a vblank event.
 	 */
 	void (*update)(struct drm_simple_display_pipe *pipe,
 		       struct drm_plane_state *old_plane_state);
@@ -178,4 +181,8 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
 			const uint64_t *format_modifiers,
 			struct drm_connector *connector);
 
+int drm_simple_encoder_init(struct drm_device *dev,
+			    struct drm_encoder *encoder,
+			    int encoder_type);
+
 #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
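A hedged usage sketch of the new helper, which replaces hand-rolled encoders that only ever needed an empty &drm_encoder_funcs (priv is an assumed driver structure):

	ret = drm_simple_encoder_init(dev, &priv->encoder,
				      DRM_MODE_ENCODER_NONE);
	if (ret)
		return ret;

	priv->encoder.possible_crtcs = drm_crtc_mask(&priv->crtc);
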
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c16c440..dd9f5b9 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -174,13 +174,13 @@ struct drm_vblank_crtc {
 	unsigned int pipe;
 	/**
 	 * @framedur_ns: Frame/Field duration in ns, used by
-	 * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+	 * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
 	 * drm_calc_timestamping_constants().
 	 */
 	int framedur_ns;
 	/**
 	 * @linedur_ns: Line duration in ns, used by
-	 * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+	 * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
 	 * drm_calc_timestamping_constants().
 	 */
 	int linedur_ns;
@@ -190,8 +190,8 @@ struct drm_vblank_crtc {
 	 *
 	 * Cache of the current hardware display mode. Only valid when @enabled
 	 * is set. This is used by helpers like
-	 * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the
-	 * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
+	 * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access
+	 * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
 	 * because that one is really hard to get from interrupt context.
 	 */
 	struct drm_display_mode hwmode;
@@ -206,6 +206,7 @@ struct drm_vblank_crtc {
 };
 
 int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+bool drm_dev_has_vblank(const struct drm_device *dev);
 u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
 u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
 				   ktime_t *vblanktime);
@@ -229,13 +230,32 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
 void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
 void drm_crtc_vblank_restore(struct drm_crtc *crtc);
 
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-					   unsigned int pipe, int *max_error,
-					   ktime_t *vblank_time,
-					   bool in_vblank_irq);
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 				     const struct drm_display_mode *mode);
 wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
 void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
 				   u32 max_vblank_count);
+
+/*
+ * Helpers for struct drm_crtc_funcs
+ */
+
+typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc,
+						     bool in_vblank_irq,
+						     int *vpos, int *hpos,
+						     ktime_t *stime,
+						     ktime_t *etime,
+						     const struct drm_display_mode *mode);
+
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc,
+						     int *max_error,
+						     ktime_t *vblank_time,
+						     bool in_vblank_irq,
+						     drm_vblank_get_scanout_position_func get_scanout_position);
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+						 int *max_error,
+						 ktime_t *vblank_time,
+						 bool in_vblank_irq);
+
 #endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 589be85..26b04ff 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -262,7 +262,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
- * @score: score to help loadbalancer pick a idle sched
+ * @num_jobs: the number of jobs currently queued in the scheduler
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hit to time out handler to free the guilty job.
  *
@@ -283,8 +283,8 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
-	atomic_t                        score;
-	bool				ready;
+	atomic_t                        num_jobs;
+	bool				ready;
 	bool				free_guilty;
 };
 
@@ -297,6 +297,10 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+				   struct drm_gpu_scheduler **sched_list,
+				   unsigned int num_sched_list);
+
 void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
@@ -337,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence);
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 		                unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+		     unsigned int num_sched_list);
 
 #endif
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 4d48de8..702f613 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -12,7 +12,6 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
 
 /**
  * enum hdcp_port_type - HDCP port implementation type defined by ME FW
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 66ca49d..b9bc1b0 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -135,18 +135,14 @@ struct ttm_tt;
  * @num_pages: Actual number of pages.
  * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is put on the delayed delete list.
- * @list_kref: List reference count of this buffer object. This member is
- * used to avoid destruction while the buffer object is still on a list.
- * Lru lists may keep one refcount, the delayed delete list, and kref != 0
- * keeps one refcount. When this refcount reaches zero,
- * the object is destroyed.
+ * zero, the object is destroyed or put on the delayed delete list.
  * @mem: structure describing current placement.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
  * holds a pointer to a persistent shmem object.
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
+ * @deleted: True if the object is only a zombie and already deleted.
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
@@ -183,9 +179,7 @@ struct ttm_buffer_object {
 	/**
 	* Members not needing protection.
 	*/
-
 	struct kref kref;
-	struct kref list_kref;
 
 	/**
 	 * Members protected by the bo::resv::reserved lock.
@@ -195,6 +189,7 @@ struct ttm_buffer_object {
 	struct file *persistent_swap_storage;
 	struct ttm_tt *ttm;
 	bool evicted;
+	bool deleted;
 
 	/**
 	 * Members protected by the bdev::lru_lock.
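
With list_kref gone, a single kref now governs the object's lifetime and
the new deleted flag marks zombies that are still on the delayed-destroy
list. A minimal sketch of testing that state; the helper is hypothetical,
and per the hunk above the flag sits among the resv-protected members:

    #include <drm/ttm/ttm_bo_api.h>
    #include <linux/dma-resv.h>

    /* Hypothetical driver helper, not part of the TTM API. */
    static bool my_bo_is_zombie(struct ttm_buffer_object *bo)
    {
            /* ->deleted is in the resv-protected member section */
            dma_resv_assert_held(bo->base.resv);
            return bo->deleted;
    }
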
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cac7a8a..c9e0fd0 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -210,8 +210,6 @@ struct ttm_mem_type_manager {
  * struct ttm_bo_driver
  *
  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @invalidate_caches: Callback to invalidate read caches when a buffer object
- * has been evicted.
  * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
  * structure.
  * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
@@ -256,19 +254,6 @@ struct ttm_bo_driver {
 	 */
 	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 
-	/**
-	 * struct ttm_bo_driver member invalidate_caches
-	 *
-	 * @bdev: the buffer object device.
-	 * @flags: new placement of the rebound buffer object.
-	 *
-	 * A previosly evicted buffer has been rebound in a
-	 * potentially new location. Tell the driver that it might
-	 * consider invalidating read (texture) caches on the next command
-	 * submission as a consequence.
-	 */
-
-	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
 	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
 			     struct ttm_mem_type_manager *man);
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index abf5459..1ade486 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,18 +43,6 @@ struct dma_buf_ops {
 	bool cache_sgt_mapping;
 
 	/**
-	 * @dynamic_mapping:
-	 *
-	 * If true the framework makes sure that the map/unmap_dma_buf
-	 * callbacks are always called with the dma_resv object locked.
-	 *
-	 * If false the framework makes sure that the map/unmap_dma_buf
-	 * callbacks are always called without the dma_resv object locked.
-	 * Mutual exclusive with @cache_sgt_mapping.
-	 */
-	bool dynamic_mapping;
-
-	/**
 	 * @attach:
 	 *
 	 * This is called from dma_buf_attach() to make sure that a given
@@ -94,13 +82,42 @@ struct dma_buf_ops {
 	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
 
 	/**
+	 * @pin:
+	 *
+	 * This is called by dma_buf_pin and lets the exporter know that the
+	 * DMA-buf can't be moved any more.
+	 *
+	 * This is called with the dmabuf->resv object locked and is mutually
+	 * exclusive with @cache_sgt_mapping.
+	 *
+	 * This callback is optional and should only be used in limited use
+	 * cases like scanout and not for temporary pin operations.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success, negative error code on failure.
+	 */
+	int (*pin)(struct dma_buf_attachment *attach);
+
+	/**
+	 * @unpin:
+	 *
+	 * This is called by dma_buf_unpin and lets the exporter know that the
+	 * DMA-buf can be moved again.
+	 *
+	 * This is called with the dmabuf->resv object locked and is mutually
+	 * exclusive with @cache_sgt_mapping.
+	 *
+	 * This callback is optional.
+	 */
+	void (*unpin)(struct dma_buf_attachment *attach);
+
+	/**
 	 * @map_dma_buf:
 	 *
 	 * This is called by dma_buf_map_attachment() and is used to map a
 	 * shared &dma_buf into device address space, and it is mandatory. It
-	 * can only be called if @attach has been called successfully. This
-	 * essentially pins the DMA buffer into place, and it cannot be moved
-	 * any more
+	 * can only be called if @attach has been called successfully.
 	 *
 	 * This call may sleep, e.g. when the backing storage first needs to be
 	 * allocated, or moved to a location suitable for all currently attached
@@ -141,9 +158,8 @@ struct dma_buf_ops {
 	 *
 	 * This is called by dma_buf_unmap_attachment() and should unmap and
 	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
-	 * It should also unpin the backing storage if this is the last mapping
-	 * of the DMA buffer, it the exporter supports backing storage
-	 * migration.
+	 * For static dma_buf handling this might also unpin the backing
+	 * storage if this is the last mapping of the DMA buffer.
 	 */
 	void (*unmap_dma_buf)(struct dma_buf_attachment *,
 			      struct sg_table *,
@@ -312,6 +328,34 @@ struct dma_buf {
 };
 
 /**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+	/**
+	 * @move_notify:
+	 *
+	 * If this callback is provided the framework can avoid pinning the
+	 * backing store while mappings exist.
+	 *
+	 * This callback is called with the lock of the reservation object
+	 * associated with the dma_buf held and the mapping function must be
+	 * called with this lock held as well. This makes sure that no mapping
+	 * is created concurrently with an ongoing move operation.
+	 *
+	 * Mappings stay valid and are not directly affected by this callback.
+	 * But the DMA-buf can now be in a different physical location, so all
+	 * mappings should be destroyed and re-created as soon as possible.
+	 *
+	 * New mappings can be created after this callback returns, and will
+	 * point to the new location of the DMA-buf.
+	 */
+	void (*move_notify)(struct dma_buf_attachment *attach);
+};
+
+/**
  * struct dma_buf_attachment - holds device-buffer attachment data
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
@@ -319,8 +363,9 @@ struct dma_buf {
  * @sgt: cached mapping.
  * @dir: direction of cached mapping.
  * @priv: exporter specific attachment data.
- * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
- * dma_resv lock held.
+ * @importer_ops: importer operations for this attachment; if provided,
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
  *
  * This structure holds the attachment information between the dma_buf buffer
  * and its user device(s). The list contains one attachment struct per device
@@ -337,7 +382,8 @@ struct dma_buf_attachment {
 	struct list_head node;
 	struct sg_table *sgt;
 	enum dma_data_direction dir;
-	bool dynamic_mapping;
+	const struct dma_buf_attach_ops *importer_ops;
+	void *importer_priv;
 	void *priv;
 };
 
@@ -399,7 +445,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
  */
 static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
 {
-	return dmabuf->ops->dynamic_mapping;
+	return !!dmabuf->ops->pin;
 }
 
 /**
@@ -413,16 +459,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
 static inline bool
 dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
 {
-	return attach->dynamic_mapping;
+	return !!attach->importer_ops;
 }
 
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 					  struct device *dev);
 struct dma_buf_attachment *
 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
-		       bool dynamic_mapping);
+		       const struct dma_buf_attach_ops *importer_ops,
+		       void *importer_priv);
 void dma_buf_detach(struct dma_buf *dmabuf,
 		    struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
 
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
 
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9918a6c..9613d79 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -207,7 +207,7 @@ struct hdmi_drm_infoframe {
 	u16 max_fall;
 };
 
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
 ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
 				size_t size);
 ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
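
Since hdmi_avi_infoframe_init() can no longer fail, the error check at
call sites goes away; a small hypothetical caller:

    #include <linux/hdmi.h>

    static void my_fill_avi(struct hdmi_avi_infoframe *frame)
    {
            /* was: ret = hdmi_avi_infoframe_init(frame); if (ret) ... */
            hdmi_avi_infoframe_init(frame); /* returns void now */
    }
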
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 4f733a4..ca83376 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -10,7 +10,7 @@
 
 #include <drm/drm_fourcc.h>
 #include <linux/fb.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
 
 /* format array, use it to initialize a "struct simplefb_format" array */
 #define SIMPLEFB_FORMATS \
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index ac38798..65f6972 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -125,9 +125,10 @@ extern "C" {
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
 /* Flag that indicates allocating MQD gart on GFX9, where the mtype
- * for the second page onward should be set to NC.
+ * for the second page onward should be set to NC. It should never
+ * be used by user space applications.
  */
-#define AMDGPU_GEM_CREATE_MQD_GFX9		(1 << 8)
+#define AMDGPU_GEM_CREATE_CP_MQD_GFX9		(1 << 8)
 /* Flag that BO may contain sensitive data that must be wiped before
  * releasing the memory
  */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 868bf79..808b48a 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -948,6 +948,8 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
 #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
 
+#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
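
A minimal userspace sketch of the new ioctl; the DRM fd and framebuffer
id are assumed, and the wrapper is hypothetical:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/drm_mode.h>

    /* Reports per-plane handles, pitches, offsets and modifiers. */
    static int my_getfb2(int fd, uint32_t fb_id,
                         struct drm_mode_fb_cmd2 *info)
    {
            memset(info, 0, sizeof(*info));
            info->fb_id = fb_id;
            return ioctl(fd, DRM_IOCTL_MODE_GETFB2, info);
    }
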
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a4..2813e57 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param {
  * By default, new contexts allow persistence.
  */
 #define I915_CONTEXT_PARAM_PERSISTENCE	0xb
+
+/*
+ * I915_CONTEXT_PARAM_RINGSIZE:
+ *
+ * Sets the size of the CS ringbuffer to use for logical ring contexts. This
+ * applies a limit on how many batches can be queued to HW before the caller
+ * is blocked due to lack of space for more commands.
+ *
+ * Can only reliably be set prior to first use, i.e. during construction.
+ * At any later point, the current execution must be flushed first, as the
+ * ring can only be changed while the context is idle. Note that the
+ * ringsize can be specified as a constructor property (see
+ * I915_CONTEXT_CREATE_EXT_SETPARAM) but can also be set later if required.
+ *
+ * Only applies to the current set of engines and is lost when those engines
+ * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
+ *
+ * Must be between 4 and 512 KiB, in multiples of the page size (4 KiB).
+ * Default is 16 KiB.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE	0xc
 /* Must be kept compact -- no holes and well documented */
 
 	__u64 value;
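
A minimal userspace sketch of setting the new parameter on an existing
context; the fd and context id are assumed:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int my_set_ring_size(int fd, uint32_t ctx_id, uint64_t bytes)
    {
            struct drm_i915_gem_context_param p = {
                    .ctx_id = ctx_id,
                    .param = I915_CONTEXT_PARAM_RINGSIZE,
                    .value = bytes, /* e.g. 128 * 1024; 4 KiB to 512 KiB */
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
    }
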
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
index 95a00fb..1ec58d6 100644
--- a/include/uapi/drm/lima_drm.h
+++ b/include/uapi/drm/lima_drm.h
@@ -32,12 +32,19 @@ struct drm_lima_get_param {
 	__u64 value; /* out, parameter value */
 };
 
+/*
+ * A heap buffer dynamically grows its backing memory when a GP task fails
+ * due to lack of heap memory. The size field of a heap buffer is an upper
+ * bound on the backing memory and can be set to a fairly large value.
+ */
+#define LIMA_BO_FLAG_HEAP  (1 << 0)
+
 /**
  * create a buffer for use by the GPU
  */
 struct drm_lima_gem_create {
 	__u32 size;    /* in, buffer size */
-	__u32 flags;   /* in, currently no flags, must be zero */
+	__u32 flags;   /* in, buffer flags */
 	__u32 handle;  /* out, GEM buffer handle */
 	__u32 pad;     /* pad, must be zero */
 };
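
A minimal userspace sketch of creating a heap buffer with the new flag;
the fd is assumed and the wrapper is hypothetical:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/lima_drm.h>

    static int my_create_heap_bo(int fd, uint32_t max_size,
                                 uint32_t *handle)
    {
            struct drm_lima_gem_create req = {
                    .size = max_size, /* upper bound; grown on demand */
                    .flags = LIMA_BO_FLAG_HEAP,
            };
            int ret = ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &req);

            if (!ret)
                    *handle = req.handle;
            return ret;
    }
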
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index fcb741e..02e9175 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -86,6 +86,9 @@ extern "C" {
  *
  * DRM_VMW_PARAM_SM4_1
  * SM4_1 support is enabled.
+ *
+ * DRM_VMW_PARAM_SM5
+ * SM5 support is enabled.
  */
 
 #define DRM_VMW_PARAM_NUM_STREAMS      0
@@ -103,6 +106,7 @@ extern "C" {
 #define DRM_VMW_PARAM_DX               12
 #define DRM_VMW_PARAM_HW_CAPS2         13
 #define DRM_VMW_PARAM_SM4_1            14
+#define DRM_VMW_PARAM_SM5              15
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -1133,7 +1137,7 @@ struct drm_vmw_handle_close_arg {
  * svga3d surface flags split into 2, upper half and lower half.
  */
 enum drm_vmw_surface_version {
-	drm_vmw_gb_surface_v1
+	drm_vmw_gb_surface_v1,
 };
 
 /**
@@ -1144,6 +1148,7 @@ enum drm_vmw_surface_version {
  * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
  * @multisample_pattern: Multisampling pattern when msaa is supported.
  * @quality_level: Precision settings for each sample.
+ * @buffer_byte_stride: Buffer byte stride.
  * @must_be_zero: Reserved for future usage.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
@@ -1152,10 +1157,11 @@ enum drm_vmw_surface_version {
 struct drm_vmw_gb_surface_create_ext_req {
 	struct drm_vmw_gb_surface_create_req base;
 	enum drm_vmw_surface_version version;
-	uint32_t svga3d_flags_upper_32_bits;
-	SVGA3dMSPattern multisample_pattern;
-	SVGA3dMSQualityLevel quality_level;
-	uint64_t must_be_zero;
+	__u32 svga3d_flags_upper_32_bits;
+	__u32 multisample_pattern;
+	__u32 quality_level;
+	__u32 buffer_byte_stride;
+	__u32 must_be_zero;
 };
 
 /**
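
A userspace sketch of probing the new parameter through libdrm's command
interface; the fd is assumed and the helper is hypothetical:

    #include <xf86drm.h>
    #include <drm/vmwgfx_drm.h>

    static int my_has_sm5(int fd)
    {
            struct drm_vmw_getparam_arg arg = {
                    .param = DRM_VMW_PARAM_SM5,
            };

            if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
                                    &arg, sizeof(arg)))
                    return 0;
            return arg.value != 0;
    }
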
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 1f9bc13..77252cb 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
 
 	/* layers */
 	int overlay_num;
-	struct mmp_overlay overlays[0];
+	struct mmp_overlay overlays[];
 };
 
 extern struct mmp_path *mmp_get_path(const char *name);
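
With overlays[] converted from a zero-length to a flexible array member,
allocation sites can size the whole object with struct_size(); a
hypothetical allocation sketch:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <video/mmp_disp.h>

    static struct mmp_path *my_alloc_path(int overlay_num)
    {
            struct mmp_path *path;

            path = kzalloc(struct_size(path, overlays, overlay_num),
                           GFP_KERNEL);
            if (path)
                    path->overlay_num = overlay_num;
            return path;
    }
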
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b6571c3..c4a93ce 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -10,7 +10,7 @@
  *
  * This is the register set for the fimd and new style framebuffer interface
  * found from the S3C2443 onwards into the S3C2416, S3C2450, the
- * S3C64XX series such as the S3C6400 and S3C6410, and EXYNOS series.
+ * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series.
 */
 
 /* VIDCON0 */