Merge tag 'v3.10-rc7' into drm-next

Linux 3.10-rc7

The sdvo lvds fix in this -fixes pull

commit c3456fb3e4712d0448592af3c5d644c9472cd3c1
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Mon Jun 10 09:47:58 2013 +0200

    drm/i915: prefer VBT modes for SVDO-LVDS over EDID

has a silent functional conflict with

commit 990256aec2f10800595dddf4d1c3441fcd6b2616
Author: Ville Syrjälä <ville.syrjala@linux.intel.com>
Date:   Fri May 31 12:17:07 2013 +0000

    drm: Add probed modes in probe order

in drm-next. We simply need to add the VBT modes before the EDID modes, i.e.
the other way round from what the code does now.
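
In the intel_sdvo LVDS path that boils down to adding the VBT mode ahead of the
EDID modes. A minimal sketch of the intent (assuming the usual i915 names such
as dev_priv->sdvo_lvds_vbt_mode; this is illustrative, not the literal
resolution):

    /*
     * drm_mode_probed_add() now appends (list_add_tail), so probe order is
     * what userspace sees: add the preferred VBT mode first ...
     */
    vbt_mode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode);
    if (vbt_mode) {
            vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
            drm_mode_probed_add(connector, vbt_mode);
    }

    /* ... and only then whatever EDID reports (drm_add_edid_modes() etc.). */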

Conflicts:
	drivers/gpu/drm/drm_prime.c
	drivers/gpu/drm/i915/intel_sdvo.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index f9df3b8..91ee107 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -2161,6 +2161,12 @@
       <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
     </sect2>
+    <sect2>
+      <title>Rectangle Utilities Reference</title>
+!Pinclude/drm/drm_rect.h rect utils
+!Iinclude/drm/drm_rect.h
+!Edrivers/gpu/drm/drm_rect.c
+    </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
diff --git a/Documentation/devicetree/bindings/video/display-timing.txt b/Documentation/devicetree/bindings/video/display-timing.txt
index 1500385..e1d4a0b 100644
--- a/Documentation/devicetree/bindings/video/display-timing.txt
+++ b/Documentation/devicetree/bindings/video/display-timing.txt
@@ -34,6 +34,7 @@
 			- ignored     = ignored
  - interlaced (bool): boolean to enable interlaced mode
  - doublescan (bool): boolean to enable doublescan mode
+ - doubleclk (bool): boolean to enable doubleclock mode
 
 All the optional properties that are not bool follow the following logic:
     <1>: high active
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index eefdd91..f6362d8 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -81,17 +81,11 @@
 
 mtrr:n  Setup memory type range registers for the framebuffer
         where n:
-              0 - disabled (equivalent to nomtrr) (default)
-              1 - uncachable
-              2 - write-back
-              3 - write-combining
-              4 - write-through
+              0 - disabled (equivalent to nomtrr)
+              3 - write-combining (default)
 
-        If you see the following in dmesg, choose the type that matches
-        the old one.  In this example, use "mtrr:2".
-...
-mtrr: type mismatch for e0000000,8000000 old: write-back new: write-combining
-...
+	Values other than 0 and 3 will result in a warning and will be
+	treated just like 3.
 
 nomtrr  Do not use memory type range registers.
 
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d8e8eef..34f69cb 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -345,4 +345,11 @@
 
 #define IO_SPACE_LIMIT 0xffff
 
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+					 unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index e235582..f768f62 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -26,7 +26,10 @@
 #include <uapi/asm/mtrr.h>
 
 
-/*  The following functions are for use by other drivers  */
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
 # ifdef CONFIG_MTRR
 extern u8 mtrr_type_lookup(u64 addr, u64 end);
 extern void mtrr_save_fixed_ranges(void *);
@@ -45,6 +48,7 @@
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
 #  else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -80,6 +84,10 @@
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+	return -1;
+}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 726bf96..3533d4d 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -51,9 +51,13 @@
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
+#include <asm/pat.h>
 
 #include "mtrr.h"
 
+/* arch_phys_wc_add returns an MTRR register index plus this offset. */
+#define MTRR_TO_PHYS_WC_OFFSET 1000
+
 u32 num_var_ranges;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
@@ -524,6 +528,73 @@
 }
 EXPORT_SYMBOL(mtrr_del);
 
+/**
+ * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
+ * @base: Physical base address
+ * @size: Size of region
+ *
+ * If PAT is available, this does nothing.  If PAT is unavailable, it
+ * attempts to add a WC MTRR covering size bytes starting at base and
+ * logs an error if this fails.
+ *
+ * Drivers must store the return value to pass to arch_phys_wc_del,
+ * but drivers should not try to interpret that return value.
+ */
+int arch_phys_wc_add(unsigned long base, unsigned long size)
+{
+	int ret;
+
+	if (pat_enabled)
+		return 0;  /* Success!  (We don't need to do anything.) */
+
+	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
+	if (ret < 0) {
+		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
+			(void *)base, (void *)(base + size - 1));
+		return ret;
+	}
+	return ret + MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL(arch_phys_wc_add);
+
+/*
+ * arch_phys_wc_del - undoes arch_phys_wc_add
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This cleans up after arch_phys_wc_add.
+ *
+ * The API guarantees that arch_phys_wc_del(error code) and
+ * arch_phys_wc_del(0) do nothing.
+ */
+void arch_phys_wc_del(int handle)
+{
+	if (handle >= 1) {
+		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
+		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
+	}
+}
+EXPORT_SYMBOL(arch_phys_wc_del);
+
+/*
+ * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * @handle: Return value from arch_phys_wc_add
+ *
+ * This will turn the return value from arch_phys_wc_add into an mtrr
+ * index suitable for debugging.
+ *
+ * Note: There is no legitimate use for this function, except possibly
+ * in printk line.  Alas there is an illegitimate use in some ancient
+ * drm ioctls.
+ */
+int phys_wc_to_mtrr_index(int handle)
+{
+	if (handle < MTRR_TO_PHYS_WC_OFFSET)
+		return -1;
+	else
+		return handle - MTRR_TO_PHYS_WC_OFFSET;
+}
+EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+
 /*
  * HACK ALERT!
  * These should be called implicitly, but we can't yet until all the initcall
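
For the drm drivers converted below, the resulting pattern is short; roughly (a
sketch, with priv/pdev as stand-ins for the driver's own structures):

    /* init: the return value is an opaque cookie, only good for handing back */
    priv->fb_wc_handle = arch_phys_wc_add(pci_resource_start(pdev, 0),
                                          pci_resource_len(pdev, 0));

    /* teardown: error codes and 0 are accepted and simply ignored */
    arch_phys_wc_del(priv->fb_wc_handle);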
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 2e04433..1b19239 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -603,7 +603,8 @@
 			vma->vm_ops = kerninfo.vm_ops;
 		} else if (io_remap_pfn_range(vma, vma->vm_start,
 				(kerninfo.aper_base + offset) >> PAGE_SHIFT,
-					    size, vma->vm_page_prot)) {
+				size,
+				pgprot_writecombine(vma->vm_page_prot))) {
 			goto out_again;
 		}
 		mutex_unlock(&(agp_fe.agp_mutex));
@@ -618,8 +619,9 @@
 		if (kerninfo.vm_ops) {
 			vma->vm_ops = kerninfo.vm_ops;
 		} else if (io_remap_pfn_range(vma, vma->vm_start,
-					    kerninfo.aper_base >> PAGE_SHIFT,
-					    size, vma->vm_page_prot)) {
+				kerninfo.aper_base >> PAGE_SHIFT,
+				size,
+				pgprot_writecombine(vma->vm_page_prot))) {
 			goto out_again;
 		}
 		mutex_unlock(&(agp_fe.agp_mutex));
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b16c50e..71ca63b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -213,6 +213,8 @@
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/rcar-du/Kconfig"
+
 source "drivers/gpu/drm/shmobile/Kconfig"
 
 source "drivers/gpu/drm/omapdrm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1c9f243..801bcaf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,8 @@
 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_modes.o drm_edid.o \
 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o
+		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_rect.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -48,6 +49,7 @@
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 09da339..a5a1a03 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -271,26 +271,19 @@
 		return ret;
 	}
 
-	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-				    pci_resource_len(dev->pdev, 0),
-				    DRM_MTRR_WC);
+	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					pci_resource_len(dev->pdev, 0));
 
 	return 0;
 }
 
 void ast_mm_fini(struct ast_private *ast)
 {
-	struct drm_device *dev = ast->dev;
 	ttm_bo_device_release(&ast->ttm.bdev);
 
 	ast_ttm_global_release(ast);
 
-	if (ast->fb_mtrr >= 0) {
-		drm_mtrr_del(ast->fb_mtrr,
-			     pci_resource_start(dev->pdev, 0),
-			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
-		ast->fb_mtrr = -1;
-	}
+	arch_phys_wc_del(ast->fb_mtrr);
 }
 
 void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ed8cfc..36b9b0b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -271,9 +271,8 @@
 		return ret;
 	}
 
-	cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-				    pci_resource_len(dev->pdev, 0),
-				    DRM_MTRR_WC);
+	cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					   pci_resource_len(dev->pdev, 0));
 
 	cirrus->mm_inited = true;
 	return 0;
@@ -281,8 +280,6 @@
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
-	struct drm_device *dev = cirrus->dev;
-
 	if (!cirrus->mm_inited)
 		return;
 
@@ -290,12 +287,8 @@
 
 	cirrus_ttm_global_release(cirrus);
 
-	if (cirrus->fb_mtrr >= 0) {
-		drm_mtrr_del(cirrus->fb_mtrr,
-			     pci_resource_start(dev->pdev, 0),
-			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
-		cirrus->fb_mtrr = -1;
-	}
+	arch_phys_wc_del(cirrus->fb_mtrr);
+	cirrus->fb_mtrr = 0;
 }
 
 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 0128147..5a4dbb4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -210,12 +210,16 @@
 		if (drm_core_has_MTRR(dev)) {
 			if (map->type == _DRM_FRAME_BUFFER ||
 			    (map->flags & _DRM_WRITE_COMBINING)) {
-				map->mtrr = mtrr_add(map->offset, map->size,
-						     MTRR_TYPE_WRCOMB, 1);
+				map->mtrr =
+					arch_phys_wc_add(map->offset, map->size);
 			}
 		}
 		if (map->type == _DRM_REGISTERS) {
-			map->handle = ioremap(map->offset, map->size);
+			if (map->flags & _DRM_WRITE_COMBINING)
+				map->handle = ioremap_wc(map->offset,
+							 map->size);
+			else
+				map->handle = ioremap(map->offset, map->size);
 			if (!map->handle) {
 				kfree(map);
 				return -ENOMEM;
@@ -410,6 +414,15 @@
 
 	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
 	map->handle = (void *)(unsigned long)maplist->user_token;
+
+	/*
+	 * It appears that there are no users of this value whatsoever --
+	 * drmAddMap just discards it.  Let's not encourage its use.
+	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
+	 *  it's not a real mtrr index anymore.)
+	 */
+	map->mtrr = -1;
+
 	return 0;
 }
 
@@ -451,11 +464,8 @@
 		iounmap(map->handle);
 		/* FALLTHROUGH */
 	case _DRM_FRAME_BUFFER:
-		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
-			int retcode;
-			retcode = mtrr_del(map->mtrr, map->offset, map->size);
-			DRM_DEBUG("mtrr_del=%d\n", retcode);
-		}
+		if (drm_core_has_MTRR(dev))
+			arch_phys_wc_del(map->mtrr);
 		break;
 	case _DRM_SHM:
 		vfree(map->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e7e9242..02838e6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -29,6 +29,7 @@
  *      Dave Airlie <airlied@linux.ie>
  *      Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -91,7 +92,7 @@
 
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)				\
-	char *fnname(int val)					\
+	const char *fnname(int val)				\
 	{							\
 		int i;						\
 		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
@@ -104,7 +105,7 @@
 /*
  * Global properties
  */
-static struct drm_prop_enum_list drm_dpms_enum_list[] =
+static const struct drm_prop_enum_list drm_dpms_enum_list[] =
 {	{ DRM_MODE_DPMS_ON, "On" },
 	{ DRM_MODE_DPMS_STANDBY, "Standby" },
 	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
@@ -116,7 +117,7 @@
 /*
  * Optional properties
  */
-static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
 {
 	{ DRM_MODE_SCALE_NONE, "None" },
 	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
@@ -124,7 +125,7 @@
 	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
 };
 
-static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
 {
 	{ DRM_MODE_DITHERING_OFF, "Off" },
 	{ DRM_MODE_DITHERING_ON, "On" },
@@ -134,7 +135,7 @@
 /*
  * Non-global properties, but "required" for certain connectors.
  */
-static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
@@ -143,7 +144,7 @@
 
 DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
 
-static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
@@ -153,7 +154,7 @@
 DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
 		 drm_dvi_i_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_select_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -164,7 +165,7 @@
 
 DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
 
-static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 {
 	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
 	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -176,7 +177,7 @@
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 		 drm_tv_subconnector_enum_list)
 
-static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
 	{ DRM_MODE_DIRTY_OFF,      "Off"      },
 	{ DRM_MODE_DIRTY_ON,       "On"       },
 	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
@@ -184,7 +185,7 @@
 
 struct drm_conn_prop_enum_list {
 	int type;
-	char *name;
+	const char *name;
 	int count;
 };
 
@@ -210,7 +211,7 @@
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
 };
 
-static struct drm_prop_enum_list drm_encoder_enum_list[] =
+static const struct drm_prop_enum_list drm_encoder_enum_list[] =
 {	{ DRM_MODE_ENCODER_NONE, "None" },
 	{ DRM_MODE_ENCODER_DAC, "DAC" },
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
@@ -219,7 +220,7 @@
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
-char *drm_get_encoder_name(struct drm_encoder *encoder)
+const char *drm_get_encoder_name(const struct drm_encoder *encoder)
 {
 	static char buf[32];
 
@@ -230,7 +231,7 @@
 }
 EXPORT_SYMBOL(drm_get_encoder_name);
 
-char *drm_get_connector_name(struct drm_connector *connector)
+const char *drm_get_connector_name(const struct drm_connector *connector)
 {
 	static char buf[32];
 
@@ -241,7 +242,7 @@
 }
 EXPORT_SYMBOL(drm_get_connector_name);
 
-char *drm_get_connector_status_name(enum drm_connector_status status)
+const char *drm_get_connector_status_name(enum drm_connector_status status)
 {
 	if (status == connector_status_connected)
 		return "connected";
@@ -252,6 +253,28 @@
 }
 EXPORT_SYMBOL(drm_get_connector_status_name);
 
+static char printable_char(int c)
+{
+	return isascii(c) && isprint(c) ? c : '?';
+}
+
+const char *drm_get_format_name(uint32_t format)
+{
+	static char buf[32];
+
+	snprintf(buf, sizeof(buf),
+		 "%c%c%c%c %s-endian (0x%08x)",
+		 printable_char(format & 0xff),
+		 printable_char((format >> 8) & 0xff),
+		 printable_char((format >> 16) & 0xff),
+		 printable_char((format >> 24) & 0x7f),
+		 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+		 format);
+
+	return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
 /**
  * drm_mode_object_get - allocate a new modeset identifier
  * @dev: DRM device
@@ -569,16 +592,8 @@
 		}
 
 		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-			if (plane->fb == fb) {
-				/* should turn off the crtc */
-				ret = plane->funcs->disable_plane(plane);
-				if (ret)
-					DRM_ERROR("failed to disable plane with busy fb\n");
-				/* disconnect the plane from the fb and crtc: */
-				__drm_framebuffer_unreference(plane->fb);
-				plane->fb = NULL;
-				plane->crtc = NULL;
-			}
+			if (plane->fb == fb)
+				drm_plane_force_disable(plane);
 		}
 		drm_modeset_unlock_all(dev);
 	}
@@ -593,7 +608,7 @@
  * @crtc: CRTC object to init
  * @funcs: callbacks for the new CRTC
  *
- * Inits a new object created as base part of an driver crtc object.
+ * Inits a new object created as base part of a driver crtc object.
  *
  * RETURNS:
  * Zero on success, error code on failure.
@@ -628,11 +643,12 @@
 EXPORT_SYMBOL(drm_crtc_init);
 
 /**
- * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * drm_crtc_cleanup - Clean up the core crtc usage
  * @crtc: CRTC to cleanup
  *
- * Cleanup @crtc. Removes from drm modesetting space
- * does NOT free object, caller does that.
+ * This function cleans up @crtc and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the crtc structure itself,
+ * this is the responsibility of the caller.
  */
 void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
@@ -657,7 +673,7 @@
 void drm_mode_probed_add(struct drm_connector *connector,
 			 struct drm_display_mode *mode)
 {
-	list_add(&mode->head, &connector->probed_modes);
+	list_add_tail(&mode->head, &connector->probed_modes);
 }
 EXPORT_SYMBOL(drm_mode_probed_add);
 
@@ -803,6 +819,21 @@
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
 
+/**
+ * drm_plane_init - Initialise a new plane object
+ * @dev: DRM device
+ * @plane: plane object to init
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @priv: plane is private (hidden from userspace)?
+ *
+ * Inits a new object created as base part of a driver plane object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
@@ -851,6 +882,14 @@
 }
 EXPORT_SYMBOL(drm_plane_init);
 
+/**
+ * drm_plane_cleanup - Clean up the core plane usage
+ * @plane: plane to cleanup
+ *
+ * This function cleans up @plane and removes it from the DRM mode setting
+ * core. Note that the function does *not* free the plane structure itself,
+ * this is the responsibility of the caller.
+ */
 void drm_plane_cleanup(struct drm_plane *plane)
 {
 	struct drm_device *dev = plane->dev;
@@ -868,6 +907,32 @@
 EXPORT_SYMBOL(drm_plane_cleanup);
 
 /**
+ * drm_plane_force_disable - Forcibly disable a plane
+ * @plane: plane to disable
+ *
+ * Forces the plane to be disabled.
+ *
+ * Used when the plane's current framebuffer is destroyed,
+ * and when restoring fbdev mode.
+ */
+void drm_plane_force_disable(struct drm_plane *plane)
+{
+	int ret;
+
+	if (!plane->fb)
+		return;
+
+	ret = plane->funcs->disable_plane(plane);
+	if (ret)
+		DRM_ERROR("failed to disable plane with busy fb\n");
+	/* disconnect the plane from the fb and crtc: */
+	__drm_framebuffer_unreference(plane->fb);
+	plane->fb = NULL;
+	plane->crtc = NULL;
+}
+EXPORT_SYMBOL(drm_plane_force_disable);
+
+/**
  * drm_mode_create - create a new display mode
  * @dev: DRM device
  *
@@ -1740,7 +1805,7 @@
 
 	plane_resp->plane_id = plane->base.id;
 	plane_resp->possible_crtcs = plane->possible_crtcs;
-	plane_resp->gamma_size = plane->gamma_size;
+	plane_resp->gamma_size = 0;
 
 	/*
 	 * This ioctl is called twice, once to determine how much space is
@@ -1834,7 +1899,8 @@
 		if (fb->pixel_format == plane->format_types[i])
 			break;
 	if (i == plane->format_count) {
-		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+		DRM_DEBUG_KMS("Invalid pixel format %s\n",
+			      drm_get_format_name(fb->pixel_format));
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1906,18 +1972,31 @@
 int drm_mode_set_config_internal(struct drm_mode_set *set)
 {
 	struct drm_crtc *crtc = set->crtc;
-	struct drm_framebuffer *fb, *old_fb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *tmp;
 	int ret;
 
-	old_fb = crtc->fb;
+	/*
+	 * NOTE: ->set_config can also disable other crtcs (if we steal all
+	 * connectors from it), hence we need to refcount the fbs across all
+	 * crtcs. Atomic modeset will have saner semantics ...
+	 */
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+		tmp->old_fb = tmp->fb;
+
 	fb = set->fb;
 
 	ret = crtc->funcs->set_config(set);
 	if (ret == 0) {
-		if (old_fb)
-			drm_framebuffer_unreference(old_fb);
-		if (fb)
-			drm_framebuffer_reference(fb);
+		/* crtc->fb must be updated by ->set_config, enforces this. */
+		WARN_ON(fb != crtc->fb);
+	}
+
+	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+		if (tmp->fb)
+			drm_framebuffer_reference(tmp->fb);
+		if (tmp->old_fb)
+			drm_framebuffer_unreference(tmp->old_fb);
 	}
 
 	return ret;
@@ -2312,7 +2391,8 @@
 
 	ret = format_check(r);
 	if (ret) {
-		DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+		DRM_DEBUG_KMS("bad framebuffer format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ed1334e..f6829ba 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -122,6 +122,7 @@
 	int count = 0;
 	int mode_flags = 0;
 	bool verbose_prune = true;
+	enum drm_connector_status old_status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 			drm_get_connector_name(connector));
@@ -137,7 +138,32 @@
 		if (connector->funcs->force)
 			connector->funcs->force(connector);
 	} else {
+		old_status = connector->status;
+
 		connector->status = connector->funcs->detect(connector, true);
+
+		/*
+		 * Normally either the driver's hpd code or the poll loop should
+		 * pick up any changes and fire the hotplug event. But if
+		 * userspace sneaks in a probe, we might miss a change. Hence
+		 * check here, and if anything changed start the hotplug code.
+		 */
+		if (old_status != connector->status) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+				      connector->base.id,
+				      drm_get_connector_name(connector),
+				      old_status, connector->status);
+
+			/*
+			 * The hotplug event code might call into the fb
+			 * helpers, and so expects that we do not hold any
+			 * locks. Fire up the poll struct instead, it will
+			 * disable itself again.
+			 */
+			dev->mode_config.delayed_event = true;
+			schedule_delayed_work(&dev->mode_config.output_poll_work,
+					      0);
+		}
 	}
 
 	/* Re-enable polling in case the global poll config changed. */
@@ -189,13 +215,14 @@
 	if (list_empty(&connector->modes))
 		return 0;
 
+	list_for_each_entry(mode, &connector->modes, head)
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
 	drm_mode_sort(&connector->modes);
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
 			drm_get_connector_name(connector));
 	list_for_each_entry(mode, &connector->modes, head) {
-		mode->vrefresh = drm_mode_vrefresh(mode);
-
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 		drm_mode_debug_printmodeline(mode);
 	}
@@ -564,14 +591,13 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	if (!set)
-		return -EINVAL;
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
 
-	if (!set->crtc)
-		return -EINVAL;
-
-	if (!set->crtc->helper_private)
-		return -EINVAL;
+	/* Enforce sane interface api - has been abused by the fb helper. */
+	BUG_ON(!set->mode && set->fb);
+	BUG_ON(set->fb && set->num_connectors == 0);
 
 	crtc_funcs = set->crtc->helper_private;
 
@@ -645,11 +671,6 @@
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
-		} else if (set->fb->depth != set->crtc->fb->depth) {
-			mode_changed = true;
-		} else if (set->fb->bits_per_pixel !=
-			   set->crtc->fb->bits_per_pixel) {
-			mode_changed = true;
 		} else if (set->fb->pixel_format !=
 			   set->crtc->fb->pixel_format) {
 			mode_changed = true;
@@ -759,12 +780,6 @@
 				ret = -EINVAL;
 				goto fail;
 			}
-			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-			for (i = 0; i < set->num_connectors; i++) {
-				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-					      drm_get_connector_name(set->connectors[i]));
-				set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
-			}
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -782,6 +797,22 @@
 		}
 	}
 
+	/*
+	 * crtc set_config helpers implicitly set the crtc and all connected
+	 * encoders to DPMS on for a full mode set. But for just an fb update it
+	 * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
+	 * unconditionally. This will also ensure driver internal dpms state is
+	 * consistent again.
+	 */
+	if (set->crtc->enabled) {
+		DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+		for (i = 0; i < set->num_connectors; i++) {
+			DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+				      drm_get_connector_name(set->connectors[i]));
+			set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+		}
+	}
+
 	kfree(save_connectors);
 	kfree(save_encoders);
 	kfree(save_crtcs);
@@ -980,7 +1011,11 @@
 	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
-	bool repoll = false, changed = false;
+	bool repoll = false, changed;
+
+	/* Pick up any changes detected by the probe functions. */
+	changed = dev->mode_config.delayed_event;
+	dev->mode_config.delayed_event = false;
 
 	if (!drm_kms_helper_poll)
 		return;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9e62bbe..e8d17ee 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2321,6 +2321,31 @@
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+/*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+ */
+static unsigned int
+cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
+{
+	unsigned int clock = cea_mode->clock;
+
+	if (cea_mode->vrefresh % 6 != 0)
+		return clock;
+
+	/*
+	 * edid_cea_modes contains the 59.94Hz
+	 * variant for 240 and 480 line modes,
+	 * and the 60Hz variant otherwise.
+	 */
+	if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480)
+		clock = clock * 1001 / 1000;
+	else
+		clock = DIV_ROUND_UP(clock * 1000, 1001);
+
+	return clock;
+}
+
 /**
  * drm_match_cea_mode - look for a CEA mode matching given mode
  * @to_match: display mode
@@ -2339,21 +2364,9 @@
 		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
 		unsigned int clock1, clock2;
 
-		clock1 = clock2 = cea_mode->clock;
-
 		/* Check both 60Hz and 59.94Hz */
-		if (cea_mode->vrefresh % 6 == 0) {
-			/*
-			 * edid_cea_modes contains the 59.94Hz
-			 * variant for 240 and 480 line modes,
-			 * and the 60Hz variant otherwise.
-			 */
-			if (cea_mode->vdisplay == 240 ||
-			    cea_mode->vdisplay == 480)
-				clock1 = clock1 * 1001 / 1000;
-			else
-				clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
-		}
+		clock1 = cea_mode->clock;
+		clock2 = cea_mode_alternate_clock(cea_mode);
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
@@ -2364,6 +2377,66 @@
 }
 EXPORT_SYMBOL(drm_match_cea_mode);
 
+static int
+add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *tmp;
+	LIST_HEAD(list);
+	int modes = 0;
+
+	/* Don't add CEA modes if the CEA extension block is missing */
+	if (!drm_find_cea_extension(edid))
+		return 0;
+
+	/*
+	 * Go through all probed modes and create a new mode
+	 * with the alternate clock for certain CEA modes.
+	 */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		const struct drm_display_mode *cea_mode;
+		struct drm_display_mode *newmode;
+		u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+		unsigned int clock1, clock2;
+
+		if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
+			continue;
+
+		cea_mode = &edid_cea_modes[cea_mode_idx];
+
+		clock1 = cea_mode->clock;
+		clock2 = cea_mode_alternate_clock(cea_mode);
+
+		if (clock1 == clock2)
+			continue;
+
+		if (mode->clock != clock1 && mode->clock != clock2)
+			continue;
+
+		newmode = drm_mode_duplicate(dev, cea_mode);
+		if (!newmode)
+			continue;
+
+		/*
+		 * The current mode could be either variant. Make
+		 * sure to pick the "other" clock for the new mode.
+		 */
+		if (mode->clock != clock1)
+			newmode->clock = clock1;
+		else
+			newmode->clock = clock2;
+
+		list_add_tail(&newmode->head, &list);
+	}
+
+	list_for_each_entry_safe(mode, tmp, &list, head) {
+		list_del(&mode->head);
+		drm_mode_probed_add(connector, mode);
+		modes++;
+	}
+
+	return modes;
+}
 
 static int
 do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
@@ -2946,6 +3019,7 @@
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 	num_modes += add_cea_modes(connector, edid);
+	num_modes += add_alternate_cea_modes(connector, edid);
 
 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
 		edid_fixup_preferred(connector, quirks);
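
As a worked example of the alternate-clock math above: the CEA table stores
1920x1080@60 with a 148500 kHz clock, so the 59.94 Hz alternate is
DIV_ROUND_UP(148500 * 1000, 1001) = 148352 kHz; the 240/480-line entries are
stored as the 59.94 Hz variants, e.g. 720x480 at 27000 kHz, whose 60 Hz
alternate is 27000 * 1001 / 1000 = 27027 kHz.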
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index fa445dd..a4f5ce1 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -186,12 +186,11 @@
 		goto relfw_out;
 	}
 
-	edid = kmalloc(fwsize, GFP_KERNEL);
+	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
 		err = -ENOMEM;
 		goto relfw_out;
 	}
-	memcpy(edid, fwdata, fwsize);
 
 	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
 		connector->bad_edid_counter++;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b78cbe7..3d13ca6e2 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -168,6 +168,9 @@
 	uint16_t *r_base, *g_base, *b_base;
 	int i;
 
+	if (helper->funcs->gamma_get == NULL)
+		return;
+
 	r_base = crtc->gamma_store;
 	g_base = r_base + crtc->gamma_size;
 	b_base = g_base + crtc->gamma_size;
@@ -284,13 +287,27 @@
  */
 bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 {
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_plane *plane;
 	bool error = false;
-	int i, ret;
+	int i;
 
-	drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+	drm_warn_on_modeset_not_all_locked(dev);
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+		drm_plane_force_disable(plane);
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+		struct drm_crtc *crtc = mode_set->crtc;
+		int ret;
+
+		if (crtc->funcs->cursor_set) {
+			ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
+			if (ret)
+				error = true;
+		}
+
 		ret = drm_mode_set_config_internal(mode_set);
 		if (ret)
 			error = true;
@@ -583,6 +600,14 @@
 		return 0;
 	}
 
+	/*
+	 * The driver really shouldn't advertise pseudo/directcolor
+	 * visuals if it can't deal with the palette.
+	 */
+	if (WARN_ON(!fb_helper->funcs->gamma_set ||
+		    !fb_helper->funcs->gamma_get))
+		return -EINVAL;
+
 	pindex = regno;
 
 	if (fb->bits_per_pixel == 16) {
@@ -626,12 +651,19 @@
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	u16 *red, *green, *blue, *transp;
 	struct drm_crtc *crtc;
 	int i, j, rc = 0;
 	int start;
 
+	drm_modeset_lock_all(dev);
+	if (!drm_fb_helper_is_bound(fb_helper)) {
+		drm_modeset_unlock_all(dev);
+		return -EBUSY;
+	}
+
 	for (i = 0; i < fb_helper->crtc_count; i++) {
 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
 		crtc_funcs = crtc->helper_private;
@@ -654,10 +686,13 @@
 
 			rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
 			if (rc)
-				return rc;
+				goto out;
 		}
-		crtc_funcs->load_lut(crtc);
+		if (crtc_funcs->load_lut)
+			crtc_funcs->load_lut(crtc);
 	}
+ out:
+	drm_modeset_unlock_all(dev);
 	return rc;
 }
 EXPORT_SYMBOL(drm_fb_helper_setcmap);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index cf919e3..4321713 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -644,6 +644,55 @@
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * Return 0 on success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
 
 /**
  * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +702,9 @@
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -665,7 +712,6 @@
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
 	int ret = 0;
 
@@ -686,32 +732,7 @@
 		goto out_unlock;
 	}
 
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(dev, vma);
+	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
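
The point of splitting out drm_gem_mmap_obj() is that dma-buf exporters can
reuse it without the fake-offset lookup; a driver's dma-buf mmap callback then
reduces to roughly this (foo_dmabuf_mmap is a placeholder name; the CMA helper
below does the same thing plus a remap of its backing memory):

    static int foo_dmabuf_mmap(struct dma_buf *dma_buf,
                               struct vm_area_struct *vma)
    {
            struct drm_gem_object *obj = dma_buf->priv;

            /* The dma-buf already hands us the object, no offset lookup. */
            return drm_gem_mmap_obj(obj, obj->size, vma);
    }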
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0a7e011..9efabce 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/export.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 
 #include <drm/drmP.h>
@@ -32,11 +33,44 @@
 	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
 }
 
-static void drm_gem_cma_buf_destroy(struct drm_device *drm,
-		struct drm_gem_cma_object *cma_obj)
+/*
+ * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * @drm: The drm device
+ * @size: The GEM object size
+ *
+ * This function creates and initializes a GEM CMA object of the given size, but
+ * doesn't allocate any memory to back the object.
+ *
+ * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
+ */
+static struct drm_gem_cma_object *
+__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
 {
-	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
-			cma_obj->paddr);
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto error;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret) {
+		drm_gem_object_release(gem_obj);
+		goto error;
+	}
+
+	return cma_obj;
+
+error:
+	kfree(cma_obj);
+	return ERR_PTR(ret);
 }
 
 /*
@@ -49,44 +83,42 @@
 		unsigned int size)
 {
 	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *gem_obj;
+	struct sg_table *sgt = NULL;
 	int ret;
 
 	size = round_up(size, PAGE_SIZE);
 
-	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
-	if (!cma_obj)
-		return ERR_PTR(-ENOMEM);
+	cma_obj = __drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
 
 	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
 			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
 	if (!cma_obj->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
+			size);
 		ret = -ENOMEM;
-		goto err_dma_alloc;
+		goto error;
 	}
 
-	gem_obj = &cma_obj->base;
+	sgt = kzalloc(sizeof(*cma_obj->sgt), GFP_KERNEL);
+	if (sgt == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
 
-	ret = drm_gem_object_init(drm, gem_obj, size);
-	if (ret)
-		goto err_obj_init;
+	ret = dma_get_sgtable(drm->dev, sgt, cma_obj->vaddr,
+			      cma_obj->paddr, size);
+	if (ret < 0)
+		goto error;
 
-	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret)
-		goto err_create_mmap_offset;
+	cma_obj->sgt = sgt;
 
 	return cma_obj;
 
-err_create_mmap_offset:
-	drm_gem_object_release(gem_obj);
-
-err_obj_init:
-	drm_gem_cma_buf_destroy(drm, cma_obj);
-
-err_dma_alloc:
-	kfree(cma_obj);
-
+error:
+	kfree(sgt);
+	drm_gem_cma_free_object(&cma_obj->base);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -143,11 +175,20 @@
 	if (gem_obj->map_list.map)
 		drm_gem_free_mmap_offset(gem_obj);
 
-	drm_gem_object_release(gem_obj);
-
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
-	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+	if (cma_obj->vaddr) {
+		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
+				      cma_obj->vaddr, cma_obj->paddr);
+		if (cma_obj->sgt) {
+			sg_free_table(cma_obj->sgt);
+			kfree(cma_obj->sgt);
+		}
+	} else if (gem_obj->import_attach) {
+		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+	}
+
+	drm_gem_object_release(gem_obj);
 
 	kfree(cma_obj);
 }
@@ -174,10 +215,7 @@
 
 	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
 			args->size, &args->handle);
-	if (IS_ERR(cma_obj))
-		return PTR_ERR(cma_obj);
-
-	return 0;
+	return PTR_RET(cma_obj);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
 
@@ -215,13 +253,26 @@
 };
 EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
 
+static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
+				struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+
 /*
  * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
  */
 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	struct drm_gem_object *gem_obj;
 	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
 	int ret;
 
 	ret = drm_gem_mmap(filp, vma);
@@ -231,12 +282,7 @@
 	gem_obj = vma->vm_private_data;
 	cma_obj = to_drm_gem_cma_obj(gem_obj);
 
-	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	if (ret)
-		drm_gem_vm_close(vma);
-
-	return ret;
+	return drm_gem_cma_mmap_obj(cma_obj, vma);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
@@ -270,3 +316,286 @@
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
 #endif
+
+/* -----------------------------------------------------------------------------
+ * DMA-BUF
+ */
+
+struct drm_gem_cma_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int drm_gem_cma_dmabuf_attach(struct dma_buf *dmabuf, struct device *dev,
+				     struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach;
+
+	cma_attach = kzalloc(sizeof(*cma_attach), GFP_KERNEL);
+	if (!cma_attach)
+		return -ENOMEM;
+
+	cma_attach->dir = DMA_NONE;
+	attach->priv = cma_attach;
+
+	return 0;
+}
+
+static void drm_gem_cma_dmabuf_detach(struct dma_buf *dmabuf,
+				      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (cma_attach == NULL)
+		return;
+
+	sgt = &cma_attach->sgt;
+
+	if (cma_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				cma_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(cma_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+drm_gem_cma_dmabuf_map(struct dma_buf_attachment *attach,
+		       enum dma_data_direction dir)
+{
+	struct drm_gem_cma_dmabuf_attachment *cma_attach = attach->priv;
+	struct drm_gem_cma_object *cma_obj = attach->dmabuf->priv;
+	struct drm_device *drm = cma_obj->base.dev;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	unsigned int i;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("\n");
+
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* Return the cached mapping when possible. */
+	if (cma_attach->dir == dir)
+		return &cma_attach->sgt;
+
+	/* Two mappings with different directions for the same attachment are
+	 * not allowed.
+	 */
+	if (WARN_ON(cma_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
+	sgt = &cma_attach->sgt;
+
+	ret = sg_alloc_table(sgt, cma_obj->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&drm->struct_mutex);
+
+	rd = cma_obj->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with iommu.\n");
+		sg_free_table(sgt);
+		sgt = ERR_PTR(-EIO);
+		goto done;
+	}
+
+	cma_attach->dir = dir;
+	attach->priv = cma_attach;
+
+	DRM_DEBUG_PRIME("buffer size = %zu\n", cma_obj->base.size);
+
+done:
+	mutex_unlock(&drm->struct_mutex);
+	return sgt;
+}
+
+static void drm_gem_cma_dmabuf_unmap(struct dma_buf_attachment *attach,
+				     struct sg_table *sgt,
+				     enum dma_data_direction dir)
+{
+	/* Nothing to do. */
+}
+
+static void drm_gem_cma_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/*
+	 * A call to drm_gem_cma_dmabuf_release() means the dma-buf file
+	 * object's f_count has dropped to 0, so drop the GEM reference
+	 * that was taken when the buffer was exported via
+	 * drm_prime_handle_to_fd().
+	 */
+	if (cma_obj->base.export_dma_buf == dmabuf) {
+		cma_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * drop this gem object refcount to release allocated buffer
+		 * and resources.
+		 */
+		drm_gem_object_unreference_unlocked(&cma_obj->base);
+	}
+}
+
+static void *drm_gem_cma_dmabuf_kmap_atomic(struct dma_buf *dmabuf,
+					    unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap_atomic(struct dma_buf *dmabuf,
+					     unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static void *drm_gem_cma_dmabuf_kmap(struct dma_buf *dmabuf,
+				     unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void drm_gem_cma_dmabuf_kunmap(struct dma_buf *dmabuf,
+				      unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static int drm_gem_cma_dmabuf_mmap(struct dma_buf *dmabuf,
+				   struct vm_area_struct *vma)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+	struct drm_gem_object *gem_obj = &cma_obj->base;
+	int ret;
+
+	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
+	if (ret < 0)
+		return ret;
+
+	return drm_gem_cma_mmap_obj(cma_obj, vma);
+}
+
+static void *drm_gem_cma_dmabuf_vmap(struct dma_buf *dmabuf)
+{
+	struct drm_gem_cma_object *cma_obj = dmabuf->priv;
+
+	return cma_obj->vaddr;
+}
+
+static struct dma_buf_ops drm_gem_cma_dmabuf_ops = {
+	.attach			= drm_gem_cma_dmabuf_attach,
+	.detach			= drm_gem_cma_dmabuf_detach,
+	.map_dma_buf		= drm_gem_cma_dmabuf_map,
+	.unmap_dma_buf		= drm_gem_cma_dmabuf_unmap,
+	.kmap			= drm_gem_cma_dmabuf_kmap,
+	.kmap_atomic		= drm_gem_cma_dmabuf_kmap_atomic,
+	.kunmap			= drm_gem_cma_dmabuf_kunmap,
+	.kunmap_atomic		= drm_gem_cma_dmabuf_kunmap_atomic,
+	.mmap			= drm_gem_cma_dmabuf_mmap,
+	.vmap			= drm_gem_cma_dmabuf_vmap,
+	.release		= drm_gem_cma_dmabuf_release,
+};
+
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm,
+					  struct drm_gem_object *obj, int flags)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+	return dma_buf_export(cma_obj, &drm_gem_cma_dmabuf_ops,
+			      cma_obj->base.size, flags);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_export);
+
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm,
+						 struct dma_buf *dma_buf)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	int ret;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/* is this one of our own objects? */
+	if (dma_buf->ops == &drm_gem_cma_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		cma_obj = dma_buf->priv;
+		obj = &cma_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm) {
+			/*
+			 * Importing dmabuf exported from our own gem increases
+			 * refcount on gem itself instead of f_count of dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(dma_buf);
+			return obj;
+		}
+	}
+
+	/* Create a CMA GEM buffer. */
+	cma_obj = __drm_gem_cma_create(drm, dma_buf->size);
+	if (IS_ERR(cma_obj))
+		return ERR_PTR(PTR_ERR(cma_obj));
+
+	/* Attach to the buffer and map it. Make sure the mapping is contiguous
+	 * on the device memory bus, as that's all we support.
+	 */
+	attach = dma_buf_attach(dma_buf, drm->dev);
+	if (IS_ERR(attach)) {
+		ret = -EINVAL;
+		goto error_gem_free;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt)) {
+		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
+		goto error_buf_detach;
+	}
+
+	if (sgt->nents != 1) {
+		ret = -EINVAL;
+		goto error_buf_unmap;
+	}
+
+	cma_obj->base.import_attach = attach;
+	cma_obj->paddr = sg_dma_address(sgt->sgl);
+	cma_obj->sgt = sgt;
+
+	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = %zu\n", cma_obj->paddr,
+			dma_buf->size);
+
+	return &cma_obj->base;
+
+error_buf_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+error_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+error_gem_free:
+	drm_gem_cma_free_object(&cma_obj->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dmabuf_import);
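
A driver built on these CMA helpers would then wire the new export/import hooks
into its struct drm_driver along these lines (a sketch with a placeholder
driver name; the fields are the standard drm_driver PRIME hooks):

    static struct drm_driver foo_driver = {
            .driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
            .gem_free_object    = drm_gem_cma_free_object,
            .gem_vm_ops         = &drm_gem_cma_vm_ops,
            .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
            .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
            .gem_prime_export   = drm_gem_cma_dmabuf_export,
            .gem_prime_import   = drm_gem_cma_dmabuf_import,
            .dumb_create        = drm_gem_cma_dumb_create,
            /* ... fops, name, date etc. as usual ... */
    };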
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index e77bd8b..ffd7a7b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -38,6 +38,9 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#ifdef CONFIG_X86
+#include <asm/mtrr.h>
+#endif
 
 /**
  * Get the bus id.
@@ -181,7 +184,17 @@
 	map->type = r_list->map->type;
 	map->flags = r_list->map->flags;
 	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = r_list->map->mtrr;
+
+#ifdef CONFIG_X86
+	/*
+	 * There appears to be exactly one user of the mtrr index: dritest.
+	 * It's easy enough to keep it working on non-PAT systems.
+	 */
+	map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
+#else
+	map->mtrr = -1;
+#endif
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a371ff8..a6729bf 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -535,6 +535,8 @@
 		dmode->flags |= DRM_MODE_FLAG_INTERLACE;
 	if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
 		dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
+		dmode->flags |= DRM_MODE_FLAG_DBLCLK;
 	drm_mode_set_name(dmode);
 
 	return 0;
@@ -787,16 +789,17 @@
  * LOCKING:
  * None.
  *
- * Copy an existing mode into another mode, preserving the object id
- * of the destination mode.
+ * Copy an existing mode into another mode, preserving the object id and
+ * list head of the destination mode.
  */
 void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
 {
 	int id = dst->base.id;
+	struct list_head head = dst->head;
 
 	*dst = *src;
 	dst->base.id = id;
-	INIT_LIST_HEAD(&dst->head);
+	dst->head = head;
 }
 EXPORT_SYMBOL(drm_mode_copy);
 
@@ -1017,6 +1020,11 @@
 	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
 	if (diff)
 		return diff;
+
+	diff = b->vrefresh - a->vrefresh;
+	if (diff)
+		return diff;
+
 	diff = b->clock - a->clock;
 	return diff;
 }
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 14194b6..80c0b2b 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -278,10 +278,10 @@
 		}
 		if (drm_core_has_MTRR(dev)) {
 			if (dev->agp)
-				dev->agp->agp_mtrr =
-					mtrr_add(dev->agp->agp_info.aper_base,
-						 dev->agp->agp_info.aper_size *
-						 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+				dev->agp->agp_mtrr = arch_phys_wc_add(
+					dev->agp->agp_info.aper_base,
+					dev->agp->agp_info.aper_size *
+					1024 * 1024);
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 5b7b911..e57c675 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -64,6 +64,29 @@
 };
 static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
 
+static int drm_gem_map_attach(struct dma_buf *dma_buf,
+			      struct device *target_dev,
+			      struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (!dev->driver->gem_prime_pin)
+		return 0;
+
+	return dev->driver->gem_prime_pin(obj);
+}
+
+static void drm_gem_map_detach(struct dma_buf *dma_buf,
+			       struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_prime_unpin)
+		dev->driver->gem_prime_unpin(obj);
+}
+
 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 		enum dma_data_direction dir)
 {
@@ -146,6 +169,8 @@
 }
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
 	.map_dma_buf = drm_gem_map_dma_buf,
 	.unmap_dma_buf = drm_gem_unmap_dma_buf,
 	.release = drm_gem_dmabuf_release,
@@ -185,11 +210,6 @@
 struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 				     struct drm_gem_object *obj, int flags)
 {
-	if (dev->driver->gem_prime_pin) {
-		int ret = dev->driver->gem_prime_pin(obj);
-		if (ret)
-			return ERR_PTR(ret);
-	}
 	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
@@ -276,7 +296,7 @@
 
 	attach = dma_buf_attach(dma_buf, dev->dev);
 	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
+		return ERR_CAST(attach);
 
 	get_dma_buf(dma_buf);
 
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
new file mode 100644
index 0000000..7047ca0
--- /dev/null
+++ b/drivers/gpu/drm/drm_rect.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include <drm/drm_rect.h>
+
+/**
+ * drm_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * Calculate the intersection of rectangles @r1 and @r2.
+ * @r1 will be overwritten with the intersection.
+ *
+ * RETURNS:
+ * %true if rectangle @r1 is still visible after the operation,
+ * %false otherwise.
+ */
+bool drm_rect_intersect(struct drm_rect *r1, const struct drm_rect *r2)
+{
+	r1->x1 = max(r1->x1, r2->x1);
+	r1->y1 = max(r1->y1, r2->y1);
+	r1->x2 = min(r1->x2, r2->x2);
+	r1->y2 = min(r1->y2, r2->y2);
+
+	return drm_rect_visible(r1);
+}
+EXPORT_SYMBOL(drm_rect_intersect);
+
+/**
+ * drm_rect_clip_scaled - perform a scaled clip operation
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @clip: clip rectangle
+ * @hscale: horizontal scaling factor
+ * @vscale: vertical scaling factor
+ *
+ * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the
+ * same amounts multiplied by @hscale and @vscale.
+ *
+ * RETURNS:
+ * %true if rectangle @dst is still visible after being clipped,
+ * %false otherwise
+ */
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+			  const struct drm_rect *clip,
+			  int hscale, int vscale)
+{
+	int diff;
+
+	diff = clip->x1 - dst->x1;
+	if (diff > 0) {
+		int64_t tmp = src->x1 + (int64_t) diff * hscale;
+		src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = clip->y1 - dst->y1;
+	if (diff > 0) {
+		int64_t tmp = src->y1 + (int64_t) diff * vscale;
+		src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = dst->x2 - clip->x2;
+	if (diff > 0) {
+		int64_t tmp = src->x2 - (int64_t) diff * hscale;
+		src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+	diff = dst->y2 - clip->y2;
+	if (diff > 0) {
+		int64_t tmp = src->y2 - (int64_t) diff * vscale;
+		src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX);
+	}
+
+	return drm_rect_intersect(dst, clip);
+}
+EXPORT_SYMBOL(drm_rect_clip_scaled);
+
+static int drm_calc_scale(int src, int dst)
+{
+	int scale = 0;
+
+	if (src < 0 || dst < 0)
+		return -EINVAL;
+
+	if (dst == 0)
+		return 0;
+
+	scale = src / dst;
+
+	return scale;
+}
+
+/**
+ * drm_rect_calc_hscale - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * RETURNS:
+ * The horizontal scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_hscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_hscale, int max_hscale)
+{
+	int src_w = drm_rect_width(src);
+	int dst_w = drm_rect_width(dst);
+	int hscale = drm_calc_scale(src_w, dst_w);
+
+	if (hscale < 0 || dst_w == 0)
+		return hscale;
+
+	if (hscale < min_hscale || hscale > max_hscale)
+		return -ERANGE;
+
+	return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale);
+
+/**
+ * drm_rect_calc_vscale - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * RETURNS:
+ * The vertical scaling factor, or errno if out of limits.
+ */
+int drm_rect_calc_vscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_vscale, int max_vscale)
+{
+	int src_h = drm_rect_height(src);
+	int dst_h = drm_rect_height(dst);
+	int vscale = drm_calc_scale(src_h, dst_h);
+
+	if (vscale < 0 || dst_h == 0)
+		return vscale;
+
+	if (vscale < min_vscale || vscale > max_vscale)
+		return -ERANGE;
+
+	return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale);
+
+/**
+ * drm_rect_calc_hscale_relaxed - calculate the horizontal scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_hscale: minimum allowed horizontal scaling factor
+ * @max_hscale: maximum allowed horizontal scaling factor
+ *
+ * Calculate the horizontal scaling factor as
+ * (@src width) / (@dst width).
+ *
+ * If the calculated scaling factor is below @min_hscale,
+ * decrease the width of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_hscale,
+ * decrease the width of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The horizontal scaling factor.
+ */
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_hscale, int max_hscale)
+{
+	int src_w = drm_rect_width(src);
+	int dst_w = drm_rect_width(dst);
+	int hscale = drm_calc_scale(src_w, dst_w);
+
+	if (hscale < 0 || dst_w == 0)
+		return hscale;
+
+	if (hscale < min_hscale) {
+		int max_dst_w = src_w / min_hscale;
+
+		drm_rect_adjust_size(dst, max_dst_w - dst_w, 0);
+
+		return min_hscale;
+	}
+
+	if (hscale > max_hscale) {
+		int max_src_w = dst_w * max_hscale;
+
+		drm_rect_adjust_size(src, max_src_w - src_w, 0);
+
+		return max_hscale;
+	}
+
+	return hscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_hscale_relaxed);
+
+/**
+ * drm_rect_calc_vscale_relaxed - calculate the vertical scaling factor
+ * @src: source window rectangle
+ * @dst: destination window rectangle
+ * @min_vscale: minimum allowed vertical scaling factor
+ * @max_vscale: maximum allowed vertical scaling factor
+ *
+ * Calculate the vertical scaling factor as
+ * (@src height) / (@dst height).
+ *
+ * If the calculated scaling factor is below @min_vscale,
+ * decrease the height of rectangle @dst to compensate.
+ *
+ * If the calculated scaling factor is above @max_vscale,
+ * decrease the height of rectangle @src to compensate.
+ *
+ * RETURNS:
+ * The vertical scaling factor.
+ */
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_vscale, int max_vscale)
+{
+	int src_h = drm_rect_height(src);
+	int dst_h = drm_rect_height(dst);
+	int vscale = drm_calc_scale(src_h, dst_h);
+
+	if (vscale < 0 || dst_h == 0)
+		return vscale;
+
+	if (vscale < min_vscale) {
+		int max_dst_h = src_h / min_vscale;
+
+		drm_rect_adjust_size(dst, 0, max_dst_h - dst_h);
+
+		return min_vscale;
+	}
+
+	if (vscale > max_vscale) {
+		int max_src_h = dst_h * max_vscale;
+
+		drm_rect_adjust_size(src, 0, max_src_h - src_h);
+
+		return max_vscale;
+	}
+
+	return vscale;
+}
+EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed);
+
+/**
+ * drm_rect_debug_print - print the rectangle information
+ * @r: rectangle to print
+ * @fixed_point: rectangle is in 16.16 fixed point format
+ */
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
+{
+	int w = drm_rect_width(r);
+	int h = drm_rect_height(r);
+
+	if (fixed_point)
+		DRM_DEBUG_KMS("%d.%06ux%d.%06u%+d.%06u%+d.%06u\n",
+			      w >> 16, ((w & 0xffff) * 15625) >> 10,
+			      h >> 16, ((h & 0xffff) * 15625) >> 10,
+			      r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10,
+			      r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10);
+	else
+		DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
+}
+EXPORT_SYMBOL(drm_rect_debug_print);
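
The rectangle helpers added above are intended for plane/sprite clipping: intersect the
destination rectangle with the visible area and shrink the source rectangle by the scaled
amount. As a rough illustration of the intersection step only, here is a small self-contained
userspace sketch; the struct and helpers are re-declared for the example and are not the
drm_rect.h API itself:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct drm_rect: x1/y1 inclusive, x2/y2 exclusive. */
    struct rect { int x1, y1, x2, y2; };

    static int rect_w(const struct rect *r) { return r->x2 - r->x1; }
    static int rect_h(const struct rect *r) { return r->y2 - r->y1; }

    /* Same idea as drm_rect_intersect(): clamp r1 to r2, report visibility. */
    static bool rect_intersect(struct rect *r1, const struct rect *r2)
    {
            if (r1->x1 < r2->x1) r1->x1 = r2->x1;
            if (r1->y1 < r2->y1) r1->y1 = r2->y1;
            if (r1->x2 > r2->x2) r1->x2 = r2->x2;
            if (r1->y2 > r2->y2) r1->y2 = r2->y2;
            return rect_w(r1) > 0 && rect_h(r1) > 0;
    }

    int main(void)
    {
            struct rect dst  = { -10, 0, 90, 50 };  /* hangs off the left edge */
            struct rect clip = {   0, 0, 80, 60 };  /* visible area */

            bool visible = rect_intersect(&dst, &clip);
            printf("visible=%d dst=%dx%d%+d%+d\n",
                   visible, rect_w(&dst), rect_h(&dst), dst.x1, dst.y1);
            return 0;
    }
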
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 16f3ec5..577786c 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -451,14 +451,8 @@
 
 	drm_lastclose(dev);
 
-	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
-	    dev->agp && dev->agp->agp_mtrr >= 0) {
-		int retval;
-		retval = mtrr_del(dev->agp->agp_mtrr,
-				  dev->agp->agp_info.aper_base,
-				  dev->agp->agp_info.aper_size * 1024 * 1024);
-		DRM_DEBUG("mtrr_del=%d\n", retval);
-	}
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
+		arch_phys_wc_del(dev->agp->agp_mtrr);
 
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
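
The conversion pattern here (and in drm_vm.c and i915_dma.c further down) is the same
everywhere: keep the cookie returned by arch_phys_wc_add() and hand it back to
arch_phys_wc_del() at teardown, with no >= 0 check needed. A self-contained userspace
sketch of the calling convention, with both functions stubbed out locally since the real
ones live in arch/x86 and arch_phys_wc_del() safely ignores cookies it did not hand out:

    #include <stdio.h>

    /* Local stand-ins so the calling pattern can be shown outside the
     * kernel; the real functions return an opaque cookie.
     */
    static int arch_phys_wc_add(unsigned long base, unsigned long size)
    {
            printf("wc add: base=%#lx size=%#lx\n", base, size);
            return 1; /* pretend a cookie was handed out */
    }

    static void arch_phys_wc_del(int handle)
    {
            if (handle >= 0)
                    printf("wc del: handle=%d\n", handle);
    }

    int main(void)
    {
            /* The driver keeps only the cookie; teardown is one
             * unconditional call, which is what lets the open-coded
             * mtrr_del() blocks in this patch collapse to a single line.
             */
            int gtt_mtrr = arch_phys_wc_add(0xe0000000UL, 256UL << 20);
            arch_phys_wc_del(gtt_mtrr);
            return 0;
    }
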
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1d4f7c9..feb2003 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -43,18 +43,19 @@
 static void drm_vm_open(struct vm_area_struct *vma);
 static void drm_vm_close(struct vm_area_struct *vma);
 
-static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+static pgprot_t drm_io_prot(struct drm_local_map *map,
+			    struct vm_area_struct *vma)
 {
 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__i386__) || defined(__x86_64__)
-	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
-		pgprot_val(tmp) |= _PAGE_PCD;
-		pgprot_val(tmp) &= ~_PAGE_PWT;
-	}
+	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
+		tmp = pgprot_noncached(tmp);
+	else
+		tmp = pgprot_writecombine(tmp);
 #elif defined(__powerpc__)
 	pgprot_val(tmp) |= _PAGE_NO_CACHE;
-	if (map_type == _DRM_REGISTERS)
+	if (map->type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
 #elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
@@ -250,13 +251,8 @@
 			switch (map->type) {
 			case _DRM_REGISTERS:
 			case _DRM_FRAME_BUFFER:
-				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
+				if (drm_core_has_MTRR(dev))
+					arch_phys_wc_del(map->mtrr);
 				iounmap(map->handle);
 				break;
 			case _DRM_SHM:
@@ -617,8 +613,7 @@
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
 		offset = drm_core_get_reg_ofs(dev);
-		vma->vm_flags |= VM_IO;	/* not in core dump */
-		vma->vm_page_prot = drm_io_prot(map->type, vma);
+		vma->vm_page_prot = drm_io_prot(map, vma);
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index c200e4d..073c10a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -169,12 +169,6 @@
 	return 0;
 }
 
-static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
-{
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-	/* drm framework doesn't check NULL */
-}
-
 static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -192,7 +186,6 @@
 	.mode_fixup	= exynos_drm_crtc_mode_fixup,
 	.mode_set	= exynos_drm_crtc_mode_set,
 	.mode_set_base	= exynos_drm_crtc_mode_set_base,
-	.load_lut	= exynos_drm_crtc_load_lut,
 	.disable	= exynos_drm_crtc_disable,
 };
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91f3ac6..40034ec 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -36,6 +36,7 @@
 	  intel_overlay.o \
 	  intel_sprite.o \
 	  intel_opregion.o \
+	  intel_sideband.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 3edd981..757e0fa 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -32,12 +32,14 @@
 #define CH7xxx_REG_DID		0x4b
 
 #define CH7011_VID		0x83 /* 7010 as well */
+#define CH7010B_VID		0x05
 #define CH7009A_VID		0x84
 #define CH7009B_VID		0x85
 #define CH7301_VID		0x95
 
 #define CH7xxx_VID		0x84
 #define CH7xxx_DID		0x17
+#define CH7010_DID		0x16
 
 #define CH7xxx_NUM_REGS		0x4c
 
@@ -87,11 +89,20 @@
 	char *name;
 } ch7xxx_ids[] = {
 	{ CH7011_VID, "CH7011" },
+	{ CH7010B_VID, "CH7010B" },
 	{ CH7009A_VID, "CH7009A" },
 	{ CH7009B_VID, "CH7009B" },
 	{ CH7301_VID, "CH7301" },
 };
 
+static struct ch7xxx_did_struct {
+	uint8_t did;
+	char *name;
+} ch7xxx_dids[] = {
+	{ CH7xxx_DID, "CH7XXX" },
+	{ CH7010_DID, "CH7010B" },
+};
+
 struct ch7xxx_priv {
 	bool quiet;
 };
@@ -108,6 +119,18 @@
 	return NULL;
 }
 
+static char *ch7xxx_get_did(uint8_t did)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
+		if (ch7xxx_dids[i].did == did)
+			return ch7xxx_dids[i].name;
+	}
+
+	return NULL;
+}
+
 /** Reads an 8 bit register */
 static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
@@ -179,7 +202,7 @@
 	/* this will detect the CH7xxx chip on the specified i2c bus */
 	struct ch7xxx_priv *ch7xxx;
 	uint8_t vendor, device;
-	char *name;
+	char *name, *devid;
 
 	ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
 	if (ch7xxx == NULL)
@@ -204,7 +227,8 @@
 	if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
 		goto out;
 
-	if (device != CH7xxx_DID) {
+	devid = ch7xxx_get_did(device);
+	if (!devid) {
 		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
 				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e913d32..76255a6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,11 +61,11 @@
 
 	seq_printf(m, "gen: %d\n", info->gen);
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
-#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
-#define DEV_INFO_SEP ;
-	DEV_INFO_FLAGS;
-#undef DEV_INFO_FLAG
-#undef DEV_INFO_SEP
+#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
+#define SEP_SEMICOLON ;
+	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
+#undef PRINT_FLAG
+#undef SEP_SEMICOLON
 
 	return 0;
 }
@@ -570,6 +570,7 @@
 	case RCS: return "render";
 	case VCS: return "bsd";
 	case BCS: return "blt";
+	case VECS: return "vebox";
 	default: return "";
 	}
 }
@@ -604,15 +605,80 @@
 	return purgeable ? " purgeable" : "";
 }
 
-static void print_error_buffers(struct seq_file *m,
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+			       const char *f, va_list args)
+{
+	unsigned len;
+
+	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+		e->err = -ENOSPC;
+		return;
+	}
+
+	if (e->bytes == e->size - 1 || e->err)
+		return;
+
+	/* Seek to the first printf which hits the start position */
+	if (e->pos < e->start) {
+		len = vsnprintf(NULL, 0, f, args);
+		if (e->pos + len <= e->start) {
+			e->pos += len;
+			return;
+		}
+
+		/* First vsnprintf needs to fit in full for memmove */
+		if (len >= e->size) {
+			e->err = -EIO;
+			return;
+		}
+	}
+
+	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+	if (len >= e->size - e->bytes)
+		len = e->size - e->bytes - 1;
+
+	/* If this is the first printf in this window, adjust it so that
+	 * the start position matches the start of the buffer
+	 */
+	if (e->pos < e->start) {
+		const size_t off = e->start - e->pos;
+
+		/* Should not happen but be paranoid */
+		if (off > len || e->bytes) {
+			e->err = -EIO;
+			return;
+		}
+
+		memmove(e->buf, e->buf + off, len - off);
+		e->bytes = len - off;
+		e->pos = e->start;
+		return;
+	}
+
+	e->bytes += len;
+	e->pos += len;
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+	va_list args;
+
+	va_start(args, f);
+	i915_error_vprintf(e, f, args);
+	va_end(args);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				const char *name,
 				struct drm_i915_error_buffer *err,
 				int count)
 {
-	seq_printf(m, "%s [%d]:\n", name, count);
+	err_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
+		err_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -627,50 +693,50 @@
 			   cache_level_str(err->cache_level));
 
 		if (err->name)
-			seq_printf(m, " (name: %d)", err->name);
+			err_printf(m, " (name: %d)", err->name);
 		if (err->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", err->fence_reg);
+			err_printf(m, " (fence: %d)", err->fence_reg);
 
-		seq_printf(m, "\n");
+		err_printf(m, "\n");
 		err++;
 	}
 }
 
-static void i915_ring_error_state(struct seq_file *m,
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 				  struct drm_device *dev,
 				  struct drm_i915_error_state *error,
 				  unsigned ring)
 {
 	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-	seq_printf(m, "%s command stream:\n", ring_str(ring));
-	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-	seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+	err_printf(m, "%s command stream:\n", ring_str(ring));
+	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+	err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
+	err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
 
 	if (INTEL_INFO(dev)->gen >= 4)
-		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
-	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-	seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
 	if (INTEL_INFO(dev)->gen >= 6) {
-		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-		seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+		err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+		err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
 			   error->semaphore_mboxes[ring][0],
 			   error->semaphore_seqno[ring][0]);
-		seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
 			   error->semaphore_mboxes[ring][1],
 			   error->semaphore_seqno[ring][1]);
 	}
-	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
+	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
 }
 
 struct i915_error_state_file_priv {
@@ -678,9 +744,11 @@
 	struct drm_i915_error_state *error;
 };
 
-static int i915_error_state(struct seq_file *m, void *unused)
+
+static int i915_error_state(struct i915_error_state_file_priv *error_priv,
+			    struct drm_i915_error_state_buf *m)
+
 {
-	struct i915_error_state_file_priv *error_priv = m->private;
 	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error = error_priv->error;
@@ -688,34 +756,35 @@
 	int i, j, page, offset, elt;
 
 	if (!error) {
-		seq_printf(m, "no error state collected\n");
+		err_printf(m, "no error state collected\n");
 		return 0;
 	}
 
-	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
-	seq_printf(m, "Kernel: " UTS_RELEASE "\n");
-	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, "IER: 0x%08x\n", error->ier);
-	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-	seq_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Kernel: " UTS_RELEASE "\n");
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "EIR: 0x%08x\n", error->eir);
+	err_printf(m, "IER: 0x%08x\n", error->ier);
+	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+	err_printf(m, "CCID: 0x%08x\n", error->ccid);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
-		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
 	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
+			   error->extra_instdone[i]);
 
 	if (INTEL_INFO(dev)->gen >= 6) {
-		seq_printf(m, "ERROR: 0x%08x\n", error->error);
-		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+		err_printf(m, "ERROR: 0x%08x\n", error->error);
+		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
 	if (INTEL_INFO(dev)->gen == 7)
-		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
 	for_each_ring(ring, dev_priv, i)
 		i915_ring_error_state(m, dev, error, i);
@@ -734,24 +803,25 @@
 		struct drm_i915_error_object *obj;
 
 		if ((obj = error->ring[i].batchbuffer)) {
-			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
+					err_printf(m, "%08x :  %08x\n", offset,
+						   obj->pages[page][elt]);
 					offset += 4;
 				}
 			}
 		}
 
 		if (error->ring[i].num_requests) {
-			seq_printf(m, "%s --- %d requests\n",
+			err_printf(m, "%s --- %d requests\n",
 				   dev_priv->ring[i].name,
 				   error->ring[i].num_requests);
 			for (j = 0; j < error->ring[i].num_requests; j++) {
-				seq_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
 					   error->ring[i].requests[j].seqno,
 					   error->ring[i].requests[j].jiffies,
 					   error->ring[i].requests[j].tail);
@@ -759,13 +829,13 @@
 		}
 
 		if ((obj = error->ring[i].ringbuffer)) {
-			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-					seq_printf(m, "%08x :  %08x\n",
+					err_printf(m, "%08x :  %08x\n",
 						   offset,
 						   obj->pages[page][elt]);
 					offset += 4;
@@ -775,12 +845,12 @@
 
 		obj = error->ring[i].ctx;
 		if (obj) {
-			seq_printf(m, "%s --- HW Context = 0x%08x\n",
+			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->ring[i].name,
 				   obj->gtt_offset);
 			offset = 0;
 			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-				seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
+				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
 					   offset,
 					   obj->pages[0][elt],
 					   obj->pages[0][elt+1],
@@ -806,8 +876,7 @@
 		       size_t cnt,
 		       loff_t *ppos)
 {
-	struct seq_file *m = filp->private_data;
-	struct i915_error_state_file_priv *error_priv = m->private;
+	struct i915_error_state_file_priv *error_priv = filp->private_data;
 	struct drm_device *dev = error_priv->dev;
 	int ret;
 
@@ -842,25 +911,81 @@
 		kref_get(&error_priv->error->ref);
 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
-	return single_open(file, i915_error_state, error_priv);
+	file->private_data = error_priv;
+
+	return 0;
 }
 
 static int i915_error_state_release(struct inode *inode, struct file *file)
 {
-	struct seq_file *m = file->private_data;
-	struct i915_error_state_file_priv *error_priv = m->private;
+	struct i915_error_state_file_priv *error_priv = file->private_data;
 
 	if (error_priv->error)
 		kref_put(&error_priv->error->ref, i915_error_state_free);
 	kfree(error_priv);
 
-	return single_release(inode, file);
+	return 0;
+}
+
+static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
+				     size_t count, loff_t *pos)
+{
+	struct i915_error_state_file_priv *error_priv = file->private_data;
+	struct drm_i915_error_state_buf error_str;
+	loff_t tmp_pos = 0;
+	ssize_t ret_count = 0;
+	int ret = 0;
+
+	memset(&error_str, 0, sizeof(error_str));
+
+	/* We need to have enough room to store any i915_error_state printf
+	 * so that we can move it to the start position.
+	 */
+	error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+	error_str.buf = kmalloc(error_str.size,
+				GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+	if (error_str.buf == NULL) {
+		error_str.size = PAGE_SIZE;
+		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
+	}
+
+	if (error_str.buf == NULL) {
+		error_str.size = 128;
+		error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
+	}
+
+	if (error_str.buf == NULL)
+		return -ENOMEM;
+
+	error_str.start = *pos;
+
+	ret = i915_error_state(error_priv, &error_str);
+	if (ret)
+		goto out;
+
+	if (error_str.bytes == 0 && error_str.err) {
+		ret = error_str.err;
+		goto out;
+	}
+
+	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
+					    error_str.buf,
+					    error_str.bytes);
+
+	if (ret_count < 0)
+		ret = ret_count;
+	else
+		*pos = error_str.start + ret_count;
+out:
+	kfree(error_str.buf);
+	return ret ?: ret_count;
 }
 
 static const struct file_operations i915_error_state_fops = {
 	.owner = THIS_MODULE,
 	.open = i915_error_state_open,
-	.read = seq_read,
+	.read = i915_error_state_read,
 	.write = i915_error_state_write,
 	.llseek = default_llseek,
 	.release = i915_error_state_release,
@@ -941,7 +1066,7 @@
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1009,6 +1134,26 @@
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
 			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+	} else if (IS_VALLEYVIEW(dev)) {
+		u32 freq_sts, val;
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+
+		val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
+		seq_printf(m, "max GPU freq: %d MHz\n",
+			   vlv_gpu_freq(dev_priv->mem_freq, val));
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
+		seq_printf(m, "min GPU freq: %d MHz\n",
+			   vlv_gpu_freq(dev_priv->mem_freq, val));
+
+		seq_printf(m, "current GPU freq: %d MHz\n",
+			   vlv_gpu_freq(dev_priv->mem_freq,
+					(freq_sts >> 8) & 0xff));
+		mutex_unlock(&dev_priv->rps.hw_lock);
 	} else {
 		seq_printf(m, "no P-state info available\n");
 	}
@@ -1290,6 +1435,25 @@
 	return 0;
 }
 
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_ULT(dev)) {
+		seq_puts(m, "not supported\n");
+		return 0;
+	}
+
+	if (I915_READ(IPS_CTL) & IPS_ENABLE)
+		seq_puts(m, "enabled\n");
+	else
+		seq_puts(m, "disabled\n");
+
+	return 0;
+}
+
 static int i915_sr_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1642,27 +1806,27 @@
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
 	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
-		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+		   vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1812,7 +1976,11 @@
 	if (ret)
 		return ret;
 
-	*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	if (IS_VALLEYVIEW(dev))
+		*val = vlv_gpu_freq(dev_priv->mem_freq,
+				    dev_priv->rps.max_delay);
+	else
+		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -1837,9 +2005,16 @@
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	do_div(val, GT_FREQUENCY_MULTIPLIER);
-	dev_priv->rps.max_delay = val;
-	gen6_set_rps(dev, val);
+	if (IS_VALLEYVIEW(dev)) {
+		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		dev_priv->rps.max_delay = val;
+		gen6_set_rps(dev, val);
+	} else {
+		do_div(val, GT_FREQUENCY_MULTIPLIER);
+		dev_priv->rps.max_delay = val;
+		gen6_set_rps(dev, val);
+	}
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -1863,7 +2038,11 @@
 	if (ret)
 		return ret;
 
-	*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	if (IS_VALLEYVIEW(dev))
+		*val = vlv_gpu_freq(dev_priv->mem_freq,
+				    dev_priv->rps.min_delay);
+	else
+		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -1888,9 +2067,15 @@
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	do_div(val, GT_FREQUENCY_MULTIPLIER);
-	dev_priv->rps.min_delay = val;
-	gen6_set_rps(dev, val);
+	if (IS_VALLEYVIEW(dev)) {
+		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+		dev_priv->rps.min_delay = val;
+		valleyview_set_rps(dev, val);
+	} else {
+		do_div(val, GT_FREQUENCY_MULTIPLIER);
+		dev_priv->rps.min_delay = val;
+		gen6_set_rps(dev, val);
+	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
@@ -2057,6 +2242,7 @@
 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -2066,6 +2252,7 @@
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
 	{"i915_opregion", i915_opregion, 0},
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 3b315ba..c52d866 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -42,7 +42,6 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <acpi/video.h>
-#include <asm/pat.h>
 
 #define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
@@ -956,6 +955,9 @@
 	case I915_PARAM_HAS_BLT:
 		value = intel_ring_initialized(&dev_priv->ring[BCS]);
 		break;
+	case I915_PARAM_HAS_VEBOX:
+		value = intel_ring_initialized(&dev_priv->ring[VECS]);
+		break;
 	case I915_PARAM_HAS_RELAXED_FENCING:
 		value = 1;
 		break;
@@ -1359,8 +1361,10 @@
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
+	drm_mm_takedown(&dev_priv->mm.gtt_space);
 cleanup_irq:
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1397,29 +1401,6 @@
 	master->driver_priv = NULL;
 }
 
-static void
-i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
-		unsigned long size)
-{
-	dev_priv->mm.gtt_mtrr = -1;
-
-#if defined(CONFIG_X86_PAT)
-	if (cpu_has_pat)
-		return;
-#endif
-
-	/* Set up a WC MTRR for non-PAT systems.  This is more common than
-	 * one would think, because the kernel disables PAT on first
-	 * generation Core chips because WC PAT gets overridden by a UC
-	 * MTRR if present.  Even if a UC MTRR isn't present.
-	 */
-	dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
-	if (dev_priv->mm.gtt_mtrr < 0) {
-		DRM_INFO("MTRR allocation failed.  Graphics "
-			 "performance may suffer.\n");
-	}
-}
-
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1431,7 +1412,7 @@
 		return;
 
 	ap->ranges[0].base = dev_priv->gtt.mappable_base;
-	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end;
 
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1445,15 +1426,19 @@
 {
 	const struct intel_device_info *info = dev_priv->info;
 
-#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
-#define DEV_INFO_SEP ,
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
 	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
-			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
 			 info->gen,
 			 dev_priv->dev->pdev->device,
-			 DEV_INFO_FLAGS);
-#undef DEV_INFO_FLAG
-#undef DEV_INFO_SEP
+			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
 }
 
 /**
@@ -1468,7 +1453,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_HASWELL(dev))
+	if (HAS_FPGA_DBG_UNCLAIMED(dev))
 		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 }
 
@@ -1574,8 +1559,8 @@
 		goto out_rmmap;
 	}
 
-	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
-			aperture_size);
+	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+						 aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
@@ -1629,6 +1614,7 @@
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	spin_lock_init(&dev_priv->rps.lock);
+	spin_lock_init(&dev_priv->backlight.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
 	mutex_init(&dev_priv->rps.hw_lock);
@@ -1679,12 +1665,7 @@
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-	if (dev_priv->mm.gtt_mtrr >= 0) {
-		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->gtt.mappable_base,
-			 aperture_size);
-		dev_priv->mm.gtt_mtrr = -1;
-	}
+	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 	dev_priv->gtt.gtt_remove(dev);
 out_rmmap:
@@ -1719,12 +1700,7 @@
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
-	if (dev_priv->mm.gtt_mtrr >= 0) {
-		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->gtt.mappable_base,
-			 dev_priv->gtt.mappable_end);
-		dev_priv->mm.gtt_mtrr = -1;
-	}
+	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
 
 	acpi_video_unregister();
 
@@ -1737,10 +1713,10 @@
 		 * free the memory space allocated for the child device
 		 * config parsed from VBT
 		 */
-		if (dev_priv->child_dev && dev_priv->child_dev_num) {
-			kfree(dev_priv->child_dev);
-			dev_priv->child_dev = NULL;
-			dev_priv->child_dev_num = 0;
+		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+			kfree(dev_priv->vbt.child_dev);
+			dev_priv->vbt.child_dev = NULL;
+			dev_priv->vbt.child_dev_num = 0;
 		}
 
 		vga_switcheroo_unregister_client(dev->pdev);
@@ -1773,6 +1749,7 @@
 			i915_free_hws(dev);
 	}
 
+	drm_mm_takedown(&dev_priv->mm.gtt_space);
 	if (dev_priv->regs != NULL)
 		pci_iounmap(dev->pdev, dev_priv->regs);
 
@@ -1782,6 +1759,8 @@
 	destroy_workqueue(dev_priv->wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
 
+	dev_priv->gtt.gtt_remove(dev);
+
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a2e4953..59ff745 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -128,6 +128,10 @@
 MODULE_PARM_DESC(disable_power_well,
 		 "Disable the power well when possible (default: false)");
 
+int i915_enable_ips __read_mostly = 1;
+module_param_named(enable_ips, i915_enable_ips, int, 0600);
+MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -280,6 +284,7 @@
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	.has_fbc = 1,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
@@ -308,12 +313,19 @@
 static const struct intel_device_info intel_haswell_d_info = {
 	GEN7_FEATURES,
 	.is_haswell = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_vebox_ring = 1,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
 	GEN7_FEATURES,
 	.is_haswell = 1,
 	.is_mobile = 1,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	.has_vebox_ring = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
@@ -549,6 +561,8 @@
 		 */
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+
+		intel_modeset_suspend_hw(dev);
 	}
 
 	i915_save_state(dev);
@@ -855,37 +869,14 @@
 
 int intel_gpu_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = -ENODEV;
-
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
-	case 6:
-		ret = gen6_do_reset(dev);
-		break;
-	case 5:
-		ret = ironlake_do_reset(dev);
-		break;
-	case 4:
-		ret = i965_do_reset(dev);
-		break;
-	case 2:
-		ret = i8xx_do_reset(dev);
-		break;
+	case 6: return gen6_do_reset(dev);
+	case 5: return ironlake_do_reset(dev);
+	case 4: return i965_do_reset(dev);
+	case 2: return i8xx_do_reset(dev);
+	default: return -ENODEV;
 	}
-
-	/* Also reset the gpu hangman. */
-	if (dev_priv->gpu_error.stop_rings) {
-		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->gpu_error.stop_rings = 0;
-		if (ret == -ENODEV) {
-			DRM_ERROR("Reset not implemented, but ignoring "
-				  "error for simulated gpu hangs\n");
-			ret = 0;
-		}
-	}
-
-	return ret;
 }
 
 /**
@@ -906,6 +897,7 @@
 int i915_reset(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool simulated;
 	int ret;
 
 	if (!i915_try_reset)
@@ -915,13 +907,26 @@
 
 	i915_gem_reset(dev);
 
-	ret = -ENODEV;
-	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
+	simulated = dev_priv->gpu_error.stop_rings != 0;
+
+	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-	else
+		ret = -ENODEV;
+	} else {
 		ret = intel_gpu_reset(dev);
 
-	dev_priv->gpu_error.last_reset = get_seconds();
+		/* Also reset the gpu hangman. */
+		if (simulated) {
+			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+			dev_priv->gpu_error.stop_rings = 0;
+			if (ret == -ENODEV) {
+				DRM_ERROR("Reset not implemented, but ignoring "
+					  "error for simulated gpu hangs\n");
+				ret = 0;
+			}
+		} else
+			dev_priv->gpu_error.last_reset = get_seconds();
+	}
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -984,12 +989,6 @@
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (intel_info->is_valleyview)
-		if(!i915_preliminary_hw_support) {
-			DRM_ERROR("Preliminary hardware support disabled\n");
-			return -ENODEV;
-		}
-
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
 	 * us confusion instead, especially on the systems where both
@@ -1218,16 +1217,16 @@
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
-	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
-	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
-	 * harmless to write 0 into. */
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
+	 * the chip from rc6 before touching it for real. MI_MODE is masked,
+	 * hence harmless to write 0 into. */
 	I915_WRITE_NOTRACE(MI_MODE, 0);
 }
 
 static void
 hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
 {
-	if (IS_HASWELL(dev_priv->dev) &&
+	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
 	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
 		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
 			  reg);
@@ -1238,7 +1237,7 @@
 static void
 hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
 {
-	if (IS_HASWELL(dev_priv->dev) &&
+	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
 	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
 		DRM_ERROR("Unclaimed write to %x\n", reg);
 		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d00dc..359a200 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -76,6 +76,8 @@
 };
 #define plane_name(p) ((p) + 'A')
 
+#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
+
 enum port {
 	PORT_A = 0,
 	PORT_B,
@@ -86,6 +88,24 @@
 };
 #define port_name(p) ((p) + 'A')
 
+enum intel_display_power_domain {
+	POWER_DOMAIN_PIPE_A,
+	POWER_DOMAIN_PIPE_B,
+	POWER_DOMAIN_PIPE_C,
+	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+	POWER_DOMAIN_TRANSCODER_A,
+	POWER_DOMAIN_TRANSCODER_B,
+	POWER_DOMAIN_TRANSCODER_C,
+	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
@@ -295,9 +315,8 @@
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
-				 uint32_t sprite_width, int pixel_size);
-	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
-				 struct drm_display_mode *mode);
+				 uint32_t sprite_width, int pixel_size,
+				 bool enable);
 	void (*modeset_global_resources)(struct drm_device *dev);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
@@ -331,68 +350,56 @@
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
 };
 
-#define DEV_INFO_FLAGS \
-	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
-	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
-	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
-	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
-	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
-	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
-	DEV_INFO_FLAG(has_llc)
+#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
+	func(is_mobile) sep \
+	func(is_i85x) sep \
+	func(is_i915g) sep \
+	func(is_i945gm) sep \
+	func(is_g33) sep \
+	func(need_gfx_hws) sep \
+	func(is_g4x) sep \
+	func(is_pineview) sep \
+	func(is_broadwater) sep \
+	func(is_crestline) sep \
+	func(is_ivybridge) sep \
+	func(is_valleyview) sep \
+	func(is_haswell) sep \
+	func(has_force_wake) sep \
+	func(has_fbc) sep \
+	func(has_pipe_cxsr) sep \
+	func(has_hotplug) sep \
+	func(cursor_needs_physical) sep \
+	func(has_overlay) sep \
+	func(overlay_needs_physical) sep \
+	func(supports_tv) sep \
+	func(has_bsd_ring) sep \
+	func(has_blt_ring) sep \
+	func(has_vebox_ring) sep \
+	func(has_llc) sep \
+	func(has_ddi) sep \
+	func(has_fpga_dbg)
+
+#define DEFINE_FLAG(name) u8 name:1
+#define SEP_SEMICOLON ;
 
 struct intel_device_info {
 	u32 display_mmio_offset;
 	u8 num_pipes:3;
 	u8 gen;
-	u8 is_mobile:1;
-	u8 is_i85x:1;
-	u8 is_i915g:1;
-	u8 is_i945gm:1;
-	u8 is_g33:1;
-	u8 need_gfx_hws:1;
-	u8 is_g4x:1;
-	u8 is_pineview:1;
-	u8 is_broadwater:1;
-	u8 is_crestline:1;
-	u8 is_ivybridge:1;
-	u8 is_valleyview:1;
-	u8 has_force_wake:1;
-	u8 is_haswell:1;
-	u8 has_fbc:1;
-	u8 has_pipe_cxsr:1;
-	u8 has_hotplug:1;
-	u8 cursor_needs_physical:1;
-	u8 has_overlay:1;
-	u8 overlay_needs_physical:1;
-	u8 supports_tv:1;
-	u8 has_bsd_ring:1;
-	u8 has_blt_ring:1;
-	u8 has_llc:1;
+	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 };
 
+#undef DEFINE_FLAG
+#undef SEP_SEMICOLON
+
 enum i915_cache_level {
 	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
 	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+typedef uint32_t gen6_gtt_pte_t;
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -428,6 +435,9 @@
 				   struct sg_table *st,
 				   unsigned int pg_start,
 				   enum i915_cache_level cache_level);
+	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
+				     dma_addr_t addr,
+				     enum i915_cache_level level);
 };
 #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
 
@@ -449,6 +459,9 @@
 			       struct sg_table *st,
 			       unsigned int pg_start,
 			       enum i915_cache_level cache_level);
+	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
+				     dma_addr_t addr,
+				     enum i915_cache_level level);
 	int (*enable)(struct drm_device *dev);
 	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
@@ -457,6 +470,7 @@
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_ID 0
 struct i915_hw_context {
+	struct kref ref;
 	int id;
 	bool is_initialized;
 	struct drm_i915_file_private *file_priv;
@@ -658,6 +672,7 @@
 
 struct intel_gen6_power_mgmt {
 	struct work_struct work;
+	struct delayed_work vlv_work;
 	u32 pm_iir;
 	/* lock - irqsave spinlock that protectects the work_struct and
 	 * pm_iir. */
@@ -668,6 +683,7 @@
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
+	u8 rpe_delay;
 	u8 hw_max;
 
 	struct delayed_work delayed_resume_work;
@@ -812,14 +828,21 @@
 	u32 object_count;
 };
 
+struct drm_i915_error_state_buf {
+	unsigned bytes;
+	unsigned size;
+	int err;
+	u8 *buf;
+	loff_t start;
+	loff_t pos;
+};
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
 	/* For reset and error_state handling. */
 	spinlock_t lock;
@@ -875,6 +898,37 @@
 	MODESET_SUSPENDED,
 };
 
+struct intel_vbt_data {
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+	/* Feature bits */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
+	unsigned int fdi_rx_polarity_inverted:1;
+	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+
+	/* eDP */
+	int edp_rate;
+	int edp_lanes;
+	int edp_preemphasis;
+	int edp_vswing;
+	bool edp_initialized;
+	bool edp_support;
+	int edp_bpp;
+	struct edp_power_seq edp_pps;
+
+	int crt_ddc_pin;
+
+	int child_dev_num;
+	struct child_device_config *child_dev;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -941,6 +995,7 @@
 			HPD_MARK_DISABLED = 2
 		} hpd_mark;
 	} hpd_stats[HPD_NUM_PINS];
+	u32 hpd_event_bits;
 	struct timer_list hotplug_reenable_timer;
 
 	int num_pch_pll;
@@ -953,6 +1008,7 @@
 	struct intel_fbc_work *fbc_work;
 
 	struct intel_opregion opregion;
+	struct intel_vbt_data vbt;
 
 	/* overlay */
 	struct intel_overlay *overlay;
@@ -962,37 +1018,15 @@
 	struct {
 		int level;
 		bool enabled;
+		spinlock_t lock; /* bl registers and the above bl fields */
 		struct backlight_device *device;
 	} backlight;
 
 	/* LVDS info */
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
-	/* Feature bits from the VBIOS */
-	unsigned int int_tv_support:1;
-	unsigned int lvds_dither:1;
-	unsigned int lvds_vbt:1;
-	unsigned int int_crt_support:1;
-	unsigned int lvds_use_ssc:1;
-	unsigned int display_clock_mode:1;
-	unsigned int fdi_rx_polarity_inverted:1;
-	int lvds_ssc_freq;
-	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	struct {
-		int rate;
-		int lanes;
-		int preemphasis;
-		int vswing;
-
-		bool initialized;
-		bool support;
-		int bpp;
-		struct edp_power_seq pps;
-	} edp;
 	bool no_aux_handshake;
 
-	int crt_ddc_pin;
 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1020,10 +1054,6 @@
 	/* Kernel Modesetting */
 
 	struct sdvo_device_mapping sdvo_mappings[2];
-	/* indicate whether the LVDS_BORDER should be enabled or not */
-	unsigned int lvds_border_bits;
-	/* Panel fitter placement and size for Ironlake+ */
-	u32 pch_pf_pos, pch_pf_size;
 
 	struct drm_crtc *plane_to_crtc_mapping[3];
 	struct drm_crtc *pipe_to_crtc_mapping[3];
@@ -1038,8 +1068,6 @@
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
 	u16 orig_clock;
-	int child_dev_num;
-	struct child_device_config *child_dev;
 
 	bool mchbar_need_disable;
 
@@ -1059,6 +1087,8 @@
 
 	struct i915_gpu_error gpu_error;
 
+	struct drm_i915_gem_object *vlv_pctx;
+
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 
@@ -1274,6 +1304,9 @@
 	/** Postion in the ringbuffer of the end of the request */
 	u32 tail;
 
+	/** Context related to this request */
+	struct i915_hw_context *ctx;
+
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
@@ -1341,6 +1374,7 @@
 
 #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
@@ -1373,8 +1407,9 @@
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
-#define HAS_DDI(dev)		(IS_HASWELL(dev))
+#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
+#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1435,6 +1470,7 @@
 extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
+extern int i915_enable_ips __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1486,8 +1522,6 @@
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-void intel_enable_asle(struct drm_device *dev);
-
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
 #else
@@ -1703,6 +1737,17 @@
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
 			struct drm_file *file, int to_id);
+void i915_gem_context_free(struct kref *ctx_ref);
+static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
+{
+	kref_get(&ctx->ref);
+}
+
+static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
+{
+	kref_put(&ctx->ref, i915_gem_context_free);
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1784,6 +1829,8 @@
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+__printf(2, 3)
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -1800,7 +1847,7 @@
 /* intel_i2c.c */
 extern int intel_setup_gmbus(struct drm_device *dev);
 extern void intel_teardown_gmbus(struct drm_device *dev);
-extern inline bool intel_gmbus_is_port_valid(unsigned port)
+static inline bool intel_gmbus_is_port_valid(unsigned port)
 {
 	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
 }
@@ -1809,7 +1856,7 @@
 		struct drm_i915_private *dev_priv, unsigned port);
 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 {
 	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
 }
@@ -1821,14 +1868,10 @@
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
-extern void intel_opregion_gse_intr(struct drm_device *dev);
-extern void intel_opregion_enable_asle(struct drm_device *dev);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
 /* intel_acpi.c */
@@ -1842,6 +1885,7 @@
 
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1854,6 +1898,9 @@
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
+extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
@@ -1865,10 +1912,11 @@
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
+					    struct intel_overlay_error_state *error);
 
 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
-extern void intel_display_print_error_state(struct seq_file *m,
+extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct drm_device *dev,
 					    struct intel_display_error_state *error);
 #endif
@@ -1883,8 +1931,20 @@
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
-int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
-int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+
+/* intel_sideband.c */
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+		   enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		     enum intel_sbi_destination destination);
+
+int vlv_gpu_freq(int ddr_freq, int val);
+int vlv_freq_opcode(int ddr_freq, int val);
 
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
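
The i915_drv.h hunk above moves context lifetime management onto a kref: i915_gem_context_reference() and i915_gem_context_unreference() wrap kref_get()/kref_put(), with i915_gem_context_free() running once the last reference is dropped. A minimal userspace sketch of the same get/put-with-release pattern, using C11 atomics in place of the kernel's kref (all names below are illustrative, not driver API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int ref;			/* plays the role of struct kref */
	int id;
};

static struct ctx *ctx_create(int id)
{
	struct ctx *c = calloc(1, sizeof(*c));

	atomic_init(&c->ref, 1);	/* creator owns the first reference */
	c->id = id;
	return c;
}

static void ctx_reference(struct ctx *c)
{
	atomic_fetch_add(&c->ref, 1);	/* kref_get() equivalent */
}

static void ctx_unreference(struct ctx *c)
{
	/* kref_put() equivalent: run the release path on the last drop */
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing context %d\n", c->id);
		free(c);
	}
}

int main(void)
{
	struct ctx *c = ctx_create(1);

	ctx_reference(c);	/* e.g. a request takes a hold on the context */
	ctx_unreference(c);	/* request retired */
	ctx_unreference(c);	/* creator drops its reference: object freed */
	return 0;
}
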
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17..e5b6a92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2039,6 +2039,11 @@
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->tail = request_ring_position;
+	request->ctx = ring->last_context;
+
+	if (request->ctx)
+		i915_gem_context_reference(request->ctx);
+
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
@@ -2091,6 +2096,17 @@
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+	list_del(&request->list);
+	i915_gem_request_remove_from_client(request);
+
+	if (request->ctx)
+		i915_gem_context_unreference(request->ctx);
+
+	kfree(request);
+}
+
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				      struct intel_ring_buffer *ring)
 {
@@ -2101,9 +2117,7 @@
 					   struct drm_i915_gem_request,
 					   list);
 
-		list_del(&request->list);
-		i915_gem_request_remove_from_client(request);
-		kfree(request);
+		i915_gem_free_request(request);
 	}
 
 	while (!list_empty(&ring->active_list)) {
@@ -2195,9 +2209,7 @@
 		 */
 		ring->last_retired_head = request->tail;
 
-		list_del(&request->list);
-		i915_gem_request_remove_from_client(request);
-		kfree(request);
+		i915_gem_free_request(request);
 	}
 
 	/* Move any buffers on the active list that are no longer referenced
@@ -2678,18 +2690,33 @@
 	return fence - dev_priv->fence_regs;
 }
 
+struct write_fence {
+	struct drm_device *dev;
+	struct drm_i915_gem_object *obj;
+	int fence;
+};
+
 static void i915_gem_write_fence__ipi(void *data)
 {
+	struct write_fence *args = data;
+
+	/* Required for SNB+ with LLC */
 	wbinvd();
+
+	/* Required for VLV */
+	i915_gem_write_fence(args->dev, args->fence, args->obj);
 }
 
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg = fence_number(dev_priv, fence);
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct write_fence args = {
+		.dev = obj->base.dev,
+		.fence = fence_number(dev_priv, fence),
+		.obj = enable ? obj : NULL,
+	};
 
 	/* In order to fully serialize access to the fenced region and
 	 * the update to the fence register we need to take extreme
@@ -2700,13 +2727,19 @@
 	 * SNB+ we need to take a step further and emit an explicit wbinvd()
 	 * on each processor in order to manually flush all memory
 	 * transactions before updating the fence register.
+	 *
+	 * However, Valleyview complicates matters. There the wbinvd is
+	 * insufficient and unlike SNB/IVB requires the serialising
+	 * register write. (Note that that register write by itself is
+	 * conversely not sufficient for SNB+.) To compromise, we do both.
 	 */
-	if (HAS_LLC(obj->base.dev))
-		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+	if (INTEL_INFO(args.dev)->gen >= 6)
+		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+	else
+		i915_gem_write_fence(args.dev, args.fence, args.obj);
 
 	if (enable) {
-		obj->fence_reg = fence_reg;
+		obj->fence_reg = args.fence;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
@@ -2932,6 +2965,8 @@
 	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
+	size_t gtt_max = map_and_fenceable ?
+		dev_priv->gtt.mappable_end : dev_priv->gtt.total;
 	int ret;
 
 	fence_size = i915_gem_get_gtt_size(dev,
@@ -2958,9 +2993,11 @@
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size >
-	    (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
-		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+	if (obj->base.size > gtt_max) {
+		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+			  obj->base.size,
+			  map_and_fenceable ? "mappable" : "total",
+			  gtt_max);
 		return -E2BIG;
 	}
 
@@ -2976,14 +3013,10 @@
 		return -ENOMEM;
 	}
 
- search_free:
-	if (map_and_fenceable)
-		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
-							  size, alignment, obj->cache_level,
-							  0, dev_priv->gtt.mappable_end);
-	else
-		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
-						 size, alignment, obj->cache_level);
+search_free:
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+						  size, alignment,
+						  obj->cache_level, 0, gtt_max);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
@@ -3977,12 +4010,21 @@
 			goto cleanup_bsd_ring;
 	}
 
+	if (HAS_VEBOX(dev)) {
+		ret = intel_init_vebox_ring_buffer(dev);
+		if (ret)
+			goto cleanup_blt_ring;
+	}
+
 	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
 	if (ret)
-		goto cleanup_blt_ring;
+		goto cleanup_vebox_ring;
 
 	return 0;
 
+cleanup_vebox_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
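
The fence update path above bundles its parameters into a struct write_fence so that the same arguments reach the IPI callback run on every CPU, since on_each_cpu() hands the handler only a single void * argument. A small sketch of that bundle-arguments-behind-a-void-pointer pattern, with a plain loop standing in for the cross-CPU call (illustrative names, not driver code):

#include <stdio.h>

struct write_fence_args {
	int fence;		/* which fence register to program */
	const char *obj;	/* object backing the fence, or NULL to clear */
};

/* Callback with an on_each_cpu()-style signature: one opaque pointer. */
static void write_fence_on_cpu(void *data)
{
	struct write_fence_args *args = data;

	printf("fence %d <- %s\n", args->fence,
	       args->obj ? args->obj : "(cleared)");
}

/* Stand-in for on_each_cpu(): invoke the callback once per "CPU". */
static void run_on_each_cpu(void (*fn)(void *), void *data, int ncpus)
{
	for (int cpu = 0; cpu < ncpus; cpu++)
		fn(data);
}

int main(void)
{
	struct write_fence_args args = { .fence = 3, .obj = "batch buffer" };

	run_on_each_cpu(write_fence_on_cpu, &args, 4);
	return 0;
}
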
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a1e8ecb..39bcc08 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -124,10 +124,10 @@
 	return ret;
 }
 
-static void do_destroy(struct i915_hw_context *ctx)
+void i915_gem_context_free(struct kref *ctx_ref)
 {
-	if (ctx->file_priv)
-		idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	struct i915_hw_context *ctx = container_of(ctx_ref,
+						   typeof(*ctx), ref);
 
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
@@ -145,6 +145,7 @@
 	if (ctx == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
@@ -155,7 +156,8 @@
 	if (INTEL_INFO(dev)->gen >= 7) {
 		ret = i915_gem_object_set_cache_level(ctx->obj,
 						      I915_CACHE_LLC_MLC);
-		if (ret)
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret))
 			goto err_out;
 	}
 
@@ -169,18 +171,18 @@
 	if (file_priv == NULL)
 		return ctx;
 
-	ctx->file_priv = file_priv;
-
 	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
 			GFP_KERNEL);
 	if (ret < 0)
 		goto err_out;
+
+	ctx->file_priv = file_priv;
 	ctx->id = ret;
 
 	return ctx;
 
 err_out:
-	do_destroy(ctx);
+	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
 }
 
@@ -213,12 +215,16 @@
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 		goto err_destroy;
+	}
 
 	ret = do_switch(ctx);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
 		goto err_unpin;
+	}
 
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
@@ -226,7 +232,7 @@
 err_unpin:
 	i915_gem_object_unpin(ctx->obj);
 err_destroy:
-	do_destroy(ctx);
+	i915_gem_context_unreference(ctx);
 	return ret;
 }
 
@@ -236,6 +242,7 @@
 
 	if (!HAS_HW_CONTEXTS(dev)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
 		return;
 	}
 
@@ -248,11 +255,13 @@
 
 	if (dev_priv->hw_context_size > (1<<20)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
 		return;
 	}
 
 	if (create_default_context(dev_priv)) {
 		dev_priv->hw_contexts_disabled = true;
+		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
 		return;
 	}
 
@@ -262,6 +271,7 @@
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
 
 	if (dev_priv->hw_contexts_disabled)
 		return;
@@ -271,9 +281,16 @@
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+	i915_gem_object_unpin(dctx->obj);
 
-	do_destroy(dev_priv->ring[RCS].default_context);
+	/* When default context is created and switched to, base object refcount
+	 * will be 2 (+1 from object creation and +1 from do_switch()).
+	 * i915_gem_context_fini() will be called after gpu_idle() has switched
+	 * to default context. So we need to unreference the base object once
+	 * to offset the do_switch part, so that i915_gem_context_unreference()
+	 * can then free the base object correctly. */
+	drm_gem_object_unreference(&dctx->obj->base);
+	i915_gem_context_unreference(dctx);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -282,8 +299,7 @@
 
 	BUG_ON(id == DEFAULT_CONTEXT_ID);
 
-	do_destroy(ctx);
-
+	i915_gem_context_unreference(ctx);
 	return 0;
 }
 
@@ -325,6 +341,7 @@
 	if (ret)
 		return ret;
 
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
 	if (IS_GEN7(ring->dev))
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 	else
@@ -353,13 +370,13 @@
 static int do_switch(struct i915_hw_context *to)
 {
 	struct intel_ring_buffer *ring = to->ring;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from_obj == to->obj)
+	if (from == to)
 		return 0;
 
 	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -382,7 +399,7 @@
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -397,9 +414,9 @@
 	 * is a bit suboptimal because the retiring can occur simply after the
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
-	if (from_obj != NULL) {
-		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_gem_object_move_to_active(from_obj, ring);
+	if (from != NULL) {
+		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_gem_object_move_to_active(from->obj, ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -407,15 +424,26 @@
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != ring);
-		i915_gem_object_unpin(from_obj);
+		from->obj->dirty = 1;
+		BUG_ON(from->obj->ring != ring);
 
-		drm_gem_object_unreference(&from_obj->base);
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret) {
+			/* Too late, we've already scheduled a context switch.
+			 * Try to undo the change so that the hw state is
+			 * consistent with our tracking. In case of emergency,
+			 * scream.
+			 */
+			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
+			return ret;
+		}
+
+		i915_gem_object_unpin(from->obj);
+		i915_gem_context_unreference(from);
 	}
 
-	drm_gem_object_reference(&to->obj->base);
-	ring->last_context_obj = to->obj;
+	i915_gem_context_reference(to);
+	ring->last_context = to;
 	to->is_initialized = true;
 
 	return 0;
@@ -444,6 +472,8 @@
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
 
+	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
 
@@ -512,8 +542,8 @@
 		return -ENOENT;
 	}
 
-	do_destroy(ctx);
-
+	idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
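
do_switch() above now remembers the previous context as ring->last_context and transfers references explicitly: the incoming context is referenced before it becomes current, and the outgoing one is unpinned and unreferenced only after the switch has been queued. A compact userspace sketch of that reference-the-new-then-release-the-old handoff (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int ref;
	int id;
};

static void ctx_get(struct ctx *c)
{
	c->ref++;
}

static void ctx_put(struct ctx *c)
{
	if (--c->ref == 0) {
		printf("context %d freed\n", c->id);
		free(c);
	}
}

static struct ctx *last_context;	/* analogue of ring->last_context */

static void switch_context(struct ctx *to)
{
	struct ctx *from = last_context;

	if (from == to)
		return;

	if (to)
		ctx_get(to);		/* the "hardware" now holds 'to' */
	last_context = to;

	if (from)
		ctx_put(from);		/* release the hold on the old context */
}

static struct ctx *ctx_create(int id)
{
	struct ctx *c = calloc(1, sizeof(*c));

	c->ref = 1;			/* creator's reference */
	c->id = id;
	return c;
}

int main(void)
{
	struct ctx *a = ctx_create(1);
	struct ctx *b = ctx_create(2);

	switch_context(a);		/* a: ref 2 */
	switch_context(b);		/* b: ref 2, a: ref 1 */
	ctx_put(a);			/* creator drops a: freed */
	ctx_put(b);			/* creator drops b: ref 1 */
	switch_context(NULL);		/* teardown drops last_context's hold: b freed */
	return 0;
}
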
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 117ce38..a8bb62c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -885,6 +885,15 @@
 			return -EPERM;
 		}
 		break;
+	case I915_EXEC_VEBOX:
+		ring = &dev_priv->ring[VECS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			return -EPERM;
+		}
+		break;
+
 	default:
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
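
The execbuffer hunk above adds a VECS case to the ring-selection switch and, as with the BSD and BLT rings, rejects any non-default context on it. A trimmed-down sketch of that decode-selector-bits-then-validate-per-target pattern; the flag values here are invented for illustration and are not the real UAPI encoding:

#include <stdio.h>

#define EXEC_RING_MASK 0x7u		/* illustrative, not the real UAPI mask */

enum ring { RING_RENDER = 0, RING_BSD, RING_BLT, RING_VEBOX };

static int select_ring(unsigned int flags, unsigned int ctx_id, enum ring *out)
{
	unsigned int sel = flags & EXEC_RING_MASK;

	switch (sel) {
	case RING_RENDER:
		break;				/* contexts allowed here */
	case RING_BSD:
	case RING_BLT:
	case RING_VEBOX:
		if (ctx_id != 0) {
			fprintf(stderr, "ring %u doesn't support contexts\n", sel);
			return -1;		/* -EPERM in the driver */
		}
		break;
	default:
		fprintf(stderr, "unknown ring: %u\n", sel);
		return -1;			/* -EINVAL in the driver */
	}

	*out = (enum ring)sel;
	return 0;
}

int main(void)
{
	enum ring r;

	printf("vebox, default ctx: %d\n", select_ring(RING_VEBOX, 0, &r));
	printf("vebox, ctx 7:       %d\n", select_ring(RING_VEBOX, 7, &r));
	return 0;
}
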
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bdb0d771..ddad13f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,6 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-typedef uint32_t gen6_gtt_pte_t;
-
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 
@@ -44,29 +42,22 @@
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
 
-static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
-					     dma_addr_t addr,
-					     enum i915_cache_level level)
+static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+				      dma_addr_t addr,
+				      enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
 	switch (level) {
 	case I915_CACHE_LLC_MLC:
-		/* Haswell doesn't set L3 this way */
-		if (IS_HASWELL(dev))
-			pte |= GEN6_PTE_CACHE_LLC;
-		else
-			pte |= GEN6_PTE_CACHE_LLC_MLC;
+		pte |= GEN6_PTE_CACHE_LLC_MLC;
 		break;
 	case I915_CACHE_LLC:
 		pte |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		if (IS_HASWELL(dev))
-			pte |= HSW_PTE_UNCACHED;
-		else
-			pte |= GEN6_PTE_UNCACHED;
+		pte |= GEN6_PTE_UNCACHED;
 		break;
 	default:
 		BUG();
@@ -75,16 +66,48 @@
 	return pte;
 }
 
-static int gen6_ppgtt_enable(struct drm_device *dev)
+#define BYT_PTE_WRITEABLE		(1 << 1)
+#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
+
+static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
+				     dma_addr_t addr,
+				     enum i915_cache_level level)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t pd_offset;
-	struct intel_ring_buffer *ring;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	/* Mark the page as writeable.  Other platforms don't have a
+	 * setting for read-only/writable, so this matches that behavior.
+	 */
+	pte |= BYT_PTE_WRITEABLE;
+
+	if (level != I915_CACHE_NONE)
+		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+	return pte;
+}
+
+static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
+				     dma_addr_t addr,
+				     enum i915_cache_level level)
+{
+	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	if (level != I915_CACHE_NONE)
+		pte |= GEN6_PTE_CACHE_LLC;
+
+	return pte;
+}
+
+static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
 	gen6_gtt_pte_t __iomem *pd_addr;
 	uint32_t pd_entry;
 	int i;
 
+	WARN_ON(ppgtt->pd_offset & 0x3f);
 	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
 		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
@@ -97,6 +120,19 @@
 		writel(pd_entry, pd_addr + i);
 	}
 	readl(pd_addr);
+}
+
+static int gen6_ppgtt_enable(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i;
+
+	BUG_ON(ppgtt->pd_offset & 0x3f);
+
+	gen6_write_pdes(ppgtt);
 
 	pd_offset = ppgtt->pd_offset;
 	pd_offset /= 64; /* in cachelines, */
@@ -154,9 +190,9 @@
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
-	scratch_pte = gen6_pte_encode(ppgtt->dev,
-				      ppgtt->scratch_page_dma_addr,
-				      I915_CACHE_LLC);
+	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
+					ppgtt->scratch_page_dma_addr,
+					I915_CACHE_LLC);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
@@ -191,8 +227,8 @@
 		dma_addr_t page_addr;
 
 		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
-						    cache_level);
+		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
+						      cache_level);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
 			act_pt++;
@@ -233,8 +269,15 @@
 	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
 	 * entries. For aliasing ppgtt support we just steal them at the end for
 	 * now. */
-       first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
 
+	if (IS_HASWELL(dev)) {
+		ppgtt->pte_encode = hsw_pte_encode;
+	} else if (IS_VALLEYVIEW(dev)) {
+		ppgtt->pte_encode = byt_pte_encode;
+	} else {
+		ppgtt->pte_encode = gen6_pte_encode;
+	}
 	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
 	ppgtt->enable = gen6_ppgtt_enable;
 	ppgtt->clear_range = gen6_ppgtt_clear_range;
@@ -437,7 +480,8 @@
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
+			  &gtt_entries[i]);
 		i++;
 	}
 
@@ -449,7 +493,7 @@
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1])
-			!= gen6_pte_encode(dev, addr, level));
+			!= dev_priv->gtt.pte_encode(dev, addr, level));
 
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -474,8 +518,9 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
-				      I915_CACHE_LLC);
+	scratch_pte = dev_priv->gtt.pte_encode(dev,
+					       dev_priv->gtt.scratch_page_dma,
+					       I915_CACHE_LLC);
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
@@ -809,6 +854,13 @@
 	} else {
 		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
 		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+		if (IS_HASWELL(dev)) {
+			dev_priv->gtt.pte_encode = hsw_pte_encode;
+		} else if (IS_VALLEYVIEW(dev)) {
+			dev_priv->gtt.pte_encode = byt_pte_encode;
+		} else {
+			dev_priv->gtt.pte_encode = gen6_pte_encode;
+		}
 	}
 
 	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
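
The GTT changes above drop the Haswell and Valleyview special cases from gen6_pte_encode() and instead select a per-platform encode callback (gen6, byt or hsw) once at init, stored in a function pointer that every later PTE write goes through. A standalone sketch of that dispatch-by-function-pointer approach; the bit layout below is made up purely for illustration and does not match real hardware:

#include <stdint.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_LLC_MLC };

typedef uint32_t (*pte_encode_fn)(uint64_t addr, enum cache_level level);

#define PTE_VALID	(1u << 0)	/* bit assignments invented for the sketch */
#define PTE_CACHED	(1u << 1)
#define PTE_SNOOP	(1u << 2)
#define PTE_ADDR(a)	((uint32_t)((a) & ~0xfffull))

static uint32_t gen6_encode(uint64_t addr, enum cache_level level)
{
	return PTE_ADDR(addr) | PTE_VALID |
	       (level != CACHE_NONE ? PTE_CACHED : 0);
}

static uint32_t byt_encode(uint64_t addr, enum cache_level level)
{
	/* Baytrail-style: a snooped-by-CPU bit instead of an LLC cache bit */
	return PTE_ADDR(addr) | PTE_VALID |
	       (level != CACHE_NONE ? PTE_SNOOP : 0);
}

struct gtt {
	pte_encode_fn pte_encode;	/* chosen once at init, used for every PTE */
};

static void gtt_init(struct gtt *gtt, int is_byt)
{
	gtt->pte_encode = is_byt ? byt_encode : gen6_encode;
}

int main(void)
{
	struct gtt gtt;

	gtt_init(&gtt, 1);
	printf("pte = 0x%08x\n",
	       (unsigned int)gtt.pte_encode(0x12345000ull, CACHE_LLC));
	return 0;
}
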
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 130d1db..89cbfab 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -62,7 +62,10 @@
 	 * its value of TOLUD.
 	 */
 	base = 0;
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (IS_VALLEYVIEW(dev)) {
+		pci_read_config_dword(dev->pdev, 0x5c, &base);
+		base &= ~((1<<20) - 1);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		/* Read Base Data of Stolen Memory Register (BDSM) directly.
 		 * Note that there is also a MCHBAR miror at 0x1080c0 or
 		 * we could use device 2:0x5c instead.
@@ -136,6 +139,7 @@
 err_fb:
 	drm_mm_put_block(compressed_fb);
 err:
+	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
 }
 
@@ -182,6 +186,7 @@
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int bios_reserved = 0;
 
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
 	if (dev_priv->mm.stolen_base == 0)
@@ -190,8 +195,12 @@
 	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
 		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
+	if (IS_VALLEYVIEW(dev))
+		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+
 	/* Basic memrange allocator for stolen space */
-	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
+		    bios_reserved);
 
 	return 0;
 }
@@ -330,7 +339,6 @@
 
 	/* KISS and expect everything to be page-aligned */
 	BUG_ON(stolen_offset & 4095);
-	BUG_ON(gtt_offset & 4095);
 	BUG_ON(size & 4095);
 
 	if (WARN_ON(size == 0))
@@ -351,6 +359,10 @@
 		return NULL;
 	}
 
+	/* Some objects just need physical mem from stolen space */
+	if (gtt_offset == -1)
+		return obj;
+
 	/* To simplify the initialisation sequence between KMS and GTT,
 	 * we allow construction of the stolen object prior to
 	 * setting up the GTT space. The actual reservation will occur
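
Two bits of arithmetic appear in the stolen-memory hunk above: the Valleyview base read from config space is rounded down to a 1 MiB boundary with base &= ~((1<<20) - 1), and the allocator range is shrunk by a bios_reserved tail. A tiny sketch of both calculations with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw_base  = 0x7b34a000;		/* pretend config-space value */
	uint32_t base      = raw_base & ~((1u << 20) - 1); /* round down to 1 MiB */
	uint32_t stolen    = 64u << 20;			/* 64 MiB of stolen memory */
	uint32_t bios_rsvd = 1u << 20;			/* top 1 MiB left to the BIOS */
	uint32_t usable    = stolen - bios_rsvd;	/* range given to the allocator */

	printf("base 0x%08x -> 0x%08x, usable %u MiB\n",
	       raw_base, base, usable >> 20);
	return 0;
}
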
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0aa2ef0..e17bbe2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -112,6 +112,213 @@
 	}
 }
 
+static bool ivb_can_enable_err_int(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	enum pipe pipe;
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		if (crtc->cpu_fifo_underrun_disabled)
+			return false;
+	}
+
+	return true;
+}
+
+static bool cpt_can_enable_serr_int(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	struct intel_crtc *crtc;
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		if (crtc->pch_fifo_underrun_disabled)
+			return false;
+	}
+
+	return true;
+}
+
+static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
+						 enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
+					  DE_PIPEB_FIFO_UNDERRUN;
+
+	if (enable)
+		ironlake_enable_display_irq(dev_priv, bit);
+	else
+		ironlake_disable_display_irq(dev_priv, bit);
+}
+
+static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
+						  bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (enable) {
+		if (!ivb_can_enable_err_int(dev))
+			return;
+
+		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
+					 ERR_INT_FIFO_UNDERRUN_B |
+					 ERR_INT_FIFO_UNDERRUN_C);
+
+		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+	} else {
+		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+	}
+}
+
+static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+					    bool enable)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
+						SDE_TRANSB_FIFO_UNDER;
+
+	if (enable)
+		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+	else
+		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
+
+	POSTING_READ(SDEIMR);
+}
+
+static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
+					    enum transcoder pch_transcoder,
+					    bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (enable) {
+		if (!cpt_can_enable_serr_int(dev))
+			return;
+
+		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
+				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
+				     SERR_INT_TRANS_C_FIFO_UNDERRUN);
+
+		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+	} else {
+		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
+	}
+
+	POSTING_READ(SDEIMR);
+}
+
+/**
+ * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pipe: pipe
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable CPU FIFO underruns for a specific
+ * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
+ * reporting for one pipe may also disable all the other CPU error interrupts for
+ * the other pipes, due to the fact that there's just one interrupt mask/enable
+ * bit for all the pipes.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+					   enum pipe pipe, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+	ret = !intel_crtc->cpu_fifo_underrun_disabled;
+
+	if (enable == ret)
+		goto done;
+
+	intel_crtc->cpu_fifo_underrun_disabled = !enable;
+
+	if (IS_GEN5(dev) || IS_GEN6(dev))
+		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
+	else if (IS_GEN7(dev))
+		ivybridge_set_fifo_underrun_reporting(dev, enable);
+
+done:
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	return ret;
+}
+
+/**
+ * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
+ * @dev: drm device
+ * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
+ * @enable: true if we want to report FIFO underrun errors, false otherwise
+ *
+ * This function makes us disable or enable PCH FIFO underruns for a specific
+ * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
+ * underrun reporting for one transcoder may also disable all the other PCH
+ * error interrupts for the other transcoders, due to the fact that there's just
+ * one interrupt mask/enable bit for all the transcoders.
+ *
+ * Returns the previous state of underrun reporting.
+ */
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+					   enum transcoder pch_transcoder,
+					   bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe p;
+	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+	unsigned long flags;
+	bool ret;
+
+	if (HAS_PCH_LPT(dev)) {
+		crtc = NULL;
+		for_each_pipe(p) {
+			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
+			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
+				crtc = c;
+				break;
+			}
+		}
+		if (!crtc) {
+			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
+			return false;
+		}
+	} else {
+		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+	}
+	intel_crtc = to_intel_crtc(crtc);
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+
+	ret = !intel_crtc->pch_fifo_underrun_disabled;
+
+	if (enable == ret)
+		goto done;
+
+	intel_crtc->pch_fifo_underrun_disabled = !enable;
+
+	if (HAS_PCH_IBX(dev))
+		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+	else
+		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
+
+done:
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	return ret;
+}
+
+
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 {
@@ -142,28 +349,21 @@
 }
 
 /**
- * intel_enable_asle - enable ASLE interrupt for OpRegion
+ * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
  */
-void intel_enable_asle(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 
-	/* FIXME: opregion/asle for VLV */
-	if (IS_VALLEYVIEW(dev))
+	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	if (HAS_PCH_SPLIT(dev))
-		ironlake_enable_display_irq(dev_priv, DE_GSE);
-	else {
-		i915_enable_pipestat(dev_priv, 1,
-				     PIPE_LEGACY_BLC_EVENT_ENABLE);
-		if (INTEL_INFO(dev)->gen >= 4)
-			i915_enable_pipestat(dev_priv, 0,
-					     PIPE_LEGACY_BLC_EVENT_ENABLE);
-	}
+	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	if (INTEL_INFO(dev)->gen >= 4)
+		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -181,10 +381,16 @@
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
 
-	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Locking is horribly broken here, but whatever. */
+		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		return intel_crtc->active;
+	} else {
+		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+	}
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -334,6 +540,21 @@
 						     crtc);
 }
 
+static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+{
+	enum drm_connector_status old_status;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	old_status = connector->status;
+
+	connector->status = connector->funcs->detect(connector, false);
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+		      connector->base.id,
+		      drm_get_connector_name(connector),
+		      old_status, connector->status);
+	return (old_status != connector->status);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
@@ -350,6 +571,8 @@
 	struct drm_connector *connector;
 	unsigned long irqflags;
 	bool hpd_disabled = false;
+	bool changed = false;
+	u32 hpd_event_bits;
 
 	/* HPD irq before everything is fully set up. */
 	if (!dev_priv->enable_hotplug_processing)
@@ -359,6 +582,9 @@
 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+	hpd_event_bits = dev_priv->hpd_event_bits;
+	dev_priv->hpd_event_bits = 0;
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		intel_connector = to_intel_connector(connector);
 		intel_encoder = intel_connector->encoder;
@@ -373,6 +599,10 @@
 				| DRM_CONNECTOR_POLL_DISCONNECT;
 			hpd_disabled = true;
 		}
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
+		}
 	}
 	 /* if there were no outputs to poll, poll was disabled,
 	  * therefore make sure it's enabled when disabling HPD on
@@ -385,14 +615,20 @@
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
-	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
-		if (intel_encoder->hot_plug)
-			intel_encoder->hot_plug(intel_encoder);
-
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			if (intel_encoder->hot_plug)
+				intel_encoder->hot_plug(intel_encoder);
+			if (intel_hpd_irq_event(dev, connector))
+				changed = true;
+		}
+	}
 	mutex_unlock(&mode_config->mutex);
 
-	/* Just fire off a uevent and let userspace tell us what to do */
-	drm_helper_hpd_irq_event(dev);
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
 }
 
 static void ironlake_handle_rps_change(struct drm_device *dev)
@@ -464,10 +700,11 @@
 	pm_iir = dev_priv->rps.pm_iir;
 	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
-	I915_WRITE(GEN6_PMIMR, 0);
+	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
+	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->rps.lock);
 
-	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -482,7 +719,21 @@
 	 */
 	if (!(new_delay > dev_priv->rps.max_delay ||
 	      new_delay < dev_priv->rps.min_delay)) {
-		gen6_set_rps(dev_priv->dev, new_delay);
+		if (IS_VALLEYVIEW(dev_priv->dev))
+			valleyview_set_rps(dev_priv->dev, new_delay);
+		else
+			gen6_set_rps(dev_priv->dev, new_delay);
+	}
+
+	if (IS_VALLEYVIEW(dev_priv->dev)) {
+		/*
+		 * On VLV, when we enter RC6 we may not be at the minimum
+		 * voltage level, so arm a timer to check.  It should only
+		 * fire when there's activity or once after we've entered
+		 * RC6, and then won't be re-armed until the next RPS interrupt.
+		 */
+		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
+				 msecs_to_jiffies(100));
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -529,7 +780,7 @@
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -561,7 +812,7 @@
 		return;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -573,25 +824,26 @@
 			       u32 gt_iir)
 {
 
-	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
-		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
 		notify_ring(dev, &dev_priv->ring[RCS]);
-	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
 		notify_ring(dev, &dev_priv->ring[VCS]);
-	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
 		notify_ring(dev, &dev_priv->ring[BCS]);
 
-	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-		      GT_RENDER_CS_ERROR_INTERRUPT)) {
+	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+		      GT_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
 		ivybridge_handle_parity_error(dev);
 }
 
+/* Legacy way of handling PM interrupts */
 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 				u32 pm_iir)
 {
@@ -636,6 +888,7 @@
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
 			continue;
 
+		dev_priv->hpd_event_bits |= (1 << i);
 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
 				   dev_priv->hpd_stats[i].hpd_last_jiffies
 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -643,6 +896,7 @@
 			dev_priv->hpd_stats[i].hpd_cnt = 0;
 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+			dev_priv->hpd_event_bits &= ~(1 << i);
 			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
 			ret = true;
 		} else {
@@ -669,6 +923,38 @@
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+ * we must be able to deal with other PM interrupts. This is complicated because
+ * of the way in which we use the masks to defer the RPS work (which, for
+ * the record, is necessary because of forcewake).
+ */
+static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
+			       u32 pm_iir)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+	if (dev_priv->rps.pm_iir) {
+		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+		/* never want to mask useful interrupts. (also posting read) */
+		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+		/* TODO: if queue_work is slow, move it out of the spinlock */
+		queue_work(dev_priv->wq, &dev_priv->rps.work);
+	}
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+
+		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+			i915_handle_error(dev_priv->dev, false);
+		}
+	}
+}
+
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -740,7 +1026,7 @@
 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
 			gmbus_irq_handler(dev);
 
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		if (pm_iir & GEN6_PM_RPS_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
@@ -763,10 +1049,12 @@
 			ibx_hpd_irq_setup(dev);
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 	}
-	if (pch_iir & SDE_AUDIO_POWER_MASK)
-		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
-				 SDE_AUDIO_POWER_SHIFT);
+	if (pch_iir & SDE_AUDIO_POWER_MASK) {
+		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
+			       SDE_AUDIO_POWER_SHIFT);
+		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+				 port_name(port));
+	}
 
 	if (pch_iir & SDE_AUX_MASK)
 		dp_aux_irq_handler(dev);
@@ -795,10 +1083,64 @@
 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
-	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
-		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
-		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+							  false))
+			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+							  false))
+			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+}
+
+static void ivb_err_int_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 err_int = I915_READ(GEN7_ERR_INT);
+
+	if (err_int & ERR_INT_POISON)
+		DRM_ERROR("Poison interrupt\n");
+
+	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
+		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
+		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
+		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
+			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+
+	I915_WRITE(GEN7_ERR_INT, err_int);
+}
+
+static void cpt_serr_int_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 serr_int = I915_READ(SERR_INT);
+
+	if (serr_int & SERR_INT_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
+		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
+							  false))
+			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+
+	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
+		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
+							  false))
+			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+
+	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
+		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
+							  false))
+			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+
+	I915_WRITE(SERR_INT, serr_int);
 }
 
 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
@@ -812,10 +1154,12 @@
 			ibx_hpd_irq_setup(dev);
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 	}
-	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
-		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
-				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
-				 SDE_AUDIO_POWER_SHIFT_CPT);
+	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
+		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+			       SDE_AUDIO_POWER_SHIFT_CPT);
+		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
+				 port_name(port));
+	}
 
 	if (pch_iir & SDE_AUX_MASK_CPT)
 		dp_aux_irq_handler(dev);
@@ -834,6 +1178,9 @@
 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
 					 pipe_name(pipe),
 					 I915_READ(FDI_RX_IIR(pipe)));
+
+	if (pch_iir & SDE_ERROR_CPT)
+		cpt_serr_int_handler(dev);
 }
 
 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
@@ -846,6 +1193,14 @@
 
 	atomic_inc(&dev_priv->irq_received);
 
+	/* We get interrupts on unclaimed registers, so check for this before we
+	 * do any I915_{READ,WRITE}. */
+	if (IS_HASWELL(dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed register before interrupt\n");
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -861,6 +1216,12 @@
 		POSTING_READ(SDEIER);
 	}
 
+	/* On Haswell, also mask ERR_INT because we don't want to risk
+	 * generating "unclaimed register" interrupts from inside the interrupt
+	 * handler. */
+	if (IS_HASWELL(dev))
+		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -870,11 +1231,14 @@
 
 	de_iir = I915_READ(DEIIR);
 	if (de_iir) {
+		if (de_iir & DE_ERR_INT_IVB)
+			ivb_err_int_handler(dev);
+
 		if (de_iir & DE_AUX_CHANNEL_A_IVB)
 			dp_aux_irq_handler(dev);
 
 		if (de_iir & DE_GSE_IVB)
-			intel_opregion_gse_intr(dev);
+			intel_opregion_asle_intr(dev);
 
 		for (i = 0; i < 3; i++) {
 			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
@@ -901,12 +1265,17 @@
 
 	pm_iir = I915_READ(GEN6_PMIIR);
 	if (pm_iir) {
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		if (IS_HASWELL(dev))
+			hsw_pm_irq_handler(dev_priv, pm_iir);
+		else if (pm_iir & GEN6_PM_RPS_EVENTS)
 			gen6_queue_rps_work(dev_priv, pm_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
 		ret = IRQ_HANDLED;
 	}
 
+	if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
+		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
 	if (!HAS_PCH_NOP(dev)) {
@@ -921,9 +1290,10 @@
 			       struct drm_i915_private *dev_priv,
 			       u32 gt_iir)
 {
-	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+	if (gt_iir &
+	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
 		notify_ring(dev, &dev_priv->ring[RCS]);
-	if (gt_iir & GT_BSD_USER_INTERRUPT)
+	if (gt_iir & ILK_BSD_USER_INTERRUPT)
 		notify_ring(dev, &dev_priv->ring[VCS]);
 }
 
@@ -968,7 +1338,7 @@
 		dp_aux_irq_handler(dev);
 
 	if (de_iir & DE_GSE)
-		intel_opregion_gse_intr(dev);
+		intel_opregion_asle_intr(dev);
 
 	if (de_iir & DE_PIPEA_VBLANK)
 		drm_handle_vblank(dev, 0);
@@ -976,6 +1346,17 @@
 	if (de_iir & DE_PIPEB_VBLANK)
 		drm_handle_vblank(dev, 1);
 
+	if (de_iir & DE_POISON)
+		DRM_ERROR("Poison interrupt\n");
+
+	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
 	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
 		intel_finish_page_flip_plane(dev, 0);
@@ -1002,7 +1383,7 @@
 	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
 		ironlake_handle_rps_change(dev);
 
-	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
 		gen6_queue_rps_work(dev_priv, pm_iir);
 
 	I915_WRITE(GTIIR, gt_iir);
@@ -1222,11 +1603,13 @@
 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
 		i915_error_object_free(error->ring[i].batchbuffer);
 		i915_error_object_free(error->ring[i].ringbuffer);
+		i915_error_object_free(error->ring[i].ctx);
 		kfree(error->ring[i].requests);
 	}
 
 	kfree(error->active_bo);
 	kfree(error->overlay);
+	kfree(error->display);
 	kfree(error);
 }
 static void capture_bo(struct drm_i915_error_buffer *err,
@@ -1932,11 +2315,11 @@
 			  struct drm_i915_gem_request, list)->seqno;
 }
 
-static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring,
+				     u32 ring_seqno, bool *err)
 {
 	if (list_empty(&ring->request_list) ||
-	    i915_seqno_passed(ring->get_seqno(ring, false),
-			      ring_last_seqno(ring))) {
+	    i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
 		if (waitqueue_active(&ring->irq_queue)) {
 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -2003,28 +2386,33 @@
 	return false;
 }
 
+static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring)
+{
+	if (IS_GEN2(ring->dev))
+		return false;
+
+	/* Is the chip hanging on a WAIT_FOR_EVENT?
+	 * If so we can simply poke the RB_WAIT bit
+	 * and break the hang. This should work on
+	 * all but the second generation chipsets.
+	 */
+	return !kick_ring(ring);
+}
+
 static bool i915_hangcheck_hung(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
 		bool hung = true;
+		struct intel_ring_buffer *ring;
+		int i;
 
 		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
 		i915_handle_error(dev, true);
 
-		if (!IS_GEN2(dev)) {
-			struct intel_ring_buffer *ring;
-			int i;
-
-			/* Is the chip hanging on a WAIT_FOR_EVENT?
-			 * If so we can simply poke the RB_WAIT bit
-			 * and break the hang. This should work on
-			 * all but the second generation chipsets.
-			 */
-			for_each_ring(ring, dev_priv, i)
-				hung &= !kick_ring(ring);
-		}
+		for_each_ring(ring, dev_priv, i)
+			hung &= i915_hangcheck_ring_hung(ring);
 
 		return hung;
 	}
@@ -2042,19 +2430,19 @@
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
 	struct intel_ring_buffer *ring;
 	bool err = false, idle;
 	int i;
+	u32 seqno[I915_NUM_RINGS];
+	bool work_done;
 
 	if (!i915_enable_hangcheck)
 		return;
 
-	memset(acthd, 0, sizeof(acthd));
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-	    idle &= i915_hangcheck_ring_idle(ring, &err);
-	    acthd[i] = intel_ring_get_active_head(ring);
+		seqno[i] = ring->get_seqno(ring, false);
+		idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err);
 	}
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
@@ -2070,20 +2458,19 @@
 		return;
 	}
 
-	i915_get_extra_instdone(dev, instdone);
-	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
-		   sizeof(acthd)) == 0 &&
-	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
-		   sizeof(instdone)) == 0) {
+	work_done = false;
+	for_each_ring(ring, dev_priv, i) {
+		if (ring->hangcheck.seqno != seqno[i]) {
+			work_done = true;
+			ring->hangcheck.seqno = seqno[i];
+		}
+	}
+
+	if (!work_done) {
 		if (i915_hangcheck_hung(dev))
 			return;
 	} else {
 		dev_priv->gpu_error.hangcheck_count = 0;
-
-		memcpy(dev_priv->gpu_error.last_acthd, acthd,
-		       sizeof(acthd));
-		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
-		       sizeof(instdone));
 	}
 
 repeat:
@@ -2113,6 +2500,42 @@
 	I915_WRITE(GTIER, 0x0);
 	POSTING_READ(GTIER);
 
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	/*
+	 * SDEIER is also touched by the interrupt handler to work around missed
+	 * PCH interrupts. Hence we can't update it after the interrupt handler
+	 * is enabled - instead we unconditionally enable all PCH interrupt
+	 * sources here, but then only unmask them as needed with SDEIMR.
+	 */
+	I915_WRITE(SDEIER, 0xffffffff);
+	POSTING_READ(SDEIER);
+}
+
+static void ivybridge_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(HWSTAM, 0xeffe);
+
+	/* XXX hotplug from PCH */
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	POSTING_READ(DEIER);
+
+	/* and GT */
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	/* Power management */
+	I915_WRITE(GEN6_PMIMR, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0x0);
+	POSTING_READ(GEN6_PMIER);
+
 	if (HAS_PCH_NOP(dev))
 		return;
 
@@ -2201,14 +2624,18 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 mask;
 
-	if (HAS_PCH_IBX(dev))
-		mask = SDE_GMBUS | SDE_AUX_MASK;
-	else
-		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
-
 	if (HAS_PCH_NOP(dev))
 		return;
 
+	if (HAS_PCH_IBX(dev)) {
+		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
+		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+	} else {
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+
+		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
+	}
+
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
 	I915_WRITE(SDEIMR, ~mask);
 }
@@ -2219,8 +2646,9 @@
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-			   DE_AUX_CHANNEL_A;
-	u32 render_irqs;
+			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
+	u32 gt_irqs;
 
 	dev_priv->irq_mask = ~display_mask;
 
@@ -2235,17 +2663,15 @@
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
+	gt_irqs = GT_RENDER_USER_INTERRUPT;
+
 	if (IS_GEN6(dev))
-		render_irqs =
-			GT_USER_INTERRUPT |
-			GEN6_BSD_USER_INTERRUPT |
-			GEN6_BLITTER_USER_INTERRUPT;
+		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 	else
-		render_irqs =
-			GT_USER_INTERRUPT |
-			GT_PIPE_NOTIFY |
-			GT_BSD_USER_INTERRUPT;
-	I915_WRITE(GTIER, render_irqs);
+		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
+			   ILK_BSD_USER_INTERRUPT;
+
+	I915_WRITE(GTIER, gt_irqs);
 	POSTING_READ(GTIER);
 
 	ibx_irq_postinstall(dev);
@@ -2269,12 +2695,15 @@
 		DE_PLANEC_FLIP_DONE_IVB |
 		DE_PLANEB_FLIP_DONE_IVB |
 		DE_PLANEA_FLIP_DONE_IVB |
-		DE_AUX_CHANNEL_A_IVB;
-	u32 render_irqs;
+		DE_AUX_CHANNEL_A_IVB |
+		DE_ERR_INT_IVB;
+	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
+	u32 gt_irqs;
 
 	dev_priv->irq_mask = ~display_mask;
 
 	/* should always can generate irq */
+	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
 	I915_WRITE(DEIMR, dev_priv->irq_mask);
 	I915_WRITE(DEIER,
@@ -2284,16 +2713,32 @@
 		   DE_PIPEA_VBLANK_IVB);
 	POSTING_READ(DEIER);
 
-	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
-	I915_WRITE(GTIER, render_irqs);
+	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+	I915_WRITE(GTIER, gt_irqs);
 	POSTING_READ(GTIER);
 
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+	if (HAS_VEBOX(dev))
+		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
+			PM_VEBOX_CS_ERROR_INTERRUPT;
+
+	/* Our enable/disable rps functions may touch these registers so
+	 * make sure to set a known state for only the non-RPS bits.
+	 * The RMW is extra paranoia since this should be called after being set
+	 * to a known state in preinstall.
+	 */
+	I915_WRITE(GEN6_PMIMR,
+		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
+	I915_WRITE(GEN6_PMIER,
+		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
+	POSTING_READ(GEN6_PMIER);
+
 	ibx_irq_postinstall(dev);
 
 	return 0;
@@ -2302,10 +2747,9 @@
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 gt_irqs;
 	u32 enable_mask;
 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
-	u32 render_irqs;
-	u16 msid;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2321,13 +2765,6 @@
 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-	/* Hack for broken MSIs on VLV */
-	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
-	pci_read_config_word(dev->pdev, 0x98, &msid);
-	msid &= 0xff; /* mask out delivery bits */
-	msid |= (1<<14);
-	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
-
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	POSTING_READ(PORT_HOTPLUG_EN);
 
@@ -2348,9 +2785,9 @@
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
-		GEN6_BLITTER_USER_INTERRUPT;
-	I915_WRITE(GTIER, render_irqs);
+	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
+		GT_BLT_USER_INTERRUPT;
+	I915_WRITE(GTIER, gt_irqs);
 	POSTING_READ(GTIER);
 
 	/* ack & enable invalid PTE error interrupts */
@@ -2402,6 +2839,8 @@
 	I915_WRITE(DEIMR, 0xffffffff);
 	I915_WRITE(DEIER, 0x0);
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
+	if (IS_GEN7(dev))
+		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
 
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
@@ -2413,6 +2852,8 @@
 	I915_WRITE(SDEIMR, 0xffffffff);
 	I915_WRITE(SDEIER, 0x0);
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
+		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
 }
 
 static void i8xx_irq_preinstall(struct drm_device * dev)
@@ -2626,7 +3067,7 @@
 	I915_WRITE(IER, enable_mask);
 	POSTING_READ(IER);
 
-	intel_opregion_enable_asle(dev);
+	i915_enable_asle_pipestat(dev);
 
 	return 0;
 }
@@ -2860,7 +3301,7 @@
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	POSTING_READ(PORT_HOTPLUG_EN);
 
-	intel_opregion_enable_asle(dev);
+	i915_enable_asle_pipestat(dev);
 
 	return 0;
 }
@@ -3113,9 +3554,9 @@
 		dev->driver->disable_vblank = valleyview_disable_vblank;
 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-		/* Share pre & uninstall handlers with ILK/SNB */
+		/* Share uninstall handlers with ILK/SNB */
 		dev->driver->irq_handler = ivybridge_irq_handler;
-		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
 		dev->driver->enable_vblank = ivybridge_enable_vblank;
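
The hotplug rework above latches per-pin event bits (dev_priv->hpd_event_bits) in the interrupt handler, then has the work function copy and clear them under the lock and do the slow connector re-detection outside it. A condensed sketch of that latch-then-drain pattern, with a pthread mutex standing in for the spinlock (illustrative names only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hpd_event_bits;	/* pins that fired since the last drain */

/* "Interrupt" side: record the pin under the lock and return quickly. */
static void hpd_irq(int pin)
{
	pthread_mutex_lock(&irq_lock);
	hpd_event_bits |= 1u << pin;
	pthread_mutex_unlock(&irq_lock);
}

/* Work side: snapshot and clear under the lock, process outside it. */
static void hpd_work(void)
{
	unsigned int bits;
	int pin;

	pthread_mutex_lock(&irq_lock);
	bits = hpd_event_bits;
	hpd_event_bits = 0;
	pthread_mutex_unlock(&irq_lock);

	for (pin = 0; pin < 8; pin++)
		if (bits & (1u << pin))
			printf("re-detecting connector on pin %d\n", pin);
}

int main(void)
{
	hpd_irq(2);
	hpd_irq(5);
	hpd_work();
	return 0;
}
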
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d6b62e..5a593d2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -265,13 +265,19 @@
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
 #define  MI_SEMAPHORE_COMPARE	    (1<<20)
 #define  MI_SEMAPHORE_REGISTER	    (1<<18)
-#define  MI_SEMAPHORE_SYNC_RV	    (2<<16)
-#define  MI_SEMAPHORE_SYNC_RB	    (0<<16)
-#define  MI_SEMAPHORE_SYNC_VR	    (0<<16)
-#define  MI_SEMAPHORE_SYNC_VB	    (2<<16)
-#define  MI_SEMAPHORE_SYNC_BR	    (2<<16)
-#define  MI_SEMAPHORE_SYNC_BV	    (0<<16)
-#define  MI_SEMAPHORE_SYNC_INVALID  (1<<0)
+#define  MI_SEMAPHORE_SYNC_VR	    (0<<16) /* RCS  wait for VCS  (RVSYNC) */
+#define  MI_SEMAPHORE_SYNC_VER	    (1<<16) /* RCS  wait for VECS (RVESYNC) */
+#define  MI_SEMAPHORE_SYNC_BR	    (2<<16) /* RCS  wait for BCS  (RBSYNC) */
+#define  MI_SEMAPHORE_SYNC_BV	    (0<<16) /* VCS  wait for BCS  (VBSYNC) */
+#define  MI_SEMAPHORE_SYNC_VEV	    (1<<16) /* VCS  wait for VECS (VVESYNC) */
+#define  MI_SEMAPHORE_SYNC_RV	    (2<<16) /* VCS  wait for RCS  (VRSYNC) */
+#define  MI_SEMAPHORE_SYNC_RB	    (0<<16) /* BCS  wait for RCS  (BRSYNC) */
+#define  MI_SEMAPHORE_SYNC_VEB	    (1<<16) /* BCS  wait for VECS (BVESYNC) */
+#define  MI_SEMAPHORE_SYNC_VB	    (2<<16) /* BCS  wait for VCS  (BVSYNC) */
+#define  MI_SEMAPHORE_SYNC_BVE	    (0<<16) /* VECS wait for BCS  (VEBSYNC) */
+#define  MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
+#define  MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
+#define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
 /*
  * 3D instructions used by the kernel
  */
@@ -342,33 +348,74 @@
 #define  DEBUG_RESET_DISPLAY		(1<<9)
 
 /*
- * DPIO - a special bus for various display related registers to hide behind:
- *  0x800c: m1, m2, n, p1, p2, k dividers
- *  0x8014: REF and SFR select
- *  0x8014: N divider, VCO select
- *  0x801c/3c: core clock bits
- *  0x8048/68: low pass filter coefficients
- *  0x8100: fast clock controls
+ * IOSF sideband
+ */
+#define VLV_IOSF_DOORBELL_REQ			(VLV_DISPLAY_BASE + 0x2100)
+#define   IOSF_DEVFN_SHIFT			24
+#define   IOSF_OPCODE_SHIFT			16
+#define   IOSF_PORT_SHIFT			8
+#define   IOSF_BYTE_ENABLES_SHIFT		4
+#define   IOSF_BAR_SHIFT			1
+#define   IOSF_SB_BUSY				(1<<0)
+#define   IOSF_PORT_PUNIT			0x4
+#define   IOSF_PORT_NC				0x11
+#define   IOSF_PORT_DPIO			0x12
+#define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
+#define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
+
+#define PUNIT_OPCODE_REG_READ			6
+#define PUNIT_OPCODE_REG_WRITE			7
+
+#define PUNIT_REG_GPU_LFM			0xd3
+#define PUNIT_REG_GPU_FREQ_REQ			0xd4
+#define PUNIT_REG_GPU_FREQ_STS			0xd8
+#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc
+
+#define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */
+#define PUNIT_FUSE_BUS1				0xf5 /* bits 55:48 */
+
+#define IOSF_NC_FB_GFX_FREQ_FUSE		0x1c
+#define   FB_GFX_MAX_FREQ_FUSE_SHIFT		3
+#define   FB_GFX_MAX_FREQ_FUSE_MASK		0x000007f8
+#define   FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT	11
+#define   FB_GFX_FGUARANTEED_FREQ_FUSE_MASK	0x0007f800
+#define IOSF_NC_FB_GFX_FMAX_FUSE_HI		0x34
+#define   FB_FMAX_VMIN_FREQ_HI_MASK		0x00000007
+#define IOSF_NC_FB_GFX_FMAX_FUSE_LO		0x30
+#define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
+#define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
+
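A minimal sketch of how a register read could be sequenced through the IOSF
doorbell interface above (hypothetical helper, not the driver's actual
accessor; the devfn value and the I915_READ/I915_WRITE MMIO wrappers are
assumptions here):

	static u32 vlv_punit_read_sketch(struct drm_i915_private *dev_priv, u32 addr)
	{
		u32 cmd = (0 << IOSF_DEVFN_SHIFT) |	/* requester devfn, assumed 0 */
			  (PUNIT_OPCODE_REG_READ << IOSF_OPCODE_SHIFT) |
			  (IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |
			  (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			  IOSF_SB_BUSY;

		/* wait for any previous transaction to drain */
		while (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY)
			cpu_relax();

		I915_WRITE(VLV_IOSF_ADDR, addr);	/* register on the target port */
		I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);	/* kick the request */

		/* completion is signalled by the busy bit clearing again */
		while (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY)
			cpu_relax();

		return I915_READ(VLV_IOSF_DATA);
	}
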
+/*
+ * DPIO - a special bus for various display related registers to hide behind
  *
  * DPIO is VLV only.
+ *
+ * Note: digital port B is DDI0, digital port C is DDI1
  */
-#define DPIO_PKT			(VLV_DISPLAY_BASE + 0x2100)
-#define  DPIO_RID			(0<<24)
-#define  DPIO_OP_WRITE			(1<<16)
-#define  DPIO_OP_READ			(0<<16)
-#define  DPIO_PORTID			(0x12<<8)
-#define  DPIO_BYTE			(0xf<<4)
-#define  DPIO_BUSY			(1<<0) /* status only */
-#define DPIO_DATA			(VLV_DISPLAY_BASE + 0x2104)
-#define DPIO_REG			(VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_DEVFN			0
+#define DPIO_OPCODE_REG_WRITE		1
+#define DPIO_OPCODE_REG_READ		0
+
 #define DPIO_CTL			(VLV_DISPLAY_BASE + 0x2110)
 #define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS		(1<<1)
 #define  DPIO_RESET			(1<<0)
 
+#define _DPIO_TX3_SWING_CTL4_A		0x690
+#define _DPIO_TX3_SWING_CTL4_B		0x2a90
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
+					_DPIO_TX3_SWING_CTL4_B)
+
+/*
+ * Per pipe/PLL DPIO regs
+ */
 #define _DPIO_DIV_A			0x800c
 #define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
+#define   DPIO_POST_DIV_DAC		0
+#define   DPIO_POST_DIV_HDMIDP		1 /* DAC 225-400M rate */
+#define   DPIO_POST_DIV_LVDS1		2
+#define   DPIO_POST_DIV_LVDS2		3
 #define   DPIO_K_SHIFT			(24) /* 4 bits */
 #define   DPIO_P1_SHIFT			(21) /* 3 bits */
 #define   DPIO_P2_SHIFT			(16) /* 5 bits */
@@ -394,14 +441,111 @@
 #define _DPIO_CORE_CLK_B		0x803c
 #define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
 
+#define _DPIO_IREF_CTL_A		0x8040
+#define _DPIO_IREF_CTL_B		0x8060
+#define DPIO_IREF_CTL(pipe) _PIPE(pipe, _DPIO_IREF_CTL_A, _DPIO_IREF_CTL_B)
+
+#define DPIO_IREF_BCAST			0xc044
+#define _DPIO_IREF_A			0x8044
+#define _DPIO_IREF_B			0x8064
+#define DPIO_IREF(pipe) _PIPE(pipe, _DPIO_IREF_A, _DPIO_IREF_B)
+
+#define _DPIO_PLL_CML_A			0x804c
+#define _DPIO_PLL_CML_B			0x806c
+#define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B)
+
 #define _DPIO_LFP_COEFF_A		0x8048
 #define _DPIO_LFP_COEFF_B		0x8068
 #define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
 
+#define DPIO_CALIBRATION		0x80ac
+
 #define DPIO_FASTCLK_DISABLE		0x8100
 
-#define DPIO_DATA_CHANNEL1		0x8220
-#define DPIO_DATA_CHANNEL2		0x8420
+/*
+ * Per DDI channel DPIO regs
+ */
+
+#define _DPIO_PCS_TX_0			0x8200
+#define _DPIO_PCS_TX_1			0x8400
+#define   DPIO_PCS_TX_LANE2_RESET	(1<<16)
+#define   DPIO_PCS_TX_LANE1_RESET	(1<<7)
+#define DPIO_PCS_TX(port) _PORT(port, _DPIO_PCS_TX_0, _DPIO_PCS_TX_1)
+
+#define _DPIO_PCS_CLK_0			0x8204
+#define _DPIO_PCS_CLK_1			0x8404
+#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN	(1<<22)
+#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define   DPIO_PCS_CLK_DATAWIDTH_SHIFT	(6)
+#define   DPIO_PCS_CLK_SOFT_RESET	(1<<5)
+#define DPIO_PCS_CLK(port) _PORT(port, _DPIO_PCS_CLK_0, _DPIO_PCS_CLK_1)
+
+#define _DPIO_PCS_CTL_OVR1_A		0x8224
+#define _DPIO_PCS_CTL_OVR1_B		0x8424
+#define DPIO_PCS_CTL_OVER1(port) _PORT(port, _DPIO_PCS_CTL_OVR1_A, \
+				       _DPIO_PCS_CTL_OVR1_B)
+
+#define _DPIO_PCS_STAGGER0_A		0x822c
+#define _DPIO_PCS_STAGGER0_B		0x842c
+#define DPIO_PCS_STAGGER0(port) _PORT(port, _DPIO_PCS_STAGGER0_A, \
+				      _DPIO_PCS_STAGGER0_B)
+
+#define _DPIO_PCS_STAGGER1_A		0x8230
+#define _DPIO_PCS_STAGGER1_B		0x8430
+#define DPIO_PCS_STAGGER1(port) _PORT(port, _DPIO_PCS_STAGGER1_A, \
+				      _DPIO_PCS_STAGGER1_B)
+
+#define _DPIO_PCS_CLOCKBUF0_A		0x8238
+#define _DPIO_PCS_CLOCKBUF0_B		0x8438
+#define DPIO_PCS_CLOCKBUF0(port) _PORT(port, _DPIO_PCS_CLOCKBUF0_A, \
+				       _DPIO_PCS_CLOCKBUF0_B)
+
+#define _DPIO_PCS_CLOCKBUF8_A		0x825c
+#define _DPIO_PCS_CLOCKBUF8_B		0x845c
+#define DPIO_PCS_CLOCKBUF8(port) _PORT(port, _DPIO_PCS_CLOCKBUF8_A, \
+				       _DPIO_PCS_CLOCKBUF8_B)
+
+#define _DPIO_TX_SWING_CTL2_A		0x8288
+#define _DPIO_TX_SWING_CTL2_B		0x8488
+#define DPIO_TX_SWING_CTL2(port) _PORT(port, _DPIO_TX_SWING_CTL2_A, \
+				       _DPIO_TX_SWING_CTL2_B)
+
+#define _DPIO_TX_SWING_CTL3_A		0x828c
+#define _DPIO_TX_SWING_CTL3_B		0x848c
+#define DPIO_TX_SWING_CTL3(port) _PORT(port, _DPIO_TX_SWING_CTL3_A, \
+				       _DPIO_TX_SWING_CTL3_B)
+
+#define _DPIO_TX_SWING_CTL4_A		0x8290
+#define _DPIO_TX_SWING_CTL4_B		0x8490
+#define DPIO_TX_SWING_CTL4(port) _PORT(port, _DPIO_TX_SWING_CTL4_A, \
+				       _DPIO_TX_SWING_CTL4_B)
+
+#define _DPIO_TX_OCALINIT_0		0x8294
+#define _DPIO_TX_OCALINIT_1		0x8494
+#define   DPIO_TX_OCALINIT_EN		(1<<31)
+#define DPIO_TX_OCALINIT(port) _PORT(port, _DPIO_TX_OCALINIT_0, \
+				     _DPIO_TX_OCALINIT_1)
+
+#define _DPIO_TX_CTL_0			0x82ac
+#define _DPIO_TX_CTL_1			0x84ac
+#define DPIO_TX_CTL(port) _PORT(port, _DPIO_TX_CTL_0, _DPIO_TX_CTL_1)
+
+#define _DPIO_TX_LANE_0			0x82b8
+#define _DPIO_TX_LANE_1			0x84b8
+#define DPIO_TX_LANE(port) _PORT(port, _DPIO_TX_LANE_0, _DPIO_TX_LANE_1)
+
+#define _DPIO_DATA_CHANNEL1		0x8220
+#define _DPIO_DATA_CHANNEL2		0x8420
+#define DPIO_DATA_CHANNEL(port) _PORT(port, _DPIO_DATA_CHANNEL1, _DPIO_DATA_CHANNEL2)
+
+#define _DPIO_PORT0_PCS0		0x0220
+#define _DPIO_PORT0_PCS1		0x0420
+#define _DPIO_PORT1_PCS2		0x2620
+#define _DPIO_PORT1_PCS3		0x2820
+#define DPIO_DATA_LANE_A(port) _PORT(port, _DPIO_PORT0_PCS0, _DPIO_PORT1_PCS2)
+#define DPIO_DATA_LANE_B(port) _PORT(port, _DPIO_PORT0_PCS1, _DPIO_PORT1_PCS3)
+#define DPIO_DATA_CHANNEL1              0x8220
+#define DPIO_DATA_CHANNEL2              0x8420
 
 /*
  * Fence registers
@@ -443,6 +587,7 @@
 #define RENDER_RING_BASE	0x02000
 #define BSD_RING_BASE		0x04000
 #define GEN6_BSD_RING_BASE	0x12000
+#define VEBOX_RING_BASE		0x1a000
 #define BLT_RING_BASE		0x22000
 #define RING_TAIL(base)		((base)+0x30)
 #define RING_HEAD(base)		((base)+0x34)
@@ -450,12 +595,20 @@
 #define RING_CTL(base)		((base)+0x3c)
 #define RING_SYNC_0(base)	((base)+0x40)
 #define RING_SYNC_1(base)	((base)+0x44)
-#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
-#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
-#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
-#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
-#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
-#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define RING_SYNC_2(base)	((base)+0x48)
+#define GEN6_RVSYNC	(RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC	(RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_RVESYNC	(RING_SYNC_2(RENDER_RING_BASE))
+#define GEN6_VBSYNC	(RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_VRSYNC	(RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VVESYNC	(RING_SYNC_2(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC	(RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC	(RING_SYNC_1(BLT_RING_BASE))
+#define GEN6_BVESYNC	(RING_SYNC_2(BLT_RING_BASE))
+#define GEN6_VEBSYNC	(RING_SYNC_0(VEBOX_RING_BASE))
+#define GEN6_VERSYNC	(RING_SYNC_1(VEBOX_RING_BASE))
+#define GEN6_VEVSYNC	(RING_SYNC_2(VEBOX_RING_BASE))
+#define GEN6_NOSYNC 0
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
@@ -467,6 +620,7 @@
 #define DONE_REG		0x40b0
 #define BSD_HWS_PGA_GEN7	(0x04180)
 #define BLT_HWS_PGA_GEN7	(0x04280)
+#define VEBOX_HWS_PGA_GEN7	(0x04380)
 #define RING_ACTHD(base)	((base)+0x74)
 #define RING_NOPID(base)	((base)+0x94)
 #define RING_IMR(base)		((base)+0xa8)
@@ -527,7 +681,11 @@
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
-#define   ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define   ERR_INT_POISON		(1<<31)
+#define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
+#define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
+#define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
+#define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
 
 #define FPGA_DBG		0x42300
 #define   FPGA_DBG_RM_NOCLAIM	(1<<31)
@@ -583,24 +741,7 @@
 #define VLV_IIR		(VLV_DISPLAY_BASE + 0x20a4)
 #define VLV_IMR		(VLV_DISPLAY_BASE + 0x20a8)
 #define VLV_ISR		(VLV_DISPLAY_BASE + 0x20ac)
-#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
-#define   I915_DISPLAY_PORT_INTERRUPT			(1<<17)
-#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
-#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14) /* p-state */
-#define   I915_HWB_OOM_INTERRUPT			(1<<13)
-#define   I915_SYNC_STATUS_INTERRUPT			(1<<12)
-#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
-#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
-#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
-#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
-#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
-#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
-#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
-#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
-#define   I915_DEBUG_INTERRUPT				(1<<2)
-#define   I915_USER_INTERRUPT				(1<<1)
-#define   I915_ASLE_INTERRUPT				(1<<0)
-#define   I915_BSD_USER_INTERRUPT                      (1<<25)
+#define VLV_PCBR	(VLV_DISPLAY_BASE + 0x2120)
 #define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
 #define EIR		0x020b0
 #define EMR		0x020b4
@@ -712,28 +853,6 @@
 #define CACHE_MODE_1		0x7004 /* IVB+ */
 #define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 
-/* GEN6 interrupt control
- * Note that the per-ring interrupt bits do alias with the global interrupt bits
- * in GTIMR. */
-#define GEN6_RENDER_HWSTAM	0x2098
-#define GEN6_RENDER_IMR		0x20a8
-#define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
-#define   GEN6_RENDER_PPGTT_PAGE_FAULT			(1 << 7)
-#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED		(1 << 6)
-#define   GEN6_RENDER_L3_PARITY_ERROR			(1 << 5)
-#define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT	(1 << 4)
-#define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR	(1 << 3)
-#define   GEN6_RENDER_SYNC_STATUS			(1 << 2)
-#define   GEN6_RENDER_DEBUG_INTERRUPT			(1 << 1)
-#define   GEN6_RENDER_USER_INTERRUPT			(1 << 0)
-
-#define GEN6_BLITTER_HWSTAM	0x22098
-#define GEN6_BLITTER_IMR	0x220a8
-#define   GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT	(1 << 26)
-#define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
-#define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
-#define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
-
 #define GEN6_BLITTER_ECOSKPD	0x221d0
 #define   GEN6_BLITTER_LOCK_SHIFT			16
 #define   GEN6_BLITTER_FBC_NOTIFY			(1<<3)
@@ -744,9 +863,52 @@
 #define   GEN6_BSD_SLEEP_INDICATOR	(1 << 3)
 #define   GEN6_BSD_GO_INDICATOR		(1 << 4)
 
-#define GEN6_BSD_HWSTAM			0x12098
-#define GEN6_BSD_IMR			0x120a8
-#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)
+/* On modern GEN architectures interrupt control consists of two sets
+ * of registers. The first set pertains to the ring generating the
+ * interrupt. The second set is for the functional block the ring
+ * belongs to: PM, GT, DE, etc.
+ *
+ * Luckily *knocks on wood* all the ring interrupt bits match up with the
+ * GT interrupt bits, so we don't need to duplicate the defines.
+ *
+ * These defines should cover us well from SNB->HSW; with minor
+ * exceptions they can also work on ILK.
+ */
+#define GT_BLT_FLUSHDW_NOTIFY_INTERRUPT		(1 << 26)
+#define GT_BLT_CS_ERROR_INTERRUPT		(1 << 25)
+#define GT_BLT_USER_INTERRUPT			(1 << 22)
+#define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
+#define GT_BSD_USER_INTERRUPT			(1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
+#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
+#define GT_RENDER_SYNC_STATUS_INTERRUPT		(1 <<  2)
+#define GT_RENDER_DEBUG_INTERRUPT		(1 <<  1)
+#define GT_RENDER_USER_INTERRUPT		(1 <<  0)
+
+#define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
+#define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
+
+/* These are all the "old" interrupts */
+#define ILK_BSD_USER_INTERRUPT				(1<<5)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT			(1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14) /* p-state */
+#define I915_HWB_OOM_INTERRUPT				(1<<13)
+#define I915_SYNC_STATUS_INTERRUPT			(1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
+#define I915_DEBUG_INTERRUPT				(1<<2)
+#define I915_USER_INTERRUPT				(1<<1)
+#define I915_ASLE_INTERRUPT				(1<<0)
+#define I915_BSD_USER_INTERRUPT				(1 << 25)
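Since the per-ring bits alias the GT bits, enabling a ring interrupt amounts
to unmasking the same define in both the ring IMR and the GT block registers.
A minimal sketch (hypothetical helper, assuming the RING_IMR() and
GTIMR/GTIER registers defined elsewhere in this file):

	static void gen6_enable_blt_irq_sketch(struct drm_i915_private *dev_priv)
	{
		u32 bit = GT_BLT_USER_INTERRUPT;

		/* unmask at the ring ... */
		I915_WRITE(RING_IMR(BLT_RING_BASE), ~bit);
		/* ... and unmask/enable the aliased bit at the GT block */
		I915_WRITE(GTIMR, I915_READ(GTIMR) & ~bit);
		I915_WRITE(GTIER, I915_READ(GTIER) | bit);
		POSTING_READ(GTIER);
	}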
 
 #define GEN6_BSD_RNCID			0x12198
 
@@ -807,7 +969,9 @@
 #define   DPFC_CTL_EN		(1<<31)
 #define   DPFC_CTL_PLANEA	(0<<30)
 #define   DPFC_CTL_PLANEB	(1<<30)
+#define   IVB_DPFC_CTL_PLANE_SHIFT	(29)
 #define   DPFC_CTL_FENCE_EN	(1<<29)
+#define   IVB_DPFC_CTL_FENCE_EN	(1<<28)
 #define   DPFC_CTL_PERSISTENT_MODE	(1<<25)
 #define   DPFC_SR_EN		(1<<10)
 #define   DPFC_CTL_LIMIT_1X	(0<<6)
@@ -840,6 +1004,7 @@
 #define ILK_DPFC_CHICKEN	0x43224
 #define ILK_FBC_RT_BASE		0x2128
 #define   ILK_FBC_RT_VALID	(1<<0)
+#define   SNB_FBC_FRONT_BUFFER	(1<<1)
 
 #define ILK_DISPLAY_CHICKEN1	0x42000
 #define   ILK_FBCQ_DIS		(1<<22)
@@ -855,6 +1020,21 @@
 #define   SNB_CPU_FENCE_ENABLE	(1<<29)
 #define DPFC_CPU_FENCE_OFFSET	0x100104
 
+/* Framebuffer compression for Ivybridge */
+#define IVB_FBC_RT_BASE			0x7020
+
+#define IPS_CTL		0x43408
+#define   IPS_ENABLE	(1 << 31)
+
+#define _HSW_PIPE_SLICE_CHICKEN_1_A	0x420B0
+#define _HSW_PIPE_SLICE_CHICKEN_1_B	0x420B4
+#define   HSW_BYPASS_FBC_QUEUE		(1<<22)
+#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, \
+					     _HSW_PIPE_SLICE_CHICKEN_1_A, \
+					     _HSW_PIPE_SLICE_CHICKEN_1_B)
+
+#define HSW_CLKGATE_DISABLE_PART_1	0x46500
+#define   HSW_DPFC_GATING_DISABLE	(1<<23)
 
 /*
  * GPIO regs
@@ -963,7 +1143,10 @@
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 #define   DPLL_LOCK_VLV			(1<<15)
+#define   DPLL_INTEGRATED_CRI_CLK_VLV	(1<<14)
 #define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
+#define   DPLL_PORTC_READY_MASK		(0xf << 4)
+#define   DPLL_PORTB_READY_MASK		(0xf)
 
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
 /*
@@ -1967,6 +2150,10 @@
 #define   BLM_PIPE_A			(0 << 29)
 #define   BLM_PIPE_B			(1 << 29)
 #define   BLM_PIPE_C			(2 << 29) /* ivb + */
+#define   BLM_TRANSCODER_A		BLM_PIPE_A /* hsw */
+#define   BLM_TRANSCODER_B		BLM_PIPE_B
+#define   BLM_TRANSCODER_C		BLM_PIPE_C
+#define   BLM_TRANSCODER_EDP		(3 << 29)
 #define   BLM_PIPE(pipe)		((pipe) << 29)
 #define   BLM_POLARITY_I965		(1 << 28) /* gen4 only */
 #define   BLM_PHASE_IN_INTERUPT_STATUS	(1 << 26)
@@ -2540,9 +2727,7 @@
 #define   DP_PRE_EMPHASIS_SHIFT		22
 
 /* How many wires to use. I guess 3 was too hard */
-#define   DP_PORT_WIDTH_1		(0 << 19)
-#define   DP_PORT_WIDTH_2		(1 << 19)
-#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH(width)		(((width) - 1) << 19)
 #define   DP_PORT_WIDTH_MASK		(7 << 19)
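With the parameterized macro, callers encode the lane count directly instead
of picking one of the old fixed DP_PORT_WIDTH_X1/X2/X4 values. A sketch of
the intended use (the intel_dp field names here are assumptions for
illustration):

	intel_dp->DP &= ~DP_PORT_WIDTH_MASK;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);	/* 1, 2 or 4 lanes */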
 
 /* Mystic DPCD version 1.1 special mode */
@@ -2646,18 +2831,20 @@
  * which is after the LUTs, so we want the bytes for our color format.
  * For our current usage, this is always 3, one byte for R, G and B.
  */
-#define _PIPEA_GMCH_DATA_M			0x70050
-#define _PIPEB_GMCH_DATA_M			0x71050
+#define _PIPEA_DATA_M_G4X	0x70050
+#define _PIPEB_DATA_M_G4X	0x71050
 
 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
 #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE_SHIFT		25
 #define  TU_SIZE_MASK           (0x3f << 25)
 
 #define  DATA_LINK_M_N_MASK	(0xffffff)
 #define  DATA_LINK_N_MAX	(0x800000)
 
-#define _PIPEA_GMCH_DATA_N			0x70054
-#define _PIPEB_GMCH_DATA_N			0x71054
+#define _PIPEA_DATA_N_G4X	0x70054
+#define _PIPEB_DATA_N_G4X	0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
 
 /*
  * Computing Link M and N values for the Display Port link
@@ -2670,16 +2857,18 @@
  * Attributes and VB-ID.
  */
 
-#define _PIPEA_DP_LINK_M				0x70060
-#define _PIPEB_DP_LINK_M				0x71060
+#define _PIPEA_LINK_M_G4X	0x70060
+#define _PIPEB_LINK_M_G4X	0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
 
-#define _PIPEA_DP_LINK_N				0x70064
-#define _PIPEB_DP_LINK_N				0x71064
+#define _PIPEA_LINK_N_G4X	0x70064
+#define _PIPEB_LINK_N_G4X	0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
 
-#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
-#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
-#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
-#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+#define PIPE_DATA_M_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
+#define PIPE_DATA_N_G4X(pipe) _PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
+#define PIPE_LINK_M_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+#define PIPE_LINK_N_G4X(pipe) _PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
 
 /* Display & cursor control */
 
@@ -2715,6 +2904,7 @@
 #define   PIPECONF_INTERLACED_ILK		(3 << 21)
 #define   PIPECONF_INTERLACED_DBL_ILK		(4 << 21) /* ilk/snb only */
 #define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK	(5 << 21) /* ilk/snb only */
+#define   PIPECONF_INTERLACE_MODE_MASK		(7 << 21)
 #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
 #define   PIPECONF_COLOR_RANGE_SELECT	(1 << 13)
 #define   PIPECONF_BPC_MASK	(0x7 << 5)
@@ -2915,6 +3105,10 @@
 #define WM3S_LP_IVB		0x45128
 #define  WM1S_LP_EN		(1<<31)
 
+#define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
+	(WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
+	 ((fbc) << WM1_LP_FBC_SHIFT) | ((pri) << WM1_LP_SR_SHIFT) | (cur))
+
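Hypothetical use of the packing macro (the register name and the individual
latency/FBC/primary/cursor watermark values are assumptions for illustration):

	I915_WRITE(WM3_LP_ILK, HSW_WM_LP_VAL(latency, fbc_lines, pri_blocks, cur_blocks));
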
 /* Memory latency timer register */
 #define MLTR_ILK		0x11222
 #define  MLTR_WM1_SHIFT		0
@@ -3474,6 +3668,15 @@
 #define _LGC_PALETTE_B           0x4a800
 #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
 
+#define _GAMMA_MODE_A		0x4a480
+#define _GAMMA_MODE_B		0x4ac80
+#define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
+#define GAMMA_MODE_MODE_MASK	(3 << 0)
+#define GAMMA_MODE_MODE_8bit	(0 << 0)
+#define GAMMA_MODE_MODE_10bit	(1 << 0)
+#define GAMMA_MODE_MODE_12bit	(2 << 0)
+#define GAMMA_MODE_MODE_SPLIT	(3 << 0)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL   (1 << 31)
 #define DE_SPRITEB_FLIP_DONE    (1 << 29)
@@ -3502,7 +3705,7 @@
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
 
 /* More Ivybridge lolz */
-#define DE_ERR_DEBUG_IVB		(1<<30)
+#define DE_ERR_INT_IVB			(1<<30)
 #define DE_GSE_IVB			(1<<29)
 #define DE_PCH_EVENT_IVB		(1<<28)
 #define DE_DP_A_HOTPLUG_IVB		(1<<27)
@@ -3525,21 +3728,6 @@
 #define DEIIR   0x44008
 #define DEIER   0x4400c
 
-/* GT interrupt.
- * Note that for gen6+ the ring-specific interrupt bits do alias with the
- * corresponding bits in the per-ring interrupt control registers. */
-#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT	(1 << 26)
-#define GT_GEN6_BLT_CS_ERROR_INTERRUPT		(1 << 25)
-#define GT_GEN6_BLT_USER_INTERRUPT		(1 << 22)
-#define GT_GEN6_BSD_CS_ERROR_INTERRUPT		(1 << 15)
-#define GT_GEN6_BSD_USER_INTERRUPT		(1 << 12)
-#define GT_BSD_USER_INTERRUPT			(1 << 5) /* ilk only */
-#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT	(1 << 5)
-#define GT_PIPE_NOTIFY				(1 << 4)
-#define GT_RENDER_CS_ERROR_INTERRUPT		(1 << 3)
-#define GT_SYNC_STATUS				(1 << 2)
-#define GT_USER_INTERRUPT			(1 << 0)
-
 #define GTISR   0x44010
 #define GTIMR   0x44014
 #define GTIIR   0x44018
@@ -3569,6 +3757,9 @@
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE	(1 << 5)
 # define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
 
+#define CHICKEN_PAR1_1		0x42080
+#define  FORCE_ARB_IDLE_PLANES	(1 << 14)
+
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
@@ -3661,6 +3852,7 @@
 				 SDE_PORTC_HOTPLUG_CPT |	\
 				 SDE_PORTB_HOTPLUG_CPT)
 #define SDE_GMBUS_CPT		(1 << 17)
+#define SDE_ERROR_CPT		(1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
 #define SDE_AUDIO_CP_CHG_C_CPT	(1 << 9)
 #define SDE_FDI_RXC_CPT		(1 << 8)
@@ -3685,6 +3877,12 @@
 #define SDEIIR  0xc4008
 #define SDEIER  0xc400c
 
+#define SERR_INT			0xc4040
+#define  SERR_INT_POISON		(1<<31)
+#define  SERR_INT_TRANS_C_FIFO_UNDERRUN	(1<<6)
+#define  SERR_INT_TRANS_B_FIFO_UNDERRUN	(1<<3)
+#define  SERR_INT_TRANS_A_FIFO_UNDERRUN	(1<<0)
+
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
@@ -3794,34 +3992,34 @@
 
 /* transcoder */
 
-#define _TRANS_HTOTAL_A          0xe0000
-#define  TRANS_HTOTAL_SHIFT     16
-#define  TRANS_HACTIVE_SHIFT    0
-#define _TRANS_HBLANK_A          0xe0004
-#define  TRANS_HBLANK_END_SHIFT 16
-#define  TRANS_HBLANK_START_SHIFT 0
-#define _TRANS_HSYNC_A           0xe0008
-#define  TRANS_HSYNC_END_SHIFT  16
-#define  TRANS_HSYNC_START_SHIFT 0
-#define _TRANS_VTOTAL_A          0xe000c
-#define  TRANS_VTOTAL_SHIFT     16
-#define  TRANS_VACTIVE_SHIFT    0
-#define _TRANS_VBLANK_A          0xe0010
-#define  TRANS_VBLANK_END_SHIFT 16
-#define  TRANS_VBLANK_START_SHIFT 0
-#define _TRANS_VSYNC_A           0xe0014
-#define  TRANS_VSYNC_END_SHIFT  16
-#define  TRANS_VSYNC_START_SHIFT 0
-#define _TRANS_VSYNCSHIFT_A	0xe0028
+#define _PCH_TRANS_HTOTAL_A		0xe0000
+#define  TRANS_HTOTAL_SHIFT		16
+#define  TRANS_HACTIVE_SHIFT		0
+#define _PCH_TRANS_HBLANK_A		0xe0004
+#define  TRANS_HBLANK_END_SHIFT		16
+#define  TRANS_HBLANK_START_SHIFT	0
+#define _PCH_TRANS_HSYNC_A		0xe0008
+#define  TRANS_HSYNC_END_SHIFT		16
+#define  TRANS_HSYNC_START_SHIFT	0
+#define _PCH_TRANS_VTOTAL_A		0xe000c
+#define  TRANS_VTOTAL_SHIFT		16
+#define  TRANS_VACTIVE_SHIFT		0
+#define _PCH_TRANS_VBLANK_A		0xe0010
+#define  TRANS_VBLANK_END_SHIFT		16
+#define  TRANS_VBLANK_START_SHIFT	0
+#define _PCH_TRANS_VSYNC_A		0xe0014
+#define  TRANS_VSYNC_END_SHIFT		16
+#define  TRANS_VSYNC_START_SHIFT	0
+#define _PCH_TRANS_VSYNCSHIFT_A		0xe0028
 
-#define _TRANSA_DATA_M1          0xe0030
-#define _TRANSA_DATA_N1          0xe0034
-#define _TRANSA_DATA_M2          0xe0038
-#define _TRANSA_DATA_N2          0xe003c
-#define _TRANSA_DP_LINK_M1       0xe0040
-#define _TRANSA_DP_LINK_N1       0xe0044
-#define _TRANSA_DP_LINK_M2       0xe0048
-#define _TRANSA_DP_LINK_N2       0xe004c
+#define _PCH_TRANSA_DATA_M1	0xe0030
+#define _PCH_TRANSA_DATA_N1	0xe0034
+#define _PCH_TRANSA_DATA_M2	0xe0038
+#define _PCH_TRANSA_DATA_N2	0xe003c
+#define _PCH_TRANSA_LINK_M1	0xe0040
+#define _PCH_TRANSA_LINK_N1	0xe0044
+#define _PCH_TRANSA_LINK_M2	0xe0048
+#define _PCH_TRANSA_LINK_N2	0xe004c
 
 /* Per-transcoder DIP controls */
 
@@ -3890,44 +4088,45 @@
 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \
 	 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
-#define _TRANS_HTOTAL_B          0xe1000
-#define _TRANS_HBLANK_B          0xe1004
-#define _TRANS_HSYNC_B           0xe1008
-#define _TRANS_VTOTAL_B          0xe100c
-#define _TRANS_VBLANK_B          0xe1010
-#define _TRANS_VSYNC_B           0xe1014
-#define _TRANS_VSYNCSHIFT_B	 0xe1028
+#define _PCH_TRANS_HTOTAL_B          0xe1000
+#define _PCH_TRANS_HBLANK_B          0xe1004
+#define _PCH_TRANS_HSYNC_B           0xe1008
+#define _PCH_TRANS_VTOTAL_B          0xe100c
+#define _PCH_TRANS_VBLANK_B          0xe1010
+#define _PCH_TRANS_VSYNC_B           0xe1014
+#define _PCH_TRANS_VSYNCSHIFT_B	 0xe1028
 
-#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
-#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
-#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
-#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
-#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
-#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
-#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
-				     _TRANS_VSYNCSHIFT_B)
+#define PCH_TRANS_HTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
+#define PCH_TRANS_HBLANK(pipe) _PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
+#define PCH_TRANS_HSYNC(pipe) _PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
+#define PCH_TRANS_VTOTAL(pipe) _PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
+#define PCH_TRANS_VBLANK(pipe) _PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
+#define PCH_TRANS_VSYNC(pipe) _PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
+#define PCH_TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, \
+					 _PCH_TRANS_VSYNCSHIFT_B)
 
-#define _TRANSB_DATA_M1          0xe1030
-#define _TRANSB_DATA_N1          0xe1034
-#define _TRANSB_DATA_M2          0xe1038
-#define _TRANSB_DATA_N2          0xe103c
-#define _TRANSB_DP_LINK_M1       0xe1040
-#define _TRANSB_DP_LINK_N1       0xe1044
-#define _TRANSB_DP_LINK_M2       0xe1048
-#define _TRANSB_DP_LINK_N2       0xe104c
+#define _PCH_TRANSB_DATA_M1	0xe1030
+#define _PCH_TRANSB_DATA_N1	0xe1034
+#define _PCH_TRANSB_DATA_M2	0xe1038
+#define _PCH_TRANSB_DATA_N2	0xe103c
+#define _PCH_TRANSB_LINK_M1	0xe1040
+#define _PCH_TRANSB_LINK_N1	0xe1044
+#define _PCH_TRANSB_LINK_M2	0xe1048
+#define _PCH_TRANSB_LINK_N2	0xe104c
 
-#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
-#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
-#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
-#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
-#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
-#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
-#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
-#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+#define PCH_TRANS_DATA_M1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+#define PCH_TRANS_DATA_N1(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+#define PCH_TRANS_DATA_M2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+#define PCH_TRANS_DATA_N2(pipe) _PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+#define PCH_TRANS_LINK_M1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+#define PCH_TRANS_LINK_N1(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+#define PCH_TRANS_LINK_M2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+#define PCH_TRANS_LINK_N2(pipe) _PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
 
-#define _TRANSACONF              0xf0008
-#define _TRANSBCONF              0xf1008
-#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
+#define _PCH_TRANSACONF              0xf0008
+#define _PCH_TRANSBCONF              0xf1008
+#define PCH_TRANSCONF(pipe) _PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
+#define LPT_TRANSCONF		_PCH_TRANSACONF /* lpt has only one transcoder */
 #define  TRANS_DISABLE          (0<<31)
 #define  TRANS_ENABLE           (1<<31)
 #define  TRANS_STATE_MASK       (1<<30)
@@ -4011,10 +4210,9 @@
 #define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
 #define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
 #define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
-#define  FDI_DP_PORT_WIDTH_X1           (0<<19)
-#define  FDI_DP_PORT_WIDTH_X2           (1<<19)
-#define  FDI_DP_PORT_WIDTH_X3           (2<<19)
-#define  FDI_DP_PORT_WIDTH_X4           (3<<19)
+#define  FDI_DP_PORT_WIDTH_SHIFT		19
+#define  FDI_DP_PORT_WIDTH_MASK			(7 << FDI_DP_PORT_WIDTH_SHIFT)
+#define  FDI_DP_PORT_WIDTH(width)           (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
 #define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
 /* Ironlake: hardwired to 1 */
 #define  FDI_TX_PLL_ENABLE              (1<<14)
@@ -4039,7 +4237,6 @@
 /* train, dp width same as FDI_TX */
 #define  FDI_FS_ERRC_ENABLE		(1<<27)
 #define  FDI_FE_ERRC_ENABLE		(1<<26)
-#define  FDI_DP_PORT_WIDTH_X8           (7<<19)
 #define  FDI_RX_POLARITY_REVERSED_LPT	(1<<16)
 #define  FDI_8BPC                       (0<<16)
 #define  FDI_10BPC                      (1<<16)
@@ -4061,9 +4258,6 @@
 #define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
 #define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
 #define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
-/* LPT */
-#define  FDI_PORT_WIDTH_2X_LPT			(1<<19)
-#define  FDI_PORT_WIDTH_1X_LPT			(0<<19)
 
 #define _FDI_RXA_MISC			0xf0010
 #define _FDI_RXB_MISC			0xf1010
@@ -4309,6 +4503,7 @@
 #define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
 #define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
 #define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   GEN7_RC_CTL_TO_MODE			(1<<28)
 #define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
 #define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
 #define GEN6_RP_DOWN_TIMEOUT			0xA010
@@ -4370,7 +4565,7 @@
 #define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
 #define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
 #define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
-#define  GEN6_PM_DEFERRED_EVENTS		(GEN6_PM_RP_UP_THRESHOLD | \
+#define  GEN6_PM_RPS_EVENTS			(GEN6_PM_RP_UP_THRESHOLD | \
 						 GEN6_PM_RP_DOWN_THRESHOLD | \
 						 GEN6_PM_RP_DOWN_TIMEOUT)
 
@@ -4392,20 +4587,6 @@
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
 
-#define VLV_IOSF_DOORBELL_REQ			0x182100
-#define   IOSF_DEVFN_SHIFT			24
-#define   IOSF_OPCODE_SHIFT			16
-#define   IOSF_PORT_SHIFT			8
-#define   IOSF_BYTE_ENABLES_SHIFT		4
-#define   IOSF_BAR_SHIFT			1
-#define   IOSF_SB_BUSY				(1<<0)
-#define   IOSF_PORT_PUNIT			0x4
-#define VLV_IOSF_DATA				0x182104
-#define VLV_IOSF_ADDR				0x182108
-
-#define PUNIT_OPCODE_REG_READ			6
-#define PUNIT_OPCODE_REG_WRITE			7
-
 #define GEN6_GT_CORE_STATUS		0x138060
 #define   GEN6_CORE_CPD_STATE_MASK	(7<<4)
 #define   GEN6_RCn_MASK			7
@@ -4602,9 +4783,6 @@
 #define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5<<12)
 #define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6<<12)
 #define  TRANS_DDI_BFI_ENABLE		(1<<4)
-#define  TRANS_DDI_PORT_WIDTH_X1	(0<<1)
-#define  TRANS_DDI_PORT_WIDTH_X2	(1<<1)
-#define  TRANS_DDI_PORT_WIDTH_X4	(3<<1)
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A			0x64040
@@ -4648,9 +4826,7 @@
 #define  DDI_BUF_PORT_REVERSAL			(1<<16)
 #define  DDI_BUF_IS_IDLE			(1<<7)
 #define  DDI_A_4_LANES				(1<<4)
-#define  DDI_PORT_WIDTH_X1			(0<<1)
-#define  DDI_PORT_WIDTH_X2			(1<<1)
-#define  DDI_PORT_WIDTH_X4			(3<<1)
+#define  DDI_PORT_WIDTH(width)			(((width) - 1) << 1)
 #define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
 
 /* DDI Buffer Translations */
@@ -4774,6 +4950,9 @@
 #define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
 #define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
 
+#define WM_MISC				0x45260
+#define  WM_MISC_DATA_PARTITION_5_6	(1 << 0)
+
 #define WM_DBG				0x45280
 #define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
 #define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
@@ -4787,6 +4966,9 @@
 #define _PIPE_A_CSC_COEFF_RV_GV	0x49020
 #define _PIPE_A_CSC_COEFF_BV	0x49024
 #define _PIPE_A_CSC_MODE	0x49028
+#define   CSC_BLACK_SCREEN_OFFSET	(1 << 2)
+#define   CSC_POSITION_BEFORE_GAMMA	(1 << 1)
+#define   CSC_MODE_YUV_TO_RGB		(1 << 0)
 #define _PIPE_A_CSC_PREOFF_HI	0x49030
 #define _PIPE_A_CSC_PREOFF_ME	0x49034
 #define _PIPE_A_CSC_PREOFF_LO	0x49038
@@ -4808,10 +4990,6 @@
 #define _PIPE_B_CSC_POSTOFF_ME	0x49144
 #define _PIPE_B_CSC_POSTOFF_LO	0x49148
 
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
-
 #define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
 #define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
 #define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 41f0fde..88b9a66 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -192,6 +192,7 @@
 static void i915_save_display(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	/* Display arbitration control */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -202,6 +203,8 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_save_display_reg(dev);
 
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
 	/* LVDS state */
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
@@ -222,6 +225,8 @@
 			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
 	}
 
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
 	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
 		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
@@ -257,6 +262,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 mask = 0xffffffff;
+	unsigned long flags;
 
 	/* Display arbitration */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -265,6 +271,8 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_restore_display_reg(dev);
 
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
 	/* LVDS state */
 	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
@@ -304,6 +312,8 @@
 		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 	}
 
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
 	/* only restore FBC info on the platform that supports FBC*/
 	intel_disable_fbc(dev);
 	if (I915_HAS_FBC(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d5e1890..6875b56 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -212,7 +212,13 @@
 	int ret;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+	if (IS_VALLEYVIEW(dev_priv->dev)) {
+		u32 freq;
+		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
+	} else {
+		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -226,7 +232,10 @@
 	int ret;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
+	else
+		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -246,16 +255,25 @@
 	if (ret)
 		return ret;
 
-	val /= GT_FREQUENCY_MULTIPLIER;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	hw_max = dev_priv->rps.hw_max;
-	non_oc_max = (rp_state_cap & 0xff);
-	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+	if (IS_VALLEYVIEW(dev_priv->dev)) {
+		val = vlv_freq_opcode(dev_priv->mem_freq, val);
 
-	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
+		non_oc_max = hw_max;
+	} else {
+		val /= GT_FREQUENCY_MULTIPLIER;
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		non_oc_max = (rp_state_cap & 0xff);
+		hw_min = ((rp_state_cap & 0xff0000) >> 16);
+	}
+
+	if (val < hw_min || val > hw_max ||
+	    val < dev_priv->rps.min_delay) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -264,8 +282,12 @@
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 
-	if (dev_priv->rps.cur_delay > val)
-		gen6_set_rps(dev_priv->dev, val);
+	if (dev_priv->rps.cur_delay > val) {
+		if (IS_VALLEYVIEW(dev_priv->dev))
+			valleyview_set_rps(dev_priv->dev, val);
+		else
+			gen6_set_rps(dev_priv->dev, val);
+	}
 
 	dev_priv->rps.max_delay = val;
 
@@ -282,7 +304,10 @@
 	int ret;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
+	else
+		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -302,21 +327,32 @@
 	if (ret)
 		return ret;
 
-	val /= GT_FREQUENCY_MULTIPLIER;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	hw_max = dev_priv->rps.hw_max;
-	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+	if (IS_VALLEYVIEW(dev)) {
+		val = vlv_freq_opcode(dev_priv->mem_freq, val);
+
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
+	} else {
+		val /= GT_FREQUENCY_MULTIPLIER;
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		hw_min = ((rp_state_cap & 0xff0000) >> 16);
+	}
 
 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	if (dev_priv->rps.cur_delay < val)
-		gen6_set_rps(dev_priv->dev, val);
+	if (dev_priv->rps.cur_delay < val) {
+		if (IS_VALLEYVIEW(dev))
+			valleyview_set_rps(dev, val);
+		else
+			gen6_set_rps(dev_priv->dev, val);
+	}
 
 	dev_priv->rps.min_delay = val;
 
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 985a097..5ef30b2 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -148,13 +148,13 @@
 		dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
 		dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
 
-		dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
-		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
-		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
-		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
-		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
-		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+		dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
+		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
+		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
+		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
+		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
+		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
 	}
 
 	dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
@@ -205,13 +205,13 @@
 		dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
 		dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
 
-		dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
-		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
-		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
-		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
-		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
-		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+		dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
+		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
+		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
+		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
+		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
+		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
 	}
 
 	dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
@@ -259,14 +259,14 @@
 		dev_priv->regfile.saveDP_B = I915_READ(DP_B);
 		dev_priv->regfile.saveDP_C = I915_READ(DP_C);
 		dev_priv->regfile.saveDP_D = I915_READ(DP_D);
-		dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
-		dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
-		dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
-		dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
-		dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
-		dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
-		dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
-		dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+		dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
+		dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
+		dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
+		dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
+		dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
+		dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
+		dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
+		dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
 	}
 	/* FIXME: regfile.save TV & SDVO state */
 
@@ -282,14 +282,14 @@
 
 	/* Display port ratios (must be done before clock is set) */
 	if (SUPPORTS_INTEGRATED_DP(dev)) {
-		I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
-		I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
-		I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
-		I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
-		I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
-		I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
-		I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
-		I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+		I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+		I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+		I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+		I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+		I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
+		I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
+		I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
+		I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
 	}
 
 	/* Fences */
@@ -379,13 +379,13 @@
 		I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
 		I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
 
-		I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
-		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
-		I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
-		I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
-		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
-		I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
-		I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+		I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+		I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+		I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+		I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+		I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+		I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+		I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
 	}
 
 	/* Restore plane info */
@@ -448,13 +448,13 @@
 		I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
 		I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
 
-		I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
-		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
-		I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
-		I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
-		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
-		I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
-		I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+		I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+		I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+		I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+		I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+		I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+		I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+		I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
 	}
 
 	/* Restore plane info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 95070b2..53f2bed 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -212,7 +212,7 @@
 	if (!lvds_options)
 		return;
 
-	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
 	if (lvds_options->panel_type == 0xff)
 		return;
 
@@ -226,7 +226,7 @@
 	if (!lvds_lfp_data_ptrs)
 		return;
 
-	dev_priv->lvds_vbt = 1;
+	dev_priv->vbt.lvds_vbt = 1;
 
 	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
 					       lvds_lfp_data_ptrs,
@@ -238,7 +238,7 @@
 
 	fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
 
-	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+	dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
 
 	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -274,9 +274,9 @@
 		/* check the resolution, just to be sure */
 		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
 		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
-			dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+			dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
 			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
-				      dev_priv->bios_lvds_val);
+				      dev_priv->vbt.bios_lvds_val);
 		}
 	}
 }
@@ -316,7 +316,7 @@
 
 	fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
 
-	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+	dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
 
 	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
@@ -345,20 +345,20 @@
 
 	general = find_section(bdb, BDB_GENERAL_FEATURES);
 	if (general) {
-		dev_priv->int_tv_support = general->int_tv_support;
-		dev_priv->int_crt_support = general->int_crt_support;
-		dev_priv->lvds_use_ssc = general->enable_ssc;
-		dev_priv->lvds_ssc_freq =
+		dev_priv->vbt.int_tv_support = general->int_tv_support;
+		dev_priv->vbt.int_crt_support = general->int_crt_support;
+		dev_priv->vbt.lvds_use_ssc = general->enable_ssc;
+		dev_priv->vbt.lvds_ssc_freq =
 			intel_bios_ssc_frequency(dev, general->ssc_freq);
-		dev_priv->display_clock_mode = general->display_clock_mode;
-		dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+		dev_priv->vbt.display_clock_mode = general->display_clock_mode;
+		dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
 		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
-			      dev_priv->int_tv_support,
-			      dev_priv->int_crt_support,
-			      dev_priv->lvds_use_ssc,
-			      dev_priv->lvds_ssc_freq,
-			      dev_priv->display_clock_mode,
-			      dev_priv->fdi_rx_polarity_inverted);
+			      dev_priv->vbt.int_tv_support,
+			      dev_priv->vbt.int_crt_support,
+			      dev_priv->vbt.lvds_use_ssc,
+			      dev_priv->vbt.lvds_ssc_freq,
+			      dev_priv->vbt.display_clock_mode,
+			      dev_priv->vbt.fdi_rx_polarity_inverted);
 	}
 }
 
@@ -375,7 +375,7 @@
 			int bus_pin = general->crt_ddc_gmbus_pin;
 			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
 			if (intel_gmbus_is_port_valid(bus_pin))
-				dev_priv->crt_ddc_pin = bus_pin;
+				dev_priv->vbt.crt_ddc_pin = bus_pin;
 		} else {
 			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
 				      block_size);
@@ -486,7 +486,7 @@
 
 	if (SUPPORTS_EDP(dev) &&
 	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-		dev_priv->edp.support = 1;
+		dev_priv->vbt.edp_support = 1;
 
 	if (driver->dual_frequency)
 		dev_priv->render_reclock_avail = true;
@@ -501,20 +501,20 @@
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
 		return;
 	}
 
 	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
 	case EDP_18BPP:
-		dev_priv->edp.bpp = 18;
+		dev_priv->vbt.edp_bpp = 18;
 		break;
 	case EDP_24BPP:
-		dev_priv->edp.bpp = 24;
+		dev_priv->vbt.edp_bpp = 24;
 		break;
 	case EDP_30BPP:
-		dev_priv->edp.bpp = 30;
+		dev_priv->vbt.edp_bpp = 30;
 		break;
 	}
 
@@ -522,48 +522,48 @@
 	edp_pps = &edp->power_seqs[panel_type];
 	edp_link_params = &edp->link_params[panel_type];
 
-	dev_priv->edp.pps = *edp_pps;
+	dev_priv->vbt.edp_pps = *edp_pps;
 
-	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+	dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
 		DP_LINK_BW_1_62;
 	switch (edp_link_params->lanes) {
 	case 0:
-		dev_priv->edp.lanes = 1;
+		dev_priv->vbt.edp_lanes = 1;
 		break;
 	case 1:
-		dev_priv->edp.lanes = 2;
+		dev_priv->vbt.edp_lanes = 2;
 		break;
 	case 3:
 	default:
-		dev_priv->edp.lanes = 4;
+		dev_priv->vbt.edp_lanes = 4;
 		break;
 	}
 	switch (edp_link_params->preemphasis) {
 	case 0:
-		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
 		break;
 	case 1:
-		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
 		break;
 	case 2:
-		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
 		break;
 	case 3:
-		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
 		break;
 	}
 	switch (edp_link_params->vswing) {
 	case 0:
-		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
 		break;
 	case 1:
-		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
 		break;
 	case 2:
-		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
 		break;
 	case 3:
-		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
 		break;
 	}
 }
@@ -611,13 +611,13 @@
 		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
 		return;
 	}
-	dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
-	if (!dev_priv->child_dev) {
+	dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+	if (!dev_priv->vbt.child_dev) {
 		DRM_DEBUG_KMS("No memory space for child device\n");
 		return;
 	}
 
-	dev_priv->child_dev_num = count;
+	dev_priv->vbt.child_dev_num = count;
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
@@ -625,7 +625,7 @@
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		child_dev_ptr = dev_priv->child_dev + count;
+		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
 		memcpy((void *)child_dev_ptr, (void *)p_child,
 					sizeof(*p_child));
@@ -638,23 +638,23 @@
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+	dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
 	/* LFP panel data */
-	dev_priv->lvds_dither = 1;
-	dev_priv->lvds_vbt = 0;
+	dev_priv->vbt.lvds_dither = 1;
+	dev_priv->vbt.lvds_vbt = 0;
 
 	/* SDVO panel data */
-	dev_priv->sdvo_lvds_vbt_mode = NULL;
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
 
 	/* general features */
-	dev_priv->int_tv_support = 1;
-	dev_priv->int_crt_support = 1;
+	dev_priv->vbt.int_tv_support = 1;
+	dev_priv->vbt.int_crt_support = 1;
 
 	/* Default to using SSC */
-	dev_priv->lvds_use_ssc = 1;
-	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
-	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+	dev_priv->vbt.lvds_use_ssc = 1;
+	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 58b4a53..3acec8c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -84,6 +84,28 @@
 	return true;
 }
 
+static void intel_crt_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -127,7 +149,7 @@
 	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
 }
 
-
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
 static void intel_crt_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_device *dev = connector->dev;
@@ -158,6 +180,8 @@
 	else
 		encoder->connectors_active = true;
 
+	/* We call connector dpms manually below in case pipe dpms doesn't
+	 * change due to cloning. */
 	if (mode < old_dpms) {
 		/* From off to on, enable the pipe first. */
 		intel_crtc_update_dpms(crtc);
@@ -207,6 +231,10 @@
 	if (HAS_PCH_SPLIT(dev))
 		pipe_config->has_pch_encoder = true;
 
+	/* LPT FDI RX only supports 8bpc. */
+	if (HAS_PCH_LPT(dev))
+		pipe_config->pipe_bpp = 24;
+
 	return true;
 }
 
@@ -431,7 +459,7 @@
 
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
-	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
 	edid = intel_crt_get_edid(connector, i2c);
 
 	if (edid) {
@@ -637,7 +665,7 @@
 	int ret;
 	struct i2c_adapter *i2c;
 
-	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
 	ret = intel_crt_ddc_get_modes(connector, i2c);
 	if (ret || !IS_G4X(dev))
 		return ret;
@@ -774,6 +802,7 @@
 	crt->base.compute_config = intel_crt_compute_config;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
+	crt->base.get_config = intel_crt_get_config;
 	if (I915_HAS_HOTPLUG(dev))
 		crt->base.hpd_pin = HPD_CRT;
 	if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fb961bb..9649df8 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -174,6 +174,8 @@
 	 * mode set "sequence for CRT port" document:
 	 * - TP1 to TP2 time with the default value
 	 * - FDI delay to 90h
+	 *
+	 * WaFDIAutoLinkSetTimingOverrride:hsw
 	 */
 	I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
 				  FDI_RX_PWRDN_LANE0_VAL(2) |
@@ -181,7 +183,8 @@
 
 	/* Enable the PCH Receiver FDI PLL */
 	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
-		     FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
+		     FDI_RX_PLL_ENABLE |
+		     FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
 	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 	POSTING_READ(_FDI_RXA_CTL);
 	udelay(220);
@@ -209,7 +212,7 @@
 		 * port reversal bit */
 		I915_WRITE(DDI_BUF_CTL(PORT_E),
 			   DDI_BUF_CTL_ENABLE |
-			   ((intel_crtc->fdi_lanes - 1) << 1) |
+			   ((intel_crtc->config.fdi_lanes - 1) << 1) |
 			   hsw_ddi_buf_ctl_values[i / 2]);
 		POSTING_READ(DDI_BUF_CTL(PORT_E));
 
@@ -278,392 +281,6 @@
 	DRM_ERROR("FDI link training failed!\n");
 }
 
-/* WRPLL clock dividers */
-struct wrpll_tmds_clock {
-	u32 clock;
-	u16 p;		/* Post divider */
-	u16 n2;		/* Feedback divider */
-	u16 r2;		/* Reference divider */
-};
-
-/* Table of matching values for WRPLL clocks programming for each frequency.
- * The code assumes this table is sorted. */
-static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
-	{19750,	38,	25,	18},
-	{20000,	48,	32,	18},
-	{21000,	36,	21,	15},
-	{21912,	42,	29,	17},
-	{22000,	36,	22,	15},
-	{23000,	36,	23,	15},
-	{23500,	40,	40,	23},
-	{23750,	26,	16,	14},
-	{24000,	36,	24,	15},
-	{25000,	36,	25,	15},
-	{25175,	26,	40,	33},
-	{25200,	30,	21,	15},
-	{26000,	36,	26,	15},
-	{27000,	30,	21,	14},
-	{27027,	18,	100,	111},
-	{27500,	30,	29,	19},
-	{28000,	34,	30,	17},
-	{28320,	26,	30,	22},
-	{28322,	32,	42,	25},
-	{28750,	24,	23,	18},
-	{29000,	30,	29,	18},
-	{29750,	32,	30,	17},
-	{30000,	30,	25,	15},
-	{30750,	30,	41,	24},
-	{31000,	30,	31,	18},
-	{31500,	30,	28,	16},
-	{32000,	30,	32,	18},
-	{32500,	28,	32,	19},
-	{33000,	24,	22,	15},
-	{34000,	28,	30,	17},
-	{35000,	26,	32,	19},
-	{35500,	24,	30,	19},
-	{36000,	26,	26,	15},
-	{36750,	26,	46,	26},
-	{37000,	24,	23,	14},
-	{37762,	22,	40,	26},
-	{37800,	20,	21,	15},
-	{38000,	24,	27,	16},
-	{38250,	24,	34,	20},
-	{39000,	24,	26,	15},
-	{40000,	24,	32,	18},
-	{40500,	20,	21,	14},
-	{40541,	22,	147,	89},
-	{40750,	18,	19,	14},
-	{41000,	16,	17,	14},
-	{41500,	22,	44,	26},
-	{41540,	22,	44,	26},
-	{42000,	18,	21,	15},
-	{42500,	22,	45,	26},
-	{43000,	20,	43,	27},
-	{43163,	20,	24,	15},
-	{44000,	18,	22,	15},
-	{44900,	20,	108,	65},
-	{45000,	20,	25,	15},
-	{45250,	20,	52,	31},
-	{46000,	18,	23,	15},
-	{46750,	20,	45,	26},
-	{47000,	20,	40,	23},
-	{48000,	18,	24,	15},
-	{49000,	18,	49,	30},
-	{49500,	16,	22,	15},
-	{50000,	18,	25,	15},
-	{50500,	18,	32,	19},
-	{51000,	18,	34,	20},
-	{52000,	18,	26,	15},
-	{52406,	14,	34,	25},
-	{53000,	16,	22,	14},
-	{54000,	16,	24,	15},
-	{54054,	16,	173,	108},
-	{54500,	14,	24,	17},
-	{55000,	12,	22,	18},
-	{56000,	14,	45,	31},
-	{56250,	16,	25,	15},
-	{56750,	14,	25,	17},
-	{57000,	16,	27,	16},
-	{58000,	16,	43,	25},
-	{58250,	16,	38,	22},
-	{58750,	16,	40,	23},
-	{59000,	14,	26,	17},
-	{59341,	14,	40,	26},
-	{59400,	16,	44,	25},
-	{60000,	16,	32,	18},
-	{60500,	12,	39,	29},
-	{61000,	14,	49,	31},
-	{62000,	14,	37,	23},
-	{62250,	14,	42,	26},
-	{63000,	12,	21,	15},
-	{63500,	14,	28,	17},
-	{64000,	12,	27,	19},
-	{65000,	14,	32,	19},
-	{65250,	12,	29,	20},
-	{65500,	12,	32,	22},
-	{66000,	12,	22,	15},
-	{66667,	14,	38,	22},
-	{66750,	10,	21,	17},
-	{67000,	14,	33,	19},
-	{67750,	14,	58,	33},
-	{68000,	14,	30,	17},
-	{68179,	14,	46,	26},
-	{68250,	14,	46,	26},
-	{69000,	12,	23,	15},
-	{70000,	12,	28,	18},
-	{71000,	12,	30,	19},
-	{72000,	12,	24,	15},
-	{73000,	10,	23,	17},
-	{74000,	12,	23,	14},
-	{74176,	8,	100,	91},
-	{74250,	10,	22,	16},
-	{74481,	12,	43,	26},
-	{74500,	10,	29,	21},
-	{75000,	12,	25,	15},
-	{75250,	10,	39,	28},
-	{76000,	12,	27,	16},
-	{77000,	12,	53,	31},
-	{78000,	12,	26,	15},
-	{78750,	12,	28,	16},
-	{79000,	10,	38,	26},
-	{79500,	10,	28,	19},
-	{80000,	12,	32,	18},
-	{81000,	10,	21,	14},
-	{81081,	6,	100,	111},
-	{81624,	8,	29,	24},
-	{82000,	8,	17,	14},
-	{83000,	10,	40,	26},
-	{83950,	10,	28,	18},
-	{84000,	10,	28,	18},
-	{84750,	6,	16,	17},
-	{85000,	6,	17,	18},
-	{85250,	10,	30,	19},
-	{85750,	10,	27,	17},
-	{86000,	10,	43,	27},
-	{87000,	10,	29,	18},
-	{88000,	10,	44,	27},
-	{88500,	10,	41,	25},
-	{89000,	10,	28,	17},
-	{89012,	6,	90,	91},
-	{89100,	10,	33,	20},
-	{90000,	10,	25,	15},
-	{91000,	10,	32,	19},
-	{92000,	10,	46,	27},
-	{93000,	10,	31,	18},
-	{94000,	10,	40,	23},
-	{94500,	10,	28,	16},
-	{95000,	10,	44,	25},
-	{95654,	10,	39,	22},
-	{95750,	10,	39,	22},
-	{96000,	10,	32,	18},
-	{97000,	8,	23,	16},
-	{97750,	8,	42,	29},
-	{98000,	8,	45,	31},
-	{99000,	8,	22,	15},
-	{99750,	8,	34,	23},
-	{100000,	6,	20,	18},
-	{100500,	6,	19,	17},
-	{101000,	6,	37,	33},
-	{101250,	8,	21,	14},
-	{102000,	6,	17,	15},
-	{102250,	6,	25,	22},
-	{103000,	8,	29,	19},
-	{104000,	8,	37,	24},
-	{105000,	8,	28,	18},
-	{106000,	8,	22,	14},
-	{107000,	8,	46,	29},
-	{107214,	8,	27,	17},
-	{108000,	8,	24,	15},
-	{108108,	8,	173,	108},
-	{109000,	6,	23,	19},
-	{110000,	6,	22,	18},
-	{110013,	6,	22,	18},
-	{110250,	8,	49,	30},
-	{110500,	8,	36,	22},
-	{111000,	8,	23,	14},
-	{111264,	8,	150,	91},
-	{111375,	8,	33,	20},
-	{112000,	8,	63,	38},
-	{112500,	8,	25,	15},
-	{113100,	8,	57,	34},
-	{113309,	8,	42,	25},
-	{114000,	8,	27,	16},
-	{115000,	6,	23,	18},
-	{116000,	8,	43,	25},
-	{117000,	8,	26,	15},
-	{117500,	8,	40,	23},
-	{118000,	6,	38,	29},
-	{119000,	8,	30,	17},
-	{119500,	8,	46,	26},
-	{119651,	8,	39,	22},
-	{120000,	8,	32,	18},
-	{121000,	6,	39,	29},
-	{121250,	6,	31,	23},
-	{121750,	6,	23,	17},
-	{122000,	6,	42,	31},
-	{122614,	6,	30,	22},
-	{123000,	6,	41,	30},
-	{123379,	6,	37,	27},
-	{124000,	6,	51,	37},
-	{125000,	6,	25,	18},
-	{125250,	4,	13,	14},
-	{125750,	4,	27,	29},
-	{126000,	6,	21,	15},
-	{127000,	6,	24,	17},
-	{127250,	6,	41,	29},
-	{128000,	6,	27,	19},
-	{129000,	6,	43,	30},
-	{129859,	4,	25,	26},
-	{130000,	6,	26,	18},
-	{130250,	6,	42,	29},
-	{131000,	6,	32,	22},
-	{131500,	6,	38,	26},
-	{131850,	6,	41,	28},
-	{132000,	6,	22,	15},
-	{132750,	6,	28,	19},
-	{133000,	6,	34,	23},
-	{133330,	6,	37,	25},
-	{134000,	6,	61,	41},
-	{135000,	6,	21,	14},
-	{135250,	6,	167,	111},
-	{136000,	6,	62,	41},
-	{137000,	6,	35,	23},
-	{138000,	6,	23,	15},
-	{138500,	6,	40,	26},
-	{138750,	6,	37,	24},
-	{139000,	6,	34,	22},
-	{139050,	6,	34,	22},
-	{139054,	6,	34,	22},
-	{140000,	6,	28,	18},
-	{141000,	6,	36,	23},
-	{141500,	6,	22,	14},
-	{142000,	6,	30,	19},
-	{143000,	6,	27,	17},
-	{143472,	4,	17,	16},
-	{144000,	6,	24,	15},
-	{145000,	6,	29,	18},
-	{146000,	6,	47,	29},
-	{146250,	6,	26,	16},
-	{147000,	6,	49,	30},
-	{147891,	6,	23,	14},
-	{148000,	6,	23,	14},
-	{148250,	6,	28,	17},
-	{148352,	4,	100,	91},
-	{148500,	6,	33,	20},
-	{149000,	6,	48,	29},
-	{150000,	6,	25,	15},
-	{151000,	4,	19,	17},
-	{152000,	6,	27,	16},
-	{152280,	6,	44,	26},
-	{153000,	6,	34,	20},
-	{154000,	6,	53,	31},
-	{155000,	6,	31,	18},
-	{155250,	6,	50,	29},
-	{155750,	6,	45,	26},
-	{156000,	6,	26,	15},
-	{157000,	6,	61,	35},
-	{157500,	6,	28,	16},
-	{158000,	6,	65,	37},
-	{158250,	6,	44,	25},
-	{159000,	6,	53,	30},
-	{159500,	6,	39,	22},
-	{160000,	6,	32,	18},
-	{161000,	4,	31,	26},
-	{162000,	4,	18,	15},
-	{162162,	4,	131,	109},
-	{162500,	4,	53,	44},
-	{163000,	4,	29,	24},
-	{164000,	4,	17,	14},
-	{165000,	4,	22,	18},
-	{166000,	4,	32,	26},
-	{167000,	4,	26,	21},
-	{168000,	4,	46,	37},
-	{169000,	4,	104,	83},
-	{169128,	4,	64,	51},
-	{169500,	4,	39,	31},
-	{170000,	4,	34,	27},
-	{171000,	4,	19,	15},
-	{172000,	4,	51,	40},
-	{172750,	4,	32,	25},
-	{172800,	4,	32,	25},
-	{173000,	4,	41,	32},
-	{174000,	4,	49,	38},
-	{174787,	4,	22,	17},
-	{175000,	4,	35,	27},
-	{176000,	4,	30,	23},
-	{177000,	4,	38,	29},
-	{178000,	4,	29,	22},
-	{178500,	4,	37,	28},
-	{179000,	4,	53,	40},
-	{179500,	4,	73,	55},
-	{180000,	4,	20,	15},
-	{181000,	4,	55,	41},
-	{182000,	4,	31,	23},
-	{183000,	4,	42,	31},
-	{184000,	4,	30,	22},
-	{184750,	4,	26,	19},
-	{185000,	4,	37,	27},
-	{186000,	4,	51,	37},
-	{187000,	4,	36,	26},
-	{188000,	4,	32,	23},
-	{189000,	4,	21,	15},
-	{190000,	4,	38,	27},
-	{190960,	4,	41,	29},
-	{191000,	4,	41,	29},
-	{192000,	4,	27,	19},
-	{192250,	4,	37,	26},
-	{193000,	4,	20,	14},
-	{193250,	4,	53,	37},
-	{194000,	4,	23,	16},
-	{194208,	4,	23,	16},
-	{195000,	4,	26,	18},
-	{196000,	4,	45,	31},
-	{197000,	4,	35,	24},
-	{197750,	4,	41,	28},
-	{198000,	4,	22,	15},
-	{198500,	4,	25,	17},
-	{199000,	4,	28,	19},
-	{200000,	4,	37,	25},
-	{201000,	4,	61,	41},
-	{202000,	4,	112,	75},
-	{202500,	4,	21,	14},
-	{203000,	4,	146,	97},
-	{204000,	4,	62,	41},
-	{204750,	4,	44,	29},
-	{205000,	4,	38,	25},
-	{206000,	4,	29,	19},
-	{207000,	4,	23,	15},
-	{207500,	4,	40,	26},
-	{208000,	4,	37,	24},
-	{208900,	4,	48,	31},
-	{209000,	4,	48,	31},
-	{209250,	4,	31,	20},
-	{210000,	4,	28,	18},
-	{211000,	4,	25,	16},
-	{212000,	4,	22,	14},
-	{213000,	4,	30,	19},
-	{213750,	4,	38,	24},
-	{214000,	4,	46,	29},
-	{214750,	4,	35,	22},
-	{215000,	4,	43,	27},
-	{216000,	4,	24,	15},
-	{217000,	4,	37,	23},
-	{218000,	4,	42,	26},
-	{218250,	4,	42,	26},
-	{218750,	4,	34,	21},
-	{219000,	4,	47,	29},
-	{220000,	4,	44,	27},
-	{220640,	4,	49,	30},
-	{220750,	4,	36,	22},
-	{221000,	4,	36,	22},
-	{222000,	4,	23,	14},
-	{222525,	4,	28,	17},
-	{222750,	4,	33,	20},
-	{227000,	4,	37,	22},
-	{230250,	4,	29,	17},
-	{233500,	4,	38,	22},
-	{235000,	4,	40,	23},
-	{238000,	4,	30,	17},
-	{241500,	2,	17,	19},
-	{245250,	2,	20,	22},
-	{247750,	2,	22,	24},
-	{253250,	2,	15,	16},
-	{256250,	2,	18,	19},
-	{262500,	2,	31,	32},
-	{267250,	2,	66,	67},
-	{268500,	2,	94,	95},
-	{270000,	2,	14,	14},
-	{272500,	2,	77,	76},
-	{273750,	2,	57,	56},
-	{280750,	2,	24,	23},
-	{281250,	2,	23,	22},
-	{286000,	2,	17,	16},
-	{291750,	2,	26,	24},
-	{296703,	2,	56,	51},
-	{297000,	2,	22,	20},
-	{298000,	2,	21,	19},
-};
-
 static void intel_ddi_mode_set(struct drm_encoder *encoder,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode)
@@ -675,7 +292,7 @@
 	int pipe = intel_crtc->pipe;
 	int type = intel_encoder->type;
 
-	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+	DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
 		      port_name(port), pipe_name(pipe));
 
 	intel_crtc->eld_vld = false;
@@ -686,22 +303,7 @@
 
 		intel_dp->DP = intel_dig_port->port_reversal |
 			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
-		switch (intel_dp->lane_count) {
-		case 1:
-			intel_dp->DP |= DDI_PORT_WIDTH_X1;
-			break;
-		case 2:
-			intel_dp->DP |= DDI_PORT_WIDTH_X2;
-			break;
-		case 4:
-			intel_dp->DP |= DDI_PORT_WIDTH_X4;
-			break;
-		default:
-			intel_dp->DP |= DDI_PORT_WIDTH_X4;
-			WARN(1, "Unexpected DP lane count %d\n",
-			     intel_dp->lane_count);
-			break;
-		}
+		intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
 		if (intel_dp->has_audio) {
 			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
@@ -748,8 +350,8 @@
 	}
 
 	if (num_encoders != 1)
-		WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
-		     intel_crtc->pipe);
+		WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
+		     pipe_name(intel_crtc->pipe));
 
 	BUG_ON(ret == NULL);
 	return ret;
@@ -802,27 +404,224 @@
 	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
 }
 
-static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+#define LC_FREQ 2700
+#define LC_FREQ_2K (LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
+
+struct wrpll_rnp {
+	unsigned p, n2, r2;
+};
+
+static unsigned wrpll_get_budget_for_freq(int clock)
 {
-	u32 i;
+	unsigned budget;
 
-	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
-		if (clock <= wrpll_tmds_clock_table[i].clock)
-			break;
+	switch (clock) {
+	case 25175000:
+	case 25200000:
+	case 27000000:
+	case 27027000:
+	case 37762500:
+	case 37800000:
+	case 40500000:
+	case 40541000:
+	case 54000000:
+	case 54054000:
+	case 59341000:
+	case 59400000:
+	case 72000000:
+	case 74176000:
+	case 74250000:
+	case 81000000:
+	case 81081000:
+	case 89012000:
+	case 89100000:
+	case 108000000:
+	case 108108000:
+	case 111264000:
+	case 111375000:
+	case 148352000:
+	case 148500000:
+	case 162000000:
+	case 162162000:
+	case 222525000:
+	case 222750000:
+	case 296703000:
+	case 297000000:
+		budget = 0;
+		break;
+	case 233500000:
+	case 245250000:
+	case 247750000:
+	case 253250000:
+	case 298000000:
+		budget = 1500;
+		break;
+	case 169128000:
+	case 169500000:
+	case 179500000:
+	case 202000000:
+		budget = 2000;
+		break;
+	case 256250000:
+	case 262500000:
+	case 270000000:
+	case 272500000:
+	case 273750000:
+	case 280750000:
+	case 281250000:
+	case 286000000:
+	case 291750000:
+		budget = 4000;
+		break;
+	case 267250000:
+	case 268500000:
+		budget = 5000;
+		break;
+	default:
+		budget = 1000;
+		break;
+	}
 
-	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
-		i--;
+	return budget;
+}
 
-	*p = wrpll_tmds_clock_table[i].p;
-	*n2 = wrpll_tmds_clock_table[i].n2;
-	*r2 = wrpll_tmds_clock_table[i].r2;
+static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+			     unsigned r2, unsigned n2, unsigned p,
+			     struct wrpll_rnp *best)
+{
+	uint64_t a, b, c, d, diff, diff_best;
 
-	if (wrpll_tmds_clock_table[i].clock != clock)
-		DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
-			 wrpll_tmds_clock_table[i].clock, clock);
+	/* No best (r,n,p) yet */
+	if (best->p == 0) {
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+		return;
+	}
 
-	DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-		      clock, *p, *n2, *r2);
+	/*
+	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+	 * freq2k.
+	 *
+	 * delta = 1e6 *
+	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+	 *	   freq2k;
+	 *
+	 * and we would like delta <= budget.
+	 *
+	 * If the discrepancy is above the PPM-based budget, always prefer to
+	 * improve upon the previous solution.  However, if you're within the
+	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+	 */
+	a = freq2k * budget * p * r2;
+	b = freq2k * budget * best->p * best->r2;
+	diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
+	diff_best = ABS_DIFF((freq2k * best->p * best->r2),
+			     (LC_FREQ_2K * best->n2));
+	c = 1000000 * diff;
+	d = 1000000 * diff_best;
+
+	if (a < c && b < d) {
+		/* If both are above the budget, pick the closer */
+		if (best->p * best->r2 * diff < p * r2 * diff_best) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	} else if (a >= c && b < d) {
+		/* Candidate is within the budget, the current best is not: take it. */
+		best->p = p;
+		best->n2 = n2;
+		best->r2 = r2;
+	} else if (a >= c && b >= d) {
+		/* Both are below the limit, so pick the higher n2/(r2*r2) */
+		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+			best->p = p;
+			best->n2 = n2;
+			best->r2 = r2;
+		}
+	}
+	/* Otherwise a < c && b >= d, do nothing */
+}
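
The cross-multiplied test above is the usual PPM check with the division removed (a >= c is equivalent to 1e6 * diff / freq2k <= budget). A minimal userspace sketch of that check, not part of the patch, reusing the patch's LC_FREQ_2K constant and the {148500, p=6, n2=33, r2=20} entry from the table this series removes:

#include <stdint.h>
#include <stdio.h>

#define LC_FREQ_2K (2700 * 2000ULL)	/* same constant as in the patch */

/* Return 1 when the (r2, n2, p) candidate hits freq2k within `budget` ppm. */
static int wrpll_within_budget(uint64_t freq2k, unsigned budget,
			       unsigned r2, unsigned n2, unsigned p)
{
	uint64_t out2k = LC_FREQ_2K * n2 / (p * r2);	/* candidate output, freq2k units */
	uint64_t diff = out2k > freq2k ? out2k - freq2k : freq2k - out2k;

	/* delta_ppm = 1e6 * diff / freq2k must not exceed budget */
	return 1000000ULL * diff <= (uint64_t)budget * freq2k;
}

int main(void)
{
	/* 148.5 MHz pixel clock -> freq2k = 148500000 / 100; budget 0 ppm */
	printf("%d\n", wrpll_within_budget(1485000, 0, 20, 33, 6));	/* prints 1 */
	return 0;
}
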
+
+static void
+intel_ddi_calculate_wrpll(int clock /* in Hz */,
+			  unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+	uint64_t freq2k;
+	unsigned p, n2, r2;
+	struct wrpll_rnp best = { 0, 0, 0 };
+	unsigned budget;
+
+	freq2k = clock / 100;
+
+	budget = wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for the 540 MHz pixel clock: bypass WR PLL entirely
+	 * and directly pass the LC PLL to it. */
+	if (freq2k == 5400000) {
+		*n2_out = 2;
+		*p_out = 1;
+		*r2_out = 2;
+		return;
+	}
+
+	/*
+	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+	 * the WR PLL.
+	 *
+	 * We want R so that REF_MIN <= Ref <= REF_MAX.
+	 * Injecting R2 = 2 * R gives:
+	 *   REF_MAX * r2 > LC_FREQ * 2 and
+	 *   REF_MIN * r2 < LC_FREQ * 2
+	 *
+	 * Which means the desired boundaries for r2 are:
+	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+	 *
+	 */
+	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+	     r2 <= LC_FREQ * 2 / REF_MIN;
+	     r2++) {
+
+		/*
+		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+		 *
+		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+		 *   VCO_MAX * r2 > n2 * LC_FREQ and
+		 *   VCO_MIN * r2 < n2 * LC_FREQ
+		 *
+		 * Which means the desired boundaries for n2 are:
+		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+		 */
+		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+		     n2 <= VCO_MAX * r2 / LC_FREQ;
+		     n2++) {
+
+			for (p = P_MIN; p <= P_MAX; p += P_INC)
+				wrpll_update_rnp(freq2k, budget,
+						 r2, n2, p, &best);
+		}
+	}
+
+	*n2_out = best.n2;
+	*p_out = best.p;
+	*r2_out = best.r2;
+
+	DRM_DEBUG_KMS("WRPLL: %dHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      clock, *p_out, *n2_out, *r2_out);
 }
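
With the constants above (LC_FREQ = 2700, Ref limited to 48-400 MHz, VCO to 2400-4800 MHz), the derived loop bounds give a small search space; the sketch below, an illustration rather than patch code, just evaluates the same bound arithmetic. P then sweeps 2..64 in steps of 2, so the whole search stays well under 100 * 13 * 32 iterations.

#include <stdio.h>

#define LC_FREQ 2700
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

int main(void)
{
	/* r2 = 2*R must keep Ref = LC_FREQ / R inside [REF_MIN, REF_MAX] */
	unsigned r2_lo = LC_FREQ * 2 / REF_MAX + 1;	/* 5400/400 + 1 = 14  */
	unsigned r2_hi = LC_FREQ * 2 / REF_MIN;		/* 5400/48      = 112 */

	/* for the smallest r2, n2 = 2*N must keep VCO = N*LC_FREQ/R in range */
	unsigned n2_lo = VCO_MIN * r2_lo / LC_FREQ + 1;	/* 2400*14/2700 + 1 = 13 */
	unsigned n2_hi = VCO_MAX * r2_lo / LC_FREQ;	/* 4800*14/2700     = 24 */

	printf("r2: %u..%u, n2 at r2=%u: %u..%u\n",
	       r2_lo, r2_hi, r2_lo, n2_lo, n2_hi);
	return 0;
}
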
 
 bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
@@ -863,7 +662,7 @@
 		return true;
 
 	} else if (type == INTEL_OUTPUT_HDMI) {
-		int p, n2, r2;
+		unsigned p, n2, r2;
 
 		if (plls->wrpll1_refcount == 0) {
 			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
@@ -885,7 +684,7 @@
 		WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
 		     "WRPLL already enabled\n");
 
-		intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+		intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
 		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -995,7 +794,7 @@
 			/* Can only use the always-on power well for eDP when
 			 * not using the panel fitter, and when not using motion
 			  * blur mitigation (which we don't support). */
-			if (dev_priv->pch_pf_size)
+			if (intel_crtc->config.pch_pfit.size)
 				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
 			else
 				temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1022,7 +821,7 @@
 
 	} else if (type == INTEL_OUTPUT_ANALOG) {
 		temp |= TRANS_DDI_MODE_SELECT_FDI;
-		temp |= (intel_crtc->fdi_lanes - 1) << 1;
+		temp |= (intel_crtc->config.fdi_lanes - 1) << 1;
 
 	} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
 		   type == INTEL_OUTPUT_EDP) {
@@ -1030,25 +829,10 @@
 
 		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
-		switch (intel_dp->lane_count) {
-		case 1:
-			temp |= TRANS_DDI_PORT_WIDTH_X1;
-			break;
-		case 2:
-			temp |= TRANS_DDI_PORT_WIDTH_X2;
-			break;
-		case 4:
-			temp |= TRANS_DDI_PORT_WIDTH_X4;
-			break;
-		default:
-			temp |= TRANS_DDI_PORT_WIDTH_X4;
-			WARN(1, "Unsupported lane count %d\n",
-			     intel_dp->lane_count);
-		}
-
+		temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
 	} else {
-		WARN(1, "Invalid encoder type %d for pipe %d\n",
-		     intel_encoder->type, pipe);
+		WARN(1, "Invalid encoder type %d for pipe %c\n",
+		     intel_encoder->type, pipe_name(pipe));
 	}
 
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1148,7 +932,7 @@
 		}
 	}
 
-	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
+	DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
 
 	return false;
 }
@@ -1334,7 +1118,7 @@
 		ironlake_edp_backlight_on(intel_dp);
 	}
 
-	if (intel_crtc->eld_vld) {
+	if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
 		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
 		tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
 		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
@@ -1352,9 +1136,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
-	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
-	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
+		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+		tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
+			 (pipe * 4));
+		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	}
 
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1366,14 +1153,14 @@
 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
 	if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
-		return 450;
+		return 450000;
 	else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
 		 LCPLL_CLK_FREQ_450)
-		return 450;
+		return 450000;
 	else if (IS_ULT(dev_priv->dev))
-		return 338;
+		return 337500;
 	else
-		return 540;
+		return 540000;
 }
 
 void intel_ddi_pll_init(struct drm_device *dev)
@@ -1386,7 +1173,7 @@
 	 * Don't even try to turn it on.
 	 */
 
-	DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+	DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
 		      intel_ddi_get_cdclk_freq(dev_priv));
 
 	if (val & LCPLL_CD_SOURCE_FCLK)
@@ -1472,6 +1259,28 @@
 		intel_dp_check_link_status(intel_dp);
 }
 
+static void intel_ddi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	u32 temp, flags = 0;
+
+	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+	if (temp & TRANS_DDI_PHSYNC)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (temp & TRANS_DDI_PVSYNC)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+	pipe_config->pixel_multiplier = 1;
+}
+
 static void intel_ddi_destroy(struct drm_encoder *encoder)
 {
 	/* HDMI has nothing special to destroy, so we can go with this. */
@@ -1482,9 +1291,13 @@
 				     struct intel_crtc_config *pipe_config)
 {
 	int type = encoder->type;
+	int port = intel_ddi_get_encoder_port(encoder);
 
 	WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
+	if (port == PORT_A)
+		pipe_config->cpu_transcoder = TRANSCODER_EDP;
+
 	if (type == INTEL_OUTPUT_HDMI)
 		return intel_hdmi_compute_config(encoder, pipe_config);
 	else
@@ -1518,16 +1331,6 @@
 		return;
 	}
 
-	if (port != PORT_A) {
-		hdmi_connector = kzalloc(sizeof(struct intel_connector),
-					 GFP_KERNEL);
-		if (!hdmi_connector) {
-			kfree(dp_connector);
-			kfree(intel_dig_port);
-			return;
-		}
-	}
-
 	intel_encoder = &intel_dig_port->base;
 	encoder = &intel_encoder->base;
 
@@ -1541,12 +1344,11 @@
 	intel_encoder->disable = intel_disable_ddi;
 	intel_encoder->post_disable = intel_ddi_post_disable;
 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+	intel_encoder->get_config = intel_ddi_get_config;
 
 	intel_dig_port->port = port;
 	intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
 					DDI_BUF_PORT_REVERSAL;
-	if (hdmi_connector)
-		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
 	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
@@ -1554,7 +1356,16 @@
 	intel_encoder->cloneable = false;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-	if (hdmi_connector)
-		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
 	intel_dp_init_connector(intel_dig_port, dp_connector);
+
+	if (intel_encoder->type != INTEL_OUTPUT_EDP) {
+		hdmi_connector = kzalloc(sizeof(struct intel_connector),
+					 GFP_KERNEL);
+		if (!hdmi_connector) {
+			return;
+		}
+
+		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+	}
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56746dc..6eb99e1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,18 +46,6 @@
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 typedef struct {
-	/* given values */
-	int n;
-	int m1, m2;
-	int p1, p2;
-	/* derived values */
-	int	dot;
-	int	vco;
-	int	m;
-	int	p;
-} intel_clock_t;
-
-typedef struct {
 	int	min, max;
 } intel_range_t;
 
@@ -114,15 +102,6 @@
 			intel_clock_t *best_clock);
 
 static bool
-intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
-		      int target, int refclk, intel_clock_t *match_clock,
-		      intel_clock_t *best_clock);
-static bool
-intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
-			   int target, int refclk, intel_clock_t *match_clock,
-			   intel_clock_t *best_clock);
-
-static bool
 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *match_clock,
 			intel_clock_t *best_clock);
@@ -254,20 +233,6 @@
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_g4x_display_port = {
-	.dot = { .min = 161670, .max = 227000 },
-	.vco = { .min = 1750000, .max = 3500000},
-	.n = { .min = 1, .max = 2 },
-	.m = { .min = 97, .max = 108 },
-	.m1 = { .min = 0x10, .max = 0x12 },
-	.m2 = { .min = 0x05, .max = 0x06 },
-	.p = { .min = 10, .max = 20 },
-	.p1 = { .min = 1, .max = 2},
-	.p2 = { .dot_limit = 0,
-		.p2_slow = 10, .p2_fast = 10 },
-	.find_pll = intel_find_pll_g4x_dp,
-};
-
 static const intel_limit_t intel_limits_pineview_sdvo = {
 	.dot = { .min = 20000, .max = 400000},
 	.vco = { .min = 1700000, .max = 3500000 },
@@ -374,20 +339,6 @@
 	.find_pll = intel_g4x_find_best_PLL,
 };
 
-static const intel_limit_t intel_limits_ironlake_display_port = {
-	.dot = { .min = 25000, .max = 350000 },
-	.vco = { .min = 1760000, .max = 3510000},
-	.n = { .min = 1, .max = 2 },
-	.m = { .min = 81, .max = 90 },
-	.m1 = { .min = 12, .max = 22 },
-	.m2 = { .min = 5, .max = 9 },
-	.p = { .min = 10, .max = 20 },
-	.p1 = { .min = 1, .max = 2},
-	.p2 = { .dot_limit = 0,
-		.p2_slow = 10, .p2_fast = 10 },
-	.find_pll = intel_find_pll_ironlake_dp,
-};
-
 static const intel_limit_t intel_limits_vlv_dac = {
 	.dot = { .min = 25000, .max = 270000 },
 	.vco = { .min = 4000000, .max = 6000000 },
@@ -396,15 +347,15 @@
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
 	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 2, .max = 3 },
+	.p1 = { .min = 1, .max = 3 },
 	.p2 = { .dot_limit = 270000,
 		.p2_slow = 2, .p2_fast = 20 },
 	.find_pll = intel_vlv_find_best_pll,
 };
 
 static const intel_limit_t intel_limits_vlv_hdmi = {
-	.dot = { .min = 20000, .max = 165000 },
-	.vco = { .min = 4000000, .max = 5994000},
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 4000000, .max = 6000000 },
 	.n = { .min = 1, .max = 7 },
 	.m = { .min = 60, .max = 300 }, /* guess */
 	.m1 = { .min = 2, .max = 3 },
@@ -424,61 +375,12 @@
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
 	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 2, .max = 3 },
+	.p1 = { .min = 1, .max = 3 },
 	.p2 = { .dot_limit = 270000,
 		.p2_slow = 2, .p2_fast = 20 },
 	.find_pll = intel_vlv_find_best_pll,
 };
 
-u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
-{
-	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
-
-	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
-		DRM_ERROR("DPIO idle wait timed out\n");
-		return 0;
-	}
-
-	I915_WRITE(DPIO_REG, reg);
-	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
-		   DPIO_BYTE);
-	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
-		DRM_ERROR("DPIO read wait timed out\n");
-		return 0;
-	}
-
-	return I915_READ(DPIO_DATA);
-}
-
-static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
-			     u32 val)
-{
-	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
-
-	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
-		DRM_ERROR("DPIO idle wait timed out\n");
-		return;
-	}
-
-	I915_WRITE(DPIO_DATA, val);
-	I915_WRITE(DPIO_REG, reg);
-	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
-		   DPIO_BYTE);
-	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
-		DRM_ERROR("DPIO write wait timed out\n");
-}
-
-static void vlv_init_dpio(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Reset the DPIO config */
-	I915_WRITE(DPIO_CTL, 0);
-	POSTING_READ(DPIO_CTL);
-	I915_WRITE(DPIO_CTL, 1);
-	POSTING_READ(DPIO_CTL);
-}
-
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 						int refclk)
 {
@@ -497,10 +399,7 @@
 			else
 				limit = &intel_limits_ironlake_single_lvds;
 		}
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-		limit = &intel_limits_ironlake_display_port;
-	else
+	} else
 		limit = &intel_limits_ironlake_dac;
 
 	return limit;
@@ -521,8 +420,6 @@
 		limit = &intel_limits_g4x_hdmi;
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
 		limit = &intel_limits_g4x_sdvo;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
-		limit = &intel_limits_g4x_display_port;
 	} else /* The option is for other outputs */
 		limit = &intel_limits_i9xx_sdvo;
 
@@ -573,13 +470,18 @@
 	clock->dot = clock->vco / clock->p;
 }
 
+static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+{
+	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
+}
+
 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
 {
 	if (IS_PINEVIEW(dev)) {
 		pineview_clock(refclk, clock);
 		return;
 	}
-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
 	clock->vco = refclk * clock->m / (clock->n + 2);
 	clock->dot = clock->vco / clock->p;
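
The new i9xx_dpll_compute_m() helper keeps the classic i9xx DPLL arithmetic unchanged; as a quick worked example of those four lines (divider values picked purely for illustration, not taken from any limit table here):

#include <stdio.h>

int main(void)
{
	/* i9xx DPLL: m = 5*(m1+2) + (m2+2), vco = refclk*m/(n+2), dot = vco/p */
	int refclk = 96000;			/* kHz */
	int n = 3, m1 = 14, m2 = 8, p1 = 2, p2 = 10;

	int m = 5 * (m1 + 2) + (m2 + 2);	/* 90 */
	int p = p1 * p2;			/* 20 */
	int vco = refclk * m / (n + 2);		/* 1,728,000 kHz */
	int dot = vco / p;			/* 86,400 kHz */

	printf("m=%d vco=%d kHz dot=%d kHz\n", m, vco, dot);
	return 0;
}
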
@@ -712,12 +614,6 @@
 	found = false;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		int lvds_reg;
-
-		if (HAS_PCH_SPLIT(dev))
-			lvds_reg = PCH_LVDS;
-		else
-			lvds_reg = LVDS;
 		if (intel_is_dual_link_lvds(dev))
 			clock.p2 = limit->p2.p2_fast;
 		else
@@ -746,9 +642,6 @@
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
-					if (match_clock &&
-					    clock.p != match_clock->p)
-						continue;
 
 					this_err = abs(clock.dot - target);
 					if (this_err < err_most) {
@@ -765,59 +658,6 @@
 }
 
 static bool
-intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
-			   int target, int refclk, intel_clock_t *match_clock,
-			   intel_clock_t *best_clock)
-{
-	struct drm_device *dev = crtc->dev;
-	intel_clock_t clock;
-
-	if (target < 200000) {
-		clock.n = 1;
-		clock.p1 = 2;
-		clock.p2 = 10;
-		clock.m1 = 12;
-		clock.m2 = 9;
-	} else {
-		clock.n = 2;
-		clock.p1 = 1;
-		clock.p2 = 10;
-		clock.m1 = 14;
-		clock.m2 = 8;
-	}
-	intel_clock(dev, refclk, &clock);
-	memcpy(best_clock, &clock, sizeof(intel_clock_t));
-	return true;
-}
-
-/* DisplayPort has only two frequencies, 162MHz and 270MHz */
-static bool
-intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
-		      int target, int refclk, intel_clock_t *match_clock,
-		      intel_clock_t *best_clock)
-{
-	intel_clock_t clock;
-	if (target < 200000) {
-		clock.p1 = 2;
-		clock.p2 = 10;
-		clock.n = 2;
-		clock.m1 = 23;
-		clock.m2 = 8;
-	} else {
-		clock.p1 = 1;
-		clock.p2 = 10;
-		clock.n = 1;
-		clock.m1 = 14;
-		clock.m2 = 2;
-	}
-	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
-	clock.p = (clock.p1 * clock.p2);
-	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
-	clock.vco = 0;
-	memcpy(best_clock, &clock, sizeof(intel_clock_t));
-	return true;
-}
-static bool
 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
 			int target, int refclk, intel_clock_t *match_clock,
 			intel_clock_t *best_clock)
@@ -1097,14 +937,14 @@
 		pch_dpll = I915_READ(PCH_DPLL_SEL);
 		cur_state = pll->pll_reg == _PCH_DPLL_B;
 		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
-			  "PLL[%d] not attached to this transcoder %d: %08x\n",
-			  cur_state, crtc->pipe, pch_dpll)) {
+			  "PLL[%d] not attached to this transcoder %c: %08x\n",
+			  cur_state, pipe_name(crtc->pipe), pch_dpll)) {
 			cur_state = !!(val >> (4*crtc->pipe + 3));
 			WARN(cur_state != state,
-			     "PLL[%d] not %s on this transcoder %d: %08x\n",
+			     "PLL[%d] not %s on this transcoder %c: %08x\n",
 			     pll->pll_reg == _PCH_DPLL_B,
 			     state_string(state),
-			     crtc->pipe,
+			     pipe_name(crtc->pipe),
 			     val);
 		}
 	}
@@ -1227,8 +1067,8 @@
 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
 		state = true;
 
-	if (!intel_using_power_well(dev_priv->dev) &&
-	    cpu_transcoder != TRANSCODER_EDP) {
+	if (!intel_display_power_enabled(dev_priv->dev,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
 		cur_state = false;
 	} else {
 		reg = PIPECONF(cpu_transcoder);
@@ -1302,8 +1142,8 @@
 		reg = SPCNTR(pipe, i);
 		val = I915_READ(reg);
 		WARN((val & SP_ENABLE),
-		     "sprite %d assertion failure, should be off on pipe %c but is still active\n",
-		     pipe * 2 + i, pipe_name(pipe));
+		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
+		     sprite_name(pipe, i), pipe_name(pipe));
 	}
 }
 
@@ -1323,14 +1163,14 @@
 	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
 }
 
-static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
-				       enum pipe pipe)
+static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+					   enum pipe pipe)
 {
 	int reg;
 	u32 val;
 	bool enabled;
 
-	reg = TRANSCONF(pipe);
+	reg = PCH_TRANSCONF(pipe);
 	val = I915_READ(reg);
 	enabled = !!(val & TRANS_ENABLE);
 	WARN(enabled,
@@ -1474,6 +1314,8 @@
 	int reg;
 	u32 val;
 
+	assert_pipe_disabled(dev_priv, pipe);
+
 	/* No really, not for ILK+ */
 	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
 
@@ -1525,65 +1367,18 @@
 	POSTING_READ(reg);
 }
 
-/* SBI access */
-static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
-		enum intel_sbi_destination destination)
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
 {
-	u32 tmp;
+	u32 port_mask;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
-
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
-		DRM_ERROR("timeout waiting for SBI to become ready\n");
-		return;
-	}
-
-	I915_WRITE(SBI_ADDR, (reg << 16));
-	I915_WRITE(SBI_DATA, value);
-
-	if (destination == SBI_ICLK)
-		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	if (!port)
+		port_mask = DPLL_PORTB_READY_MASK;
 	else
-		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
-	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+		port_mask = DPLL_PORTC_READY_MASK;
 
-	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-				100)) {
-		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
-		return;
-	}
-}
-
-static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
-	       enum intel_sbi_destination destination)
-{
-	u32 value = 0;
-	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
-
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-				100)) {
-		DRM_ERROR("timeout waiting for SBI to become ready\n");
-		return 0;
-	}
-
-	I915_WRITE(SBI_ADDR, (reg << 16));
-
-	if (destination == SBI_ICLK)
-		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
-	else
-		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
-	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
-
-	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-				100)) {
-		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
-		return 0;
-	}
-
-	return I915_READ(SBI_DATA);
+	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
+		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
+		     'B' + port, I915_READ(DPLL(0)));
 }
 
 /**
@@ -1666,7 +1461,7 @@
 	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
 
 	/* Make sure transcoder isn't still depending on us */
-	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+	assert_pch_transcoder_disabled(dev_priv, intel_crtc->pipe);
 
 	reg = pll->pll_reg;
 	val = I915_READ(reg);
@@ -1706,7 +1501,7 @@
 		I915_WRITE(reg, val);
 	}
 
-	reg = TRANSCONF(pipe);
+	reg = PCH_TRANSCONF(pipe);
 	val = I915_READ(reg);
 	pipeconf_val = I915_READ(PIPECONF(pipe));
 
@@ -1731,7 +1526,7 @@
 
 	I915_WRITE(reg, val | TRANS_ENABLE);
 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
-		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
 }
 
 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1760,8 +1555,8 @@
 	else
 		val |= TRANS_PROGRESSIVE;
 
-	I915_WRITE(TRANSCONF(TRANSCODER_A), val);
-	if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+	I915_WRITE(LPT_TRANSCONF, val);
+	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
 		DRM_ERROR("Failed to enable PCH transcoder\n");
 }
 
@@ -1778,13 +1573,13 @@
 	/* Ports must be off as well */
 	assert_pch_ports_disabled(dev_priv, pipe);
 
-	reg = TRANSCONF(pipe);
+	reg = PCH_TRANSCONF(pipe);
 	val = I915_READ(reg);
 	val &= ~TRANS_ENABLE;
 	I915_WRITE(reg, val);
 	/* wait for PCH transcoder off, transcoder state */
 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
-		DRM_ERROR("failed to disable transcoder %d\n", pipe);
+		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
 	if (!HAS_PCH_IBX(dev)) {
 		/* Workaround: Clear the timing override chicken bit again. */
@@ -1799,11 +1594,11 @@
 {
 	u32 val;
 
-	val = I915_READ(_TRANSACONF);
+	val = I915_READ(LPT_TRANSCONF);
 	val &= ~TRANS_ENABLE;
-	I915_WRITE(_TRANSACONF, val);
+	I915_WRITE(LPT_TRANSCONF, val);
 	/* wait for PCH transcoder off, transcoder state */
-	if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
 		DRM_ERROR("Failed to disable PCH transcoder\n");
 
 	/* Workaround: clear timing override bit. */
@@ -1835,6 +1630,9 @@
 	int reg;
 	u32 val;
 
+	assert_planes_disabled(dev_priv, pipe);
+	assert_sprites_disabled(dev_priv, pipe);
+
 	if (HAS_PCH_LPT(dev_priv->dev))
 		pch_transcoder = TRANSCODER_A;
 	else
@@ -2096,7 +1894,7 @@
 	case 1:
 		break;
 	default:
-		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
 		return -EINVAL;
 	}
 
@@ -2193,7 +1991,7 @@
 	case 2:
 		break;
 	default:
-		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
 		return -EINVAL;
 	}
 
@@ -2384,9 +2182,9 @@
 	}
 
 	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
-		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
-				intel_crtc->plane,
-				INTEL_INFO(dev)->num_pipes);
+		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
+			  plane_name(intel_crtc->plane),
+			  INTEL_INFO(dev)->num_pipes);
 		return -EINVAL;
 	}
 
@@ -2467,6 +2265,11 @@
 			   FDI_FE_ERRC_ENABLE);
 }
 
+static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
+{
+	return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
+}
+
 static void ivb_modeset_global_resources(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2476,10 +2279,13 @@
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
 	uint32_t temp;
 
-	/* When everything is off disable fdi C so that we could enable fdi B
-	 * with all lanes. XXX: This misses the case where a pipe is not using
-	 * any pch resources and so doesn't need any fdi lanes. */
-	if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+	/*
+	 * When everything is off disable fdi C so that we could enable fdi B
+	 * with all lanes. Note that we don't care about enabled pipes without
+	 * an enabled pch encoder.
+	 */
+	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
+	    !pipe_has_enabled_pch(pipe_C_crtc)) {
 		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
 		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
@@ -2517,8 +2323,8 @@
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
-	temp &= ~(7 << 19);
-	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@ -2615,8 +2421,8 @@
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
-	temp &= ~(7 << 19);
-	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
 	temp &= ~FDI_LINK_TRAIN_NONE;
 	temp |= FDI_LINK_TRAIN_PATTERN_1;
 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2750,8 +2556,8 @@
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
-	temp &= ~(7 << 19);
-	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_DP_PORT_WIDTH_MASK;
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
 	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
 	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -2852,8 +2658,8 @@
 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
-	temp &= ~((0x7 << 19) | (0x7 << 16));
-	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
+	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
@@ -3085,6 +2891,30 @@
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+						enum pipe pch_transcoder)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+
+	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
+		   I915_READ(HTOTAL(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
+		   I915_READ(HBLANK(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
+		   I915_READ(HSYNC(cpu_transcoder)));
+
+	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
+		   I915_READ(VTOTAL(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
+		   I915_READ(VBLANK(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
+		   I915_READ(VSYNC(cpu_transcoder)));
+	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
+}
+
 /*
  * Enable PCH resources required for PCH ports:
  *   - PCH PLLs
@@ -3101,7 +2931,7 @@
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	assert_transcoder_disabled(dev_priv, pipe);
+	assert_pch_transcoder_disabled(dev_priv, pipe);
 
 	/* Write the TU size bits before fdi link training, so that error
 	 * detection works. */
@@ -3148,14 +2978,7 @@
 
 	/* set transcoder timing, panel must allow it */
 	assert_panel_unlocked(dev_priv, pipe);
-	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
-	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
-	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
-
-	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
-	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
-	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
-	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
+	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
 
 	intel_fdi_normal_train(crtc);
 
@@ -3205,19 +3028,12 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
-	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
 
 	lpt_program_iclkip(crtc);
 
 	/* Set transcoder timing. */
-	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
-	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
-	I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
-
-	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
-	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
-	I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
-	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
 
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
@@ -3294,7 +3110,7 @@
 found:
 	intel_crtc->pch_pll = pll;
 	pll->refcount++;
-	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+	DRM_DEBUG_DRIVER("using pll %d for pipe %c\n", i, pipe_name(intel_crtc->pipe));
 prepare: /* separate function? */
 	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
 
@@ -3309,7 +3125,7 @@
 	return pll;
 }
 
-void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
+static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dslreg = PIPEDSL(pipe);
@@ -3319,7 +3135,28 @@
 	udelay(500);
 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
 		if (wait_for(I915_READ(dslreg) != temp, 5))
-			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
+			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+	}
+}
+
+static void ironlake_pfit_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	if (crtc->config.pch_pfit.size) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
+		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
 	}
 }
 
@@ -3339,6 +3176,10 @@
 		return;
 
 	intel_crtc->active = true;
+
+	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
+
 	intel_update_watermarks(dev);
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -3363,21 +3204,7 @@
 			encoder->pre_enable(encoder);
 
 	/* Enable panel fitting for LVDS */
-	if (dev_priv->pch_pf_size &&
-	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-		/* Force use of hard-coded filter coefficients
-		 * as some pre-programmed values are broken,
-		 * e.g. x201.
-		 */
-		if (IS_IVYBRIDGE(dev))
-			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
-						 PF_PIPE_SEL_IVB(pipe));
-		else
-			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
-		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
-		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
-	}
+	ironlake_pfit_enable(intel_crtc);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -3402,7 +3229,7 @@
 		encoder->enable(encoder);
 
 	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+		cpt_verify_modeset(dev, intel_crtc->pipe);
 
 	/*
 	 * There seems to be a race in PCH platform hw (at least on some
@@ -3415,6 +3242,42 @@
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
+/* IPS only exists on ULT machines and is tied to pipe A. */
+static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
+{
+	return IS_ULT(crtc->base.dev) && crtc->pipe == PIPE_A;
+}
+
+static void hsw_enable_ips(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	/* We can only enable IPS after we enable a plane and wait for a vblank.
+	 * We guarantee that the plane is enabled by calling intel_enable_ips
+	 * only after intel_enable_plane. And intel_enable_plane already waits
+	 * for a vblank, so all we need to do here is to enable the IPS bit. */
+	assert_plane_enabled(dev_priv, crtc->plane);
+	I915_WRITE(IPS_CTL, IPS_ENABLE);
+}
+
+static void hsw_disable_ips(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	assert_plane_enabled(dev_priv, crtc->plane);
+	I915_WRITE(IPS_CTL, 0);
+
+	/* We need to wait for a vblank before we can disable the plane. */
+	intel_wait_for_vblank(dev, crtc->pipe);
+}
+
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3430,6 +3293,11 @@
 		return;
 
 	intel_crtc->active = true;
+
+	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
+	if (intel_crtc->config.has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+
 	intel_update_watermarks(dev);
 
 	if (intel_crtc->config.has_pch_encoder)
@@ -3442,17 +3310,7 @@
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	/* Enable panel fitting for eDP */
-	if (dev_priv->pch_pf_size &&
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
-		/* Force use of hard-coded filter coefficients
-		 * as some pre-programmed values are broken,
-		 * e.g. x201.
-		 */
-		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
-					 PF_PIPE_SEL_IVB(pipe));
-		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
-		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
-	}
+	ironlake_pfit_enable(intel_crtc);
 
 	/*
 	 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -3467,6 +3325,8 @@
 			  intel_crtc->config.has_pch_encoder);
 	intel_enable_plane(dev_priv, plane, pipe);
 
+	hsw_enable_ips(intel_crtc);
+
 	if (intel_crtc->config.has_pch_encoder)
 		lpt_pch_enable(crtc);
 
@@ -3490,6 +3350,21 @@
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
+static void ironlake_pfit_disable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	/* To avoid upsetting the power well on haswell only disable the pfit if
+	 * it's in use. The hw state code will make sure we get this right. */
+	if (crtc->config.pch_pfit.size) {
+		I915_WRITE(PF_CTL(pipe), 0);
+		I915_WRITE(PF_WIN_POS(pipe), 0);
+		I915_WRITE(PF_WIN_SZ(pipe), 0);
+	}
+}
+
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3516,11 +3391,10 @@
 	if (dev_priv->cfb_plane == plane)
 		intel_disable_fbc(dev);
 
+	intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 	intel_disable_pipe(dev_priv, pipe);
 
-	/* Disable PF */
-	I915_WRITE(PF_CTL(pipe), 0);
-	I915_WRITE(PF_WIN_SZ(pipe), 0);
+	ironlake_pfit_disable(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->post_disable)
@@ -3529,6 +3403,7 @@
 	ironlake_fdi_disable(crtc);
 
 	ironlake_disable_pch_transcoder(dev_priv, pipe);
+	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
 	if (HAS_PCH_CPT(dev)) {
 		/* disable TRANS_DP_CTL */
@@ -3590,22 +3465,21 @@
 	drm_vblank_off(dev, pipe);
 	intel_crtc_update_cursor(crtc, false);
 
-	intel_disable_plane(dev_priv, plane, pipe);
-
+	/* FBC must be disabled before disabling the plane on HSW. */
 	if (dev_priv->cfb_plane == plane)
 		intel_disable_fbc(dev);
 
+	hsw_disable_ips(intel_crtc);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+
+	if (intel_crtc->config.has_pch_encoder)
+		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
 	intel_disable_pipe(dev_priv, pipe);
 
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
-	/* XXX: Once we have proper panel fitter state tracking implemented with
-	 * hardware state read/check support we should switch to only disable
-	 * the panel fitter when we know it's used. */
-	if (intel_using_power_well(dev)) {
-		I915_WRITE(PF_CTL(pipe), 0);
-		I915_WRITE(PF_WIN_SZ(pipe), 0);
-	}
+	ironlake_pfit_disable(intel_crtc);
 
 	intel_ddi_disable_pipe_clock(intel_crtc);
 
@@ -3615,6 +3489,7 @@
 
 	if (intel_crtc->config.has_pch_encoder) {
 		lpt_disable_pch_transcoder(dev_priv);
+		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 		intel_ddi_fdi_disable(crtc);
 	}
 
@@ -3634,12 +3509,6 @@
 
 static void haswell_crtc_off(struct drm_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
-	 * start using it. */
-	intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
-
 	intel_ddi_put_crtc_pll(crtc);
 }
 
@@ -3685,6 +3554,79 @@
 	}
 }
 
+static void i9xx_pfit_enable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_config *pipe_config = &crtc->config;
+
+	if (!crtc->config.gmch_pfit.control)
+		return;
+
+	/*
+	 * The panel fitter should only be adjusted whilst the pipe is disabled,
+	 * according to register description and PRM.
+	 */
+	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
+	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
+	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+
+	/* Border color in case we don't scale up to the full screen. Black by
+	 * default, change to something else for debugging. */
+	I915_WRITE(BCLRPAT(crtc->pipe), 0);
+}
+
+static void valleyview_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	intel_enable_pll(dev_priv, pipe);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	/* VLV wants encoder enabling _before_ the pipe is up. */
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	/* Enable panel fitting for eDP */
+	i9xx_pfit_enable(intel_crtc);
+
+	intel_enable_pipe(dev_priv, pipe, false);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	intel_crtc_load_lut(crtc);
+	intel_update_fbc(dev);
+
+	/* Give the overlay scaler a chance to enable if it's on this pipe */
+	intel_crtc_dpms_overlay(intel_crtc, true);
+	intel_crtc_update_cursor(crtc, true);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3708,6 +3650,9 @@
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 
+	/* Enable panel fitting for LVDS */
+	i9xx_pfit_enable(intel_crtc);
+
 	intel_enable_pipe(dev_priv, pipe, false);
 	intel_enable_plane(dev_priv, plane, pipe);
 	if (IS_G4X(dev))
@@ -3728,20 +3673,15 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-	uint32_t pctl = I915_READ(PFIT_CONTROL);
+
+	if (!crtc->config.gmch_pfit.control)
+		return;
 
 	assert_pipe_disabled(dev_priv, crtc->pipe);
 
-	if (INTEL_INFO(dev)->gen >= 4)
-		pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
-	else
-		pipe = PIPE_B;
-
-	if (pipe == crtc->pipe) {
-		DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
-		I915_WRITE(PFIT_CONTROL, 0);
-	}
+	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
+			 I915_READ(PFIT_CONTROL));
+	I915_WRITE(PFIT_CONTROL, 0);
 }
 
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3773,6 +3713,10 @@
 
 	i9xx_pfit_disable(intel_crtc);
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
 	intel_disable_pll(dev_priv, pipe);
 
 	intel_crtc->active = false;
@@ -3845,8 +3789,8 @@
 	/* crtc should still be enabled when we disable it. */
 	WARN_ON(!crtc->enabled);
 
-	intel_crtc->eld_vld = false;
 	dev_priv->display.crtc_disable(crtc);
+	intel_crtc->eld_vld = false;
 	intel_crtc_update_sarea(crtc, false);
 	dev_priv->display.off(crtc);
 
@@ -3977,17 +3921,136 @@
 	return encoder->get_hw_state(encoder, &pipe);
 }
 
-static bool intel_crtc_compute_config(struct drm_crtc *crtc,
-				      struct intel_crtc_config *pipe_config)
+static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+				     struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
+		      pipe_name(pipe), pipe_config->fdi_lanes);
+	if (pipe_config->fdi_lanes > 4) {
+		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
+			      pipe_name(pipe), pipe_config->fdi_lanes);
+		return false;
+	}
+
+	if (IS_HASWELL(dev)) {
+		if (pipe_config->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
+				      pipe_config->fdi_lanes);
+			return false;
+		} else {
+			return true;
+		}
+	}
+
+	if (INTEL_INFO(dev)->num_pipes == 2)
+		return true;
+
+	/* Ivybridge's 3-pipe case is complicated: FDI links B and C share lanes. */
+	switch (pipe) {
+	case PIPE_A:
+		return true;
+	case PIPE_B:
+		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+		    pipe_config->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+				      pipe_name(pipe), pipe_config->fdi_lanes);
+			return false;
+		}
+		return true;
+	case PIPE_C:
+		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
+		    pipe_B_crtc->config.fdi_lanes <= 2) {
+			if (pipe_config->fdi_lanes > 2) {
+				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
+					      pipe_name(pipe), pipe_config->fdi_lanes);
+				return false;
+			}
+		} else {
+			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			return false;
+		}
+		return true;
+	default:
+		BUG();
+	}
+}
+
+#define RETRY 1
+static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
+				       struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	int target_clock, lane, link_bw;
+	bool setup_ok, needs_recompute = false;
+
+retry:
+	/* FDI is a binary signal running at ~2.7GHz, encoding
+	 * each output octet as 10 bits. The actual frequency
+	 * is stored as a divider into a 100MHz clock, and the
+	 * mode pixel clock is stored in units of 1KHz.
+	 * Hence the bw of each lane in terms of the mode signal
+	 * is:
+	 */
+	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+
+	if (pipe_config->pixel_target_clock)
+		target_clock = pipe_config->pixel_target_clock;
+	else
+		target_clock = adjusted_mode->clock;
+
+	lane = ironlake_get_lanes_required(target_clock, link_bw,
+					   pipe_config->pipe_bpp);
+
+	pipe_config->fdi_lanes = lane;
+
+	if (pipe_config->pixel_multiplier > 1)
+		link_bw *= pipe_config->pixel_multiplier;
+	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, target_clock,
+			       link_bw, &pipe_config->fdi_m_n);
+
+	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+					    intel_crtc->pipe, pipe_config);
+	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
+		pipe_config->pipe_bpp -= 2*3;
+		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
+			      pipe_config->pipe_bpp);
+		needs_recompute = true;
+		pipe_config->bw_constrained = true;
+
+		goto retry;
+	}
+
+	if (needs_recompute)
+		return RETRY;
+
+	return setup_ok ? 0 : -EINVAL;
+}
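A back-of-the-envelope illustration of the lane math above (illustration only, not part of the patch): assuming intel_fdi_link_freq() reports 27 (the usual 2.7 GHz link) and that ironlake_get_lanes_required() keeps its ~5% headroom, the computation lands on 2 FDI lanes for a 1080p60 stream at 24 bpp.

/* Standalone sketch of the FDI lane computation (hypothetical, not driver code). */
#include <stdio.h>

static int example_lanes_required(int target_clock, int link_bw, int bpp)
{
	/* ~5% headroom, 8 usable bits per lane symbol, mirrors ironlake_get_lanes_required() */
	int bps = target_clock * bpp * 21 / 20;

	return bps / (link_bw * 8) + 1;
}

int main(void)
{
	int link_bw = 27 * 100000 / 10;	/* 270000 kHz of mode clock per lane */

	/* 1080p60: 148500 kHz pixel clock at 24 bpp -> prints "lanes = 2" */
	printf("lanes = %d\n", example_lanes_required(148500, link_bw, 24));

	return 0;
}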
+
+static void hsw_compute_ips_config(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config)
+{
+	pipe_config->ips_enabled = i915_enable_ips &&
+				   hsw_crtc_supports_ips(crtc) &&
+				   pipe_config->pipe_bpp == 24;
+}
+
+static int intel_crtc_compute_config(struct drm_crtc *crtc,
+				     struct intel_crtc_config *pipe_config)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		/* FDI link clock is fixed at 2.7G */
 		if (pipe_config->requested_mode.clock * 3
 		    > IRONLAKE_FDI_FREQ * 4)
-			return false;
+			return -EINVAL;
 	}
 
 	/* All interlaced capable intel hw wants timings in frames. Note though
@@ -3996,12 +4059,12 @@
 	if (!pipe_config->timings_set)
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
-	 * with a hsync front porch of 0.
+	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
+	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
 	 */
 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
 		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
-		return false;
+		return -EINVAL;
 
 	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
 		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
@@ -4011,7 +4074,13 @@
 		pipe_config->pipe_bpp = 8*3;
 	}
 
-	return true;
+	if (IS_HASWELL(dev))
+		hsw_compute_ips_config(intel_crtc, pipe_config);
+
+	if (pipe_config->has_pch_encoder)
+		return ironlake_fdi_compute_config(intel_crtc, pipe_config);
+
+	return 0;
 }
 
 static int valleyview_get_display_clock_speed(struct drm_device *dev)
@@ -4120,7 +4189,7 @@
 {
 	if (i915_panel_use_ssc >= 0)
 		return i915_panel_use_ssc != 0;
-	return dev_priv->lvds_use_ssc
+	return dev_priv->vbt.lvds_use_ssc
 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
@@ -4156,7 +4225,7 @@
 		refclk = vlv_get_refclk(crtc);
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
-		refclk = dev_priv->lvds_ssc_freq * 1000;
+		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			      refclk / 1000);
 	} else if (!IS_GEN2(dev)) {
@@ -4168,28 +4237,14 @@
 	return refclk;
 }
 
-static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
+static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
 {
-	unsigned dotclock = crtc->config.adjusted_mode.clock;
-	struct dpll *clock = &crtc->config.dpll;
+	return (1 << dpll->n) << 16 | dpll->m1 << 8 | dpll->m2;
+}
 
-	/* SDVO TV has fixed PLL values depend on its clock range,
-	   this mirrors vbios setting. */
-	if (dotclock >= 100000 && dotclock < 140500) {
-		clock->p1 = 2;
-		clock->p2 = 10;
-		clock->n = 3;
-		clock->m1 = 16;
-		clock->m2 = 8;
-	} else if (dotclock >= 140500 && dotclock <= 200000) {
-		clock->p1 = 1;
-		clock->p2 = 10;
-		clock->n = 6;
-		clock->m1 = 12;
-		clock->m2 = 8;
-	}
-
-	crtc->config.clock_set = true;
+static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+{
+	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
 }
 
 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
@@ -4199,18 +4254,15 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = crtc->pipe;
 	u32 fp, fp2 = 0;
-	struct dpll *clock = &crtc->config.dpll;
 
 	if (IS_PINEVIEW(dev)) {
-		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
 		if (reduced_clock)
-			fp2 = (1 << reduced_clock->n) << 16 |
-				reduced_clock->m1 << 8 | reduced_clock->m2;
+			fp2 = pnv_dpll_compute_fp(reduced_clock);
 	} else {
-		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
 		if (reduced_clock)
-			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
-				reduced_clock->m2;
+			fp2 = i9xx_dpll_compute_fp(reduced_clock);
 	}
 
 	I915_WRITE(FP0(pipe), fp);
@@ -4225,6 +4277,68 @@
 	}
 }
 
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+{
+	u32 reg_val;
+
+	/*
+	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
+	 * and set it to a reasonable value instead.
+	 */
+	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val &= 0xffffff00;
+	reg_val |= 0x00000030;
+	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val &= 0x8cffffff;
+	reg_val = 0x8c000000;
+	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val &= 0xffffff00;
+	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+
+	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val &= 0x00ffffff;
+	reg_val |= 0xb0000000;
+	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+}
+
+static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+}
+
+static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+	} else {
+		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
+		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+	}
+}
+
 static void intel_dp_set_m_n(struct intel_crtc *crtc)
 {
 	if (crtc->config.has_pch_encoder)
@@ -4237,24 +4351,18 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode =
+		&crtc->config.adjusted_mode;
+	struct intel_encoder *encoder;
 	int pipe = crtc->pipe;
-	u32 dpll, mdiv, pdiv;
+	u32 dpll, mdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
-	bool is_sdvo;
-	u32 temp;
+	bool is_hdmi;
+	u32 coreclk, reg_val, dpll_md;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
-		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
-
-	dpll = DPLL_VGA_MODE_DIS;
-	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
-	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
-	dpll |= DPLL_INTEGRATED_CLOCK_VLV;
-
-	I915_WRITE(DPLL(pipe), dpll);
-	POSTING_READ(DPLL(pipe));
+	is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
 
 	bestn = crtc->config.dpll.n;
 	bestm1 = crtc->config.dpll.m1;
@@ -4262,72 +4370,106 @@
 	bestp1 = crtc->config.dpll.p1;
 	bestp2 = crtc->config.dpll.p2;
 
-	/*
-	 * In Valleyview PLL and program lane counter registers are exposed
-	 * through DPIO interface
-	 */
+	/* See eDP HDMI DPIO driver vbios notes doc */
+
+	/* PLL B needs special handling */
+	if (pipe)
+		vlv_pllb_recal_opamp(dev_priv);
+
+	/* Set up Tx target for periodic Rcomp update */
+	vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+
+	/* Disable target IRef on PLL */
+	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+	reg_val &= 0x00ffffff;
+	vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+
+	/* Disable fast lock */
+	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+
+	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
 	mdiv |= ((bestn << DPIO_N_SHIFT));
-	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
 	mdiv |= (1 << DPIO_K_SHIFT);
+
+	/*
+	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
+	 * but we don't support that).
+	 * Note: don't use the DAC post divider as it seems unstable.
+	 */
+	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
+	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
-	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+	/* Set HBR and RBR LPF coefficients */
+	if (adjusted_mode->clock == 162000 ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
+		vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe),
+				 0x005f0021);
+	else
+		vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe),
+				 0x00d0000f);
 
-	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
-		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
-		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
-		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
-	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
+		/* Use SSC source */
+		if (!pipe)
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df40000);
+		else
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df70000);
+	} else { /* HDMI or VGA */
+		/* Use bend source */
+		if (!pipe)
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df70000);
+		else
+			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+					 0x0df40000);
+	}
 
-	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
+		coreclk |= 0x01000000;
+	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+
+	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	/* Enable DPIO clock input */
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+	if (pipe)
+		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
 	dpll |= DPLL_VCO_ENABLE;
 	I915_WRITE(DPLL(pipe), dpll);
 	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+	dpll_md = 0;
+	if (crtc->config.pixel_multiplier > 1) {
+		dpll_md = (crtc->config.pixel_multiplier - 1)
+			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+	}
+	I915_WRITE(DPLL_MD(pipe), dpll_md);
+	POSTING_READ(DPLL_MD(pipe));
 
 	if (crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(crtc);
 
-	I915_WRITE(DPLL(pipe), dpll);
-
-	/* Wait for the clocks to stabilize. */
-	POSTING_READ(DPLL(pipe));
-	udelay(150);
-
-	temp = 0;
-	if (is_sdvo) {
-		temp = 0;
-		if (crtc->config.pixel_multiplier > 1) {
-			temp = (crtc->config.pixel_multiplier - 1)
-				<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-		}
-	}
-	I915_WRITE(DPLL_MD(pipe), temp);
-	POSTING_READ(DPLL_MD(pipe));
-
-	/* Now program lane control registers */
-	if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
-	   || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
-		temp = 0x1000C4;
-		if(pipe == 1)
-			temp |= (1 << 21);
-		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
-	}
-
-	if(intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
-		temp = 0x1000C4;
-		if(pipe == 1)
-			temp |= (1 << 21);
-		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
-	}
-
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -4355,14 +4497,15 @@
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
 
-	if (is_sdvo) {
-		if ((crtc->config.pixel_multiplier > 1) &&
-		    (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
-			dpll |= (crtc->config.pixel_multiplier - 1)
-				<< SDVO_MULTIPLIER_SHIFT_HIRES;
-		}
-		dpll |= DPLL_DVO_HIGH_SPEED;
+	if ((crtc->config.pixel_multiplier > 1) &&
+	    (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
+		dpll |= (crtc->config.pixel_multiplier - 1)
+			<< SDVO_MULTIPLIER_SHIFT_HIRES;
 	}
+
+	if (is_sdvo)
+		dpll |= DPLL_DVO_HIGH_SPEED;
+
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
@@ -4391,12 +4534,8 @@
 	if (INTEL_INFO(dev)->gen >= 4)
 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
-	if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+	if (crtc->config.sdvo_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
-		/* XXX: just matching BIOS for now */
-		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
-		dpll |= 3;
 	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
 		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4422,15 +4561,12 @@
 	udelay(150);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		u32 temp = 0;
-		if (is_sdvo) {
-			temp = 0;
-			if (crtc->config.pixel_multiplier > 1) {
-				temp = (crtc->config.pixel_multiplier - 1)
-					<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
-			}
+		u32 dpll_md = 0;
+		if (crtc->config.pixel_multiplier > 1) {
+			dpll_md = (crtc->config.pixel_multiplier - 1)
+				<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
 		}
-		I915_WRITE(DPLL_MD(pipe), temp);
+		I915_WRITE(DPLL_MD(pipe), dpll_md);
 	} else {
 		/* The pixel multiplier can only be updated once the
 		 * DPLL is enabled and the clocks are stable.
@@ -4505,12 +4641,17 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
-	uint32_t vsyncshift;
+	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
+
+	/* We need to be careful not to change the adjusted mode, for otherwise
+	 * the hw state checker will get angry at the mismatch. */
+	crtc_vtotal = adjusted_mode->crtc_vtotal;
+	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
 
 	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
+		crtc_vtotal -= 1;
+		crtc_vblank_end -= 1;
 		vsyncshift = adjusted_mode->crtc_hsync_start
 			     - adjusted_mode->crtc_htotal / 2;
 	} else {
@@ -4532,10 +4673,10 @@
 
 	I915_WRITE(VTOTAL(cpu_transcoder),
 		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+		   ((crtc_vtotal - 1) << 16));
 	I915_WRITE(VBLANK(cpu_transcoder),
 		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+		   ((crtc_vblank_end - 1) << 16));
 	I915_WRITE(VSYNC(cpu_transcoder),
 		   (adjusted_mode->crtc_vsync_start - 1) |
 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
@@ -4555,6 +4696,45 @@
 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 }
 
+static void intel_get_pipe_timings(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	uint32_t tmp;
+
+	tmp = I915_READ(HTOTAL(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HBLANK(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(HSYNC(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	tmp = I915_READ(VTOTAL(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VBLANK(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
+	tmp = I915_READ(VSYNC(cpu_transcoder));
+	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+
+	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		pipe_config->adjusted_mode.crtc_vtotal += 1;
+		pipe_config->adjusted_mode.crtc_vblank_end += 1;
+	}
+
+	tmp = I915_READ(PIPESRC(crtc->pipe));
+	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
+	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+}
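A quick illustration of the readback above (not part of the patch): each timing register packs two 16-bit "value minus one" fields, so a hypothetical 1080p HTOTAL readback of 0x0897077f decodes back to 1920 active pixels and 2200 total.

/* Hypothetical HTOTAL decode, mirroring intel_get_pipe_timings() (illustration only). */
#include <stdio.h>

int main(void)
{
	unsigned int htotal_reg = 0x0897077f;	/* assumed raw readback for a 1080p mode */
	int crtc_hdisplay = (htotal_reg & 0xffff) + 1;		/* 0x077f + 1 = 1920 */
	int crtc_htotal   = ((htotal_reg >> 16) & 0xffff) + 1;	/* 0x0897 + 1 = 2200 */

	printf("hdisplay=%d htotal=%d\n", crtc_hdisplay, crtc_htotal);
	return 0;
}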
+
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
@@ -4577,22 +4757,29 @@
 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
 	}
 
-	/* default to 8bpc */
-	pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
-	if (intel_crtc->config.has_dp_encoder) {
-		if (intel_crtc->config.dither) {
-			pipeconf |= PIPECONF_6BPC |
-				    PIPECONF_DITHER_EN |
-				    PIPECONF_DITHER_TYPE_SP;
-		}
-	}
+	/* only g4x and later have fancy bpc/dither controls */
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		pipeconf &= ~(PIPECONF_BPC_MASK |
+			      PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
 
-	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
-						      INTEL_OUTPUT_EDP)) {
-		if (intel_crtc->config.dither) {
-			pipeconf |= PIPECONF_6BPC |
-					PIPECONF_ENABLE |
-					I965_PIPECONF_ACTIVE;
+		/* Bspec claims that we can't use dithering for 30bpp pipes. */
+		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
+			pipeconf |= PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+
+		switch (intel_crtc->config.pipe_bpp) {
+		case 18:
+			pipeconf |= PIPECONF_6BPC;
+			break;
+		case 24:
+			pipeconf |= PIPECONF_8BPC;
+			break;
+		case 30:
+			pipeconf |= PIPECONF_10BPC;
+			break;
+		default:
+			/* Case prevented by intel_choose_pipe_bpp_dither. */
+			BUG();
 		}
 	}
 
@@ -4639,8 +4826,8 @@
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dspcntr;
-	bool ok, has_reduced_clock = false, is_sdvo = false;
-	bool is_lvds = false, is_tv = false;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
@@ -4650,15 +4837,6 @@
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
 		}
 
 		num_connectors++;
@@ -4704,9 +4882,6 @@
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(intel_crtc);
-
 	if (IS_GEN2(dev))
 		i8xx_update_pll(intel_crtc, adjusted_mode,
 				has_reduced_clock ? &reduced_clock : NULL,
@@ -4716,7 +4891,7 @@
 	else
 		i9xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+                                num_connectors);
 
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4728,9 +4903,6 @@
 			dspcntr |= DISPPLANE_SEL_PIPE_B;
 	}
 
-	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
-	drm_mode_debug_printmodeline(mode);
-
 	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
 	/* pipesrc and dspsize control the size that is scaled from,
@@ -4743,10 +4915,6 @@
 
 	i9xx_set_pipeconf(intel_crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-
-	intel_wait_for_vblank(dev, pipe);
-
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
 
@@ -4757,6 +4925,36 @@
 	return ret;
 }
 
+static void i9xx_get_pfit_config(struct intel_crtc *crtc,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PFIT_CONTROL);
+
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (crtc->pipe != PIPE_B)
+			return;
+
+		/* gen2/3 store dither state in pfit control, needs to match */
+		pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
+	} else {
+		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
+			return;
+	}
+
+	if (!(tmp & PFIT_ENABLE))
+		return;
+
+	pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+	if (INTEL_INFO(dev)->gen < 5)
+		pipe_config->gmch_pfit.lvds_border_bits =
+			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_config *pipe_config)
 {
@@ -4764,10 +4962,16 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	pipe_config->cpu_transcoder = crtc->pipe;
+
 	tmp = I915_READ(PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	i9xx_get_pfit_config(crtc, pipe_config);
+
 	return true;
 }
 
@@ -4779,7 +4983,6 @@
 	u32 val, final;
 	bool has_lvds = false;
 	bool has_cpu_edp = false;
-	bool has_pch_edp = false;
 	bool has_panel = false;
 	bool has_ck505 = false;
 	bool can_ssc = false;
@@ -4794,25 +4997,22 @@
 			break;
 		case INTEL_OUTPUT_EDP:
 			has_panel = true;
-			if (intel_encoder_is_pch_edp(&encoder->base))
-				has_pch_edp = true;
-			else
+			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
 				has_cpu_edp = true;
 			break;
 		}
 	}
 
 	if (HAS_PCH_IBX(dev)) {
-		has_ck505 = dev_priv->display_clock_mode;
+		has_ck505 = dev_priv->vbt.display_clock_mode;
 		can_ssc = has_ck505;
 	} else {
 		has_ck505 = false;
 		can_ssc = true;
 	}
 
-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
-		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
-		      has_ck505);
+	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+		      has_panel, has_lvds, has_ck505);
 
 	/* Ironlake: try to setup display ref clock before DPLL
 	 * enabling. This is only under driver's control after
@@ -5102,7 +5302,6 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
-	struct intel_encoder *edp_encoder = NULL;
 	int num_connectors = 0;
 	bool is_lvds = false;
 
@@ -5111,25 +5310,20 @@
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_EDP:
-			edp_encoder = encoder;
-			break;
 		}
 		num_connectors++;
 	}
 
 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
-			      dev_priv->lvds_ssc_freq);
-		return dev_priv->lvds_ssc_freq * 1000;
+			      dev_priv->vbt.lvds_ssc_freq);
+		return dev_priv->vbt.lvds_ssc_freq * 1000;
 	}
 
 	return 120000;
 }
 
-static void ironlake_set_pipeconf(struct drm_crtc *crtc,
-				  struct drm_display_mode *adjusted_mode,
-				  bool dither)
+static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5158,11 +5352,11 @@
 	}
 
 	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
-	if (dither)
+	if (intel_crtc->config.dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
 	val &= ~PIPECONF_INTERLACE_MASK;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -5240,9 +5434,7 @@
 	}
 }
 
-static void haswell_set_pipeconf(struct drm_crtc *crtc,
-				 struct drm_display_mode *adjusted_mode,
-				 bool dither)
+static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5252,11 +5444,11 @@
 	val = I915_READ(PIPECONF(cpu_transcoder));
 
 	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
-	if (dither)
+	if (intel_crtc->config.dither)
 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
 	val &= ~PIPECONF_INTERLACE_MASK_HSW;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		val |= PIPECONF_INTERLACED_ILK;
 	else
 		val |= PIPECONF_PROGRESSIVE;
@@ -5276,22 +5468,13 @@
 	struct intel_encoder *intel_encoder;
 	int refclk;
 	const intel_limit_t *limit;
-	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+	bool ret, is_lvds = false;
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (intel_encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
 		}
 	}
 
@@ -5322,9 +5505,6 @@
 						     reduced_clock);
 	}
 
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
-
 	return true;
 }
 
@@ -5346,65 +5526,25 @@
 	POSTING_READ(SOUTH_CHICKEN1);
 }
 
-static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *pipe_B_crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
-
-	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
-		      intel_crtc->pipe, intel_crtc->fdi_lanes);
-	if (intel_crtc->fdi_lanes > 4) {
-		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
-			      intel_crtc->pipe, intel_crtc->fdi_lanes);
-		/* Clamp lanes to avoid programming the hw with bogus values. */
-		intel_crtc->fdi_lanes = 4;
-
-		return false;
-	}
-
-	if (INTEL_INFO(dev)->num_pipes == 2)
-		return true;
 
 	switch (intel_crtc->pipe) {
 	case PIPE_A:
-		return true;
+		break;
 	case PIPE_B:
-		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
-		    intel_crtc->fdi_lanes > 2) {
-			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
-				      intel_crtc->pipe, intel_crtc->fdi_lanes);
-			/* Clamp lanes to avoid programming the hw with bogus values. */
-			intel_crtc->fdi_lanes = 2;
-
-			return false;
-		}
-
-		if (intel_crtc->fdi_lanes > 2)
+		if (intel_crtc->config.fdi_lanes > 2)
 			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
 		else
 			cpt_enable_fdi_bc_bifurcation(dev);
 
-		return true;
+		break;
 	case PIPE_C:
-		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
-			if (intel_crtc->fdi_lanes > 2) {
-				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
-					      intel_crtc->pipe, intel_crtc->fdi_lanes);
-				/* Clamp lanes to avoid programming the hw with bogus values. */
-				intel_crtc->fdi_lanes = 2;
-
-				return false;
-			}
-		} else {
-			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
-			return false;
-		}
-
 		cpt_enable_fdi_bc_bifurcation(dev);
 
-		return true;
+		break;
 	default:
 		BUG();
 	}
@@ -5421,78 +5561,13 @@
 	return bps / (link_bw * 8) + 1;
 }
 
-void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
-				  struct intel_link_m_n *m_n)
+static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
-
-	I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-	I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
-	I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
-	I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
-}
-
-void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-				  struct intel_link_m_n *m_n)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = crtc->pipe;
-	enum transcoder transcoder = crtc->config.cpu_transcoder;
-
-	if (INTEL_INFO(dev)->gen >= 5) {
-		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
-		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
-		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
-	} else {
-		I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
-		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
-		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
-		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
-	}
-}
-
-static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *adjusted_mode =
-		&intel_crtc->config.adjusted_mode;
-	struct intel_link_m_n m_n = {0};
-	int target_clock, lane, link_bw;
-
-	/* FDI is a binary signal running at ~2.7GHz, encoding
-	 * each output octet as 10 bits. The actual frequency
-	 * is stored as a divider into a 100MHz clock, and the
-	 * mode pixel clock is stored in units of 1KHz.
-	 * Hence the bw of each lane in terms of the mode signal
-	 * is:
-	 */
-	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
-
-	if (intel_crtc->config.pixel_target_clock)
-		target_clock = intel_crtc->config.pixel_target_clock;
-	else
-		target_clock = adjusted_mode->clock;
-
-	lane = ironlake_get_lanes_required(target_clock, link_bw,
-					   intel_crtc->config.pipe_bpp);
-
-	intel_crtc->fdi_lanes = lane;
-
-	if (intel_crtc->config.pixel_multiplier > 1)
-		link_bw *= intel_crtc->config.pixel_multiplier;
-	intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
-			       link_bw, &m_n);
-
-	intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
+	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
 }
 
 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
-				      intel_clock_t *clock, u32 *fp,
+				      u32 *fp,
 				      intel_clock_t *reduced_clock, u32 *fp2)
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
@@ -5501,7 +5576,7 @@
 	struct intel_encoder *intel_encoder;
 	uint32_t dpll;
 	int factor, num_connectors = 0;
-	bool is_lvds = false, is_sdvo = false, is_tv = false;
+	bool is_lvds = false, is_sdvo = false;
 
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 		switch (intel_encoder->type) {
@@ -5511,11 +5586,6 @@
 		case INTEL_OUTPUT_SDVO:
 		case INTEL_OUTPUT_HDMI:
 			is_sdvo = true;
-			if (intel_encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
 			break;
 		}
 
@@ -5526,13 +5596,13 @@
 	factor = 21;
 	if (is_lvds) {
 		if ((intel_panel_use_ssc(dev_priv) &&
-		     dev_priv->lvds_ssc_freq == 100) ||
+		     dev_priv->vbt.lvds_ssc_freq == 100) ||
 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
 			factor = 25;
-	} else if (is_sdvo && is_tv)
+	} else if (intel_crtc->config.sdvo_tv_clock)
 		factor = 20;
 
-	if (clock->m < factor * clock->n)
+	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
 		*fp |= FP_CB_TUNE;
 
 	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -5544,23 +5614,23 @@
 		dpll |= DPLLB_MODE_LVDS;
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
-	if (is_sdvo) {
-		if (intel_crtc->config.pixel_multiplier > 1) {
-			dpll |= (intel_crtc->config.pixel_multiplier - 1)
-				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
-		}
-		dpll |= DPLL_DVO_HIGH_SPEED;
+
+	if (intel_crtc->config.pixel_multiplier > 1) {
+		dpll |= (intel_crtc->config.pixel_multiplier - 1)
+			<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 	}
-	if (intel_crtc->config.has_dp_encoder &&
-	    intel_crtc->config.has_pch_encoder)
+
+	if (is_sdvo)
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	if (intel_crtc->config.has_dp_encoder)
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
-	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-	switch (clock->p2) {
+	switch (intel_crtc->config.dpll.p2) {
 	case 5:
 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 		break;
@@ -5575,13 +5645,7 @@
 		break;
 	}
 
-	if (is_sdvo && is_tv)
-		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (is_tv)
-		/* XXX: just matching BIOS for now */
-		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
-		dpll |= 3;
-	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -5603,12 +5667,11 @@
 	int plane = intel_crtc->plane;
 	int num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0;
+	u32 dpll = 0, fp = 0, fp2 = 0;
 	bool ok, has_reduced_clock = false;
 	bool is_lvds = false;
 	struct intel_encoder *encoder;
 	int ret;
-	bool dither, fdi_config_ok;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
@@ -5623,8 +5686,6 @@
 	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
 	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
-	intel_crtc->config.cpu_transcoder = pipe;
-
 	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
 				     &has_reduced_clock, &reduced_clock);
 	if (!ok) {
@@ -5643,30 +5704,22 @@
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	/* determine panel color depth */
-	dither = intel_crtc->config.dither;
-	if (is_lvds && dev_priv->lvds_dither)
-		dither = true;
-
-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-	if (has_reduced_clock)
-		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-			reduced_clock.m2;
-
-	dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
-				     has_reduced_clock ? &fp2 : NULL);
-
-	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
-	drm_mode_debug_printmodeline(mode);
-
 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
 	if (intel_crtc->config.has_pch_encoder) {
 		struct intel_pch_pll *pll;
 
+		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
+		if (has_reduced_clock)
+			fp2 = i9xx_dpll_compute_fp(&reduced_clock);
+
+		dpll = ironlake_compute_dpll(intel_crtc,
+					     &fp, &reduced_clock,
+					     has_reduced_clock ? &fp2 : NULL);
+
 		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
 		if (pll == NULL) {
-			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
-					 pipe);
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+					 pipe_name(pipe));
 			return -EINVAL;
 		}
 	} else
@@ -5706,17 +5759,15 @@
 
 	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
-	 * ironlake_check_fdi_lanes. */
-	intel_crtc->fdi_lanes = 0;
-	if (intel_crtc->config.has_pch_encoder)
-		ironlake_fdi_set_m_n(crtc);
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+					     &intel_crtc->config.fdi_m_n);
+	}
 
-	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+	if (IS_IVYBRIDGE(dev))
+		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
 
-	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
-
-	intel_wait_for_vblank(dev, pipe);
+	ironlake_set_pipeconf(crtc);
 
 	/* Set up the display plane register */
 	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
@@ -5726,9 +5777,38 @@
 
 	intel_update_watermarks(dev);
 
-	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+	return ret;
+}
 
-	return fdi_config_ok ? ret : -EINVAL;
+static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
+					struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder transcoder = pipe_config->cpu_transcoder;
+
+	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
+	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
+	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+					& ~TU_SIZE_MASK;
+	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void ironlake_get_pfit_config(struct intel_crtc *crtc,
+				     struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PF_CTL(crtc->pipe));
+
+	if (tmp & PF_ENABLE) {
+		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
+		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+	}
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -5738,42 +5818,43 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
 
+	pipe_config->cpu_transcoder = crtc->pipe;
+
 	tmp = I915_READ(PIPECONF(crtc->pipe));
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
-	if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
+	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 		pipe_config->has_pch_encoder = true;
 
+		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+	}
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	ironlake_get_pfit_config(crtc, pipe_config);
+
 	return true;
 }
 
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool enable = false;
 	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
-		if (crtc->pipe != PIPE_A && crtc->base.enabled)
-			enable = true;
-		/* XXX: Should check for edp transcoder here, but thanks to init
-		 * sequence that's not yet available. Just in case desktop eDP
-		 * on PORT D is possible on haswell, too. */
-	}
+		if (!crtc->base.enabled)
+			continue;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-			    base.head) {
-		if (encoder->type != INTEL_OUTPUT_EDP &&
-		    encoder->connectors_active)
+		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size ||
+		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
 			enable = true;
 	}
 
-	/* Even the eDP panel fitter is outside the always-on well. */
-	if (dev_priv->pch_pf_size)
-		enable = true;
-
 	intel_set_power_well(dev, enable);
 }
 
@@ -5793,12 +5874,11 @@
 	bool is_cpu_edp = false;
 	struct intel_encoder *encoder;
 	int ret;
-	bool dither;
 
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_EDP:
-			if (!intel_encoder_is_pch_edp(&encoder->base))
+			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
 				is_cpu_edp = true;
 			break;
 		}
@@ -5806,35 +5886,15 @@
 		num_connectors++;
 	}
 
-	if (is_cpu_edp)
-		intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
-	else
-		intel_crtc->config.cpu_transcoder = pipe;
-
-	/* We are not sure yet this won't happen. */
-	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
-	     INTEL_PCH_TYPE(dev));
-
 	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
 	     num_connectors, pipe_name(pipe));
 
-	WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
-		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
-
-	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
-
 	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
 		return -EINVAL;
 
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	/* determine panel color depth */
-	dither = intel_crtc->config.dither;
-
-	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
-	drm_mode_debug_printmodeline(mode);
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
@@ -5842,10 +5902,12 @@
 
 	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-	if (intel_crtc->config.has_pch_encoder)
-		ironlake_fdi_set_m_n(crtc);
+	if (intel_crtc->config.has_pch_encoder) {
+		intel_cpu_transcoder_set_m_n(intel_crtc,
+					     &intel_crtc->config.fdi_m_n);
+	}
 
-	haswell_set_pipeconf(crtc, adjusted_mode, dither);
+	haswell_set_pipeconf(crtc);
 
 	intel_set_pipe_csc(crtc);
 
@@ -5857,8 +5919,6 @@
 
 	intel_update_watermarks(dev);
 
-	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
-
 	return ret;
 }
 
@@ -5867,22 +5927,65 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum intel_display_power_domain pfit_domain;
 	uint32_t tmp;
 
-	tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
+	pipe_config->cpu_transcoder = crtc->pipe;
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+	if (tmp & TRANS_DDI_FUNC_ENABLE) {
+		enum pipe trans_edp_pipe;
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		default:
+			WARN(1, "unknown pipe linked to edp transcoder\n");
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+		case TRANS_DDI_EDP_INPUT_A_ON:
+			trans_edp_pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			trans_edp_pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			trans_edp_pipe = PIPE_C;
+			break;
+		}
+
+		if (trans_edp_pipe == crtc->pipe)
+			pipe_config->cpu_transcoder = TRANSCODER_EDP;
+	}
+
+	if (!intel_display_power_enabled(dev,
+			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
+		return false;
+
+	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
 	/*
-	 * aswell has only FDI/PCH transcoder A. It is which is connected to
+	 * Haswell has only FDI/PCH transcoder A, which is connected to
 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
 	 * the PCH transcoder is on.
 	 */
-	tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
 	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
-	    I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
+	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
 		pipe_config->has_pch_encoder = true;
 
+		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+		ironlake_get_fdi_m_n_config(crtc, pipe_config);
+	}
+
+	intel_get_pipe_timings(crtc, pipe_config);
+
+	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+	if (intel_display_power_enabled(dev, pfit_domain))
+		ironlake_get_pfit_config(crtc, pipe_config);
+
+	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
+				   (I915_READ(IPS_CTL) & IPS_ENABLE);
 
 	return true;
 }
@@ -6120,7 +6223,7 @@
 		eldv |= IBX_ELD_VALIDB << 4;
 		eldv |= IBX_ELD_VALIDB << 8;
 	} else {
-		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
+		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
 		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
 	}
 
@@ -6188,8 +6291,10 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int palreg = PALETTE(intel_crtc->pipe);
+	enum pipe pipe = intel_crtc->pipe;
+	int palreg = PALETTE(pipe);
 	int i;
+	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
 	if (!crtc->enabled || !intel_crtc->active)
@@ -6197,7 +6302,17 @@
 
 	/* use legacy palette for Ironlake */
 	if (HAS_PCH_SPLIT(dev))
-		palreg = LGC_PALETTE(intel_crtc->pipe);
+		palreg = LGC_PALETTE(pipe);
+
+	/* Workaround : Do not read or write the pipe palette/gamma data while
+	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+	 */
+	if (intel_crtc->config.ips_enabled &&
+	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+	     GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(intel_crtc);
+		reenable_ips = true;
+	}
 
 	for (i = 0; i < 256; i++) {
 		I915_WRITE(palreg + 4 * i,
@@ -6205,6 +6320,9 @@
 			   (intel_crtc->lut_g[i] << 8) |
 			   intel_crtc->lut_b[i]);
 	}
+
+	if (reenable_ips)
+		hsw_enable_ips(intel_crtc);
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6451,7 +6569,7 @@
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;
 
-	intel_crtc_update_cursor(crtc, true);
+	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
 fail_unpin:
@@ -6470,7 +6588,7 @@
 	intel_crtc->cursor_x = x;
 	intel_crtc->cursor_y = y;
 
-	intel_crtc_update_cursor(crtc, true);
+	intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
 	return 0;
 }
@@ -6984,6 +7102,8 @@
 		kfree(work);
 	}
 
+	intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
+
 	drm_crtc_cleanup(crtc);
 
 	kfree(intel_crtc);
@@ -7591,11 +7711,48 @@
 				      bpp, connector->display_info.bpc*3);
 			pipe_config->pipe_bpp = connector->display_info.bpc*3;
 		}
+
+		/* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
+		if (connector->display_info.bpc == 0 && bpp > 24) {
+			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+				      bpp);
+			pipe_config->pipe_bpp = 24;
+		}
 	}
 
 	return bpp;
 }
 
+static void intel_dump_pipe_config(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config,
+				   const char *context)
+{
+	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
+		      context, pipe_name(crtc->pipe));
+
+	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
+		      pipe_config->pipe_bpp, pipe_config->dither);
+	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		      pipe_config->has_pch_encoder,
+		      pipe_config->fdi_lanes,
+		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
+		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
+		      pipe_config->fdi_m_n.tu);
+	DRM_DEBUG_KMS("requested mode:\n");
+	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
+	DRM_DEBUG_KMS("adjusted mode:\n");
+	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+		      pipe_config->gmch_pfit.control,
+		      pipe_config->gmch_pfit.pgm_ratios,
+		      pipe_config->gmch_pfit.lvds_border_bits);
+	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n",
+		      pipe_config->pch_pfit.pos,
+		      pipe_config->pch_pfit.size);
+	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+}
+
 static struct intel_crtc_config *
 intel_modeset_pipe_config(struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
@@ -7605,7 +7762,8 @@
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct intel_encoder *encoder;
 	struct intel_crtc_config *pipe_config;
-	int plane_bpp;
+	int plane_bpp, ret = -EINVAL;
+	bool retry = true;
 
 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
 	if (!pipe_config)
@@ -7613,11 +7771,13 @@
 
 	drm_mode_copy(&pipe_config->adjusted_mode, mode);
 	drm_mode_copy(&pipe_config->requested_mode, mode);
+	pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
 
 	plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
 	if (plane_bpp < 0)
 		goto fail;
 
+encoder_retry:
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
@@ -7646,11 +7806,22 @@
 		}
 	}
 
-	if (!(intel_crtc_compute_config(crtc, pipe_config))) {
+	ret = intel_crtc_compute_config(crtc, pipe_config);
+	if (ret < 0) {
 		DRM_DEBUG_KMS("CRTC fixup failed\n");
 		goto fail;
 	}
-	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	if (ret == RETRY) {
+		if (WARN(!retry, "loop in pipe configuration computation\n")) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+		retry = false;
+		goto encoder_retry;
+	}
 
 	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
@@ -7659,7 +7830,7 @@
 	return pipe_config;
 fail:
 	kfree(pipe_config);
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(ret);
 }
 
 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
@@ -7755,6 +7926,9 @@
 	 */
 	*modeset_pipes &= 1 << intel_crtc->pipe;
 	*prepare_pipes &= 1 << intel_crtc->pipe;
+
+	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+		      *modeset_pipes, *prepare_pipes, *disable_pipes);
 }
 
 static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7821,20 +7995,83 @@
 	list_for_each_entry((intel_crtc), \
 			    &(dev)->mode_config.crtc_list, \
 			    base.head) \
-		if (mask & (1 <<(intel_crtc)->pipe)) \
+		if (mask & (1 <<(intel_crtc)->pipe))
 
 static bool
-intel_pipe_config_compare(struct intel_crtc_config *current_config,
+intel_pipe_config_compare(struct drm_device *dev,
+			  struct intel_crtc_config *current_config,
 			  struct intel_crtc_config *pipe_config)
 {
-	if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
-		DRM_ERROR("mismatch in has_pch_encoder "
-			  "(expected %i, found %i)\n",
-			  current_config->has_pch_encoder,
-			  pipe_config->has_pch_encoder);
-		return false;
+#define PIPE_CONF_CHECK_I(name)	\
+	if (current_config->name != pipe_config->name) { \
+		DRM_ERROR("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		return false; \
 	}
 
+#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
+	if ((current_config->name ^ pipe_config->name) & (mask)) { \
+		DRM_ERROR("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name & (mask), \
+			  pipe_config->name & (mask)); \
+		return false; \
+	}
+
+	PIPE_CONF_CHECK_I(cpu_transcoder);
+
+	PIPE_CONF_CHECK_I(has_pch_encoder);
+	PIPE_CONF_CHECK_I(fdi_lanes);
+	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
+	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
+	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
+	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
+	PIPE_CONF_CHECK_I(fdi_m_n.tu);
+
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
+
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
+	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
+
+	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+			      DRM_MODE_FLAG_INTERLACE);
+
+	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+			      DRM_MODE_FLAG_PHSYNC);
+	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+			      DRM_MODE_FLAG_NHSYNC);
+	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+			      DRM_MODE_FLAG_PVSYNC);
+	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
+			      DRM_MODE_FLAG_NVSYNC);
+
+	PIPE_CONF_CHECK_I(requested_mode.hdisplay);
+	PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+
+	PIPE_CONF_CHECK_I(gmch_pfit.control);
+	/* pfit ratios are autocomputed by the hw on gen4+ */
+	if (INTEL_INFO(dev)->gen < 4)
+		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+	PIPE_CONF_CHECK_I(pch_pfit.pos);
+	PIPE_CONF_CHECK_I(pch_pfit.size);
+
+	PIPE_CONF_CHECK_I(ips_enabled);
+
+#undef PIPE_CONF_CHECK_I
+#undef PIPE_CONF_CHECK_FLAGS
+
 	return true;
 }
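For reference (illustration only), each PIPE_CONF_CHECK_I(name) above is just a compare-and-bail; PIPE_CONF_CHECK_I(fdi_lanes), for instance, expands to roughly:

	if (current_config->fdi_lanes != pipe_config->fdi_lanes) {
		DRM_ERROR("mismatch in fdi_lanes "
			  "(expected %i, found %i)\n",
			  current_config->fdi_lanes,
			  pipe_config->fdi_lanes);
		return false;
	}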
 
@@ -7913,6 +8150,8 @@
 		bool enabled = false;
 		bool active = false;
 
+		memset(&pipe_config, 0, sizeof(pipe_config));
+
 		DRM_DEBUG_KMS("[CRTC:%d]\n",
 			      crtc->base.base.id);
 
@@ -7926,6 +8165,8 @@
 			enabled = true;
 			if (encoder->connectors_active)
 				active = true;
+			if (encoder->get_config)
+				encoder->get_config(encoder, &pipe_config);
 		}
 		WARN(active != crtc->active,
 		     "crtc's computed active state doesn't match tracked active state "
@@ -7934,7 +8175,6 @@
 		     "crtc's computed enabled state doesn't match tracked enabled state "
 		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
 
-		memset(&pipe_config, 0, sizeof(pipe_config));
 		active = dev_priv->display.get_pipe_config(crtc,
 							   &pipe_config);
 
@@ -7946,9 +8186,14 @@
 		     "crtc active state doesn't match with hw state "
 		     "(expected %i, found %i)\n", crtc->active, active);
 
-		WARN(active &&
-		     !intel_pipe_config_compare(&crtc->config, &pipe_config),
-		     "pipe state doesn't match!\n");
+		if (active &&
+		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
+			WARN(1, "pipe state doesn't match!\n");
+			intel_dump_pipe_config(crtc, &pipe_config,
+					       "[hw state]");
+			intel_dump_pipe_config(crtc, &crtc->config,
+					       "[sw state]");
+		}
 	}
 }
 
@@ -7988,11 +8233,10 @@
 
 			goto out;
 		}
+		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+				       "[modeset]");
 	}
 
-	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
-		      modeset_pipes, prepare_pipes, disable_pipes);
-
 	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 		intel_crtc_disable(&intel_crtc->base);
 
@@ -8005,12 +8249,10 @@
 	 * to set it here already despite that we pass it down the callchain.
 	 */
 	if (modeset_pipes) {
-		enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
 		crtc->mode = *mode;
 		/* mode_set/enable/disable functions rely on a correct pipe
 		 * config. */
 		to_intel_crtc(crtc)->config = *pipe_config;
-		to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
 	}
 
 	/* Only after disabling all output pipelines that will be changed can we
@@ -8349,12 +8591,6 @@
 		goto fail;
 
 	if (config->mode_changed) {
-		if (set->mode) {
-			DRM_DEBUG_KMS("attempting to set mode from"
-					" userspace\n");
-			drm_mode_debug_printmodeline(set->mode);
-		}
-
 		ret = intel_set_mode(set->crtc, set->mode,
 				     set->x, set->y, set->fb);
 	} else if (config->fb_changed) {
@@ -8436,7 +8672,6 @@
 	/* Swap pipes & planes for FBC on pre-965 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
-	intel_crtc->config.cpu_transcoder = pipe;
 	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = !pipe;
@@ -8598,10 +8833,8 @@
 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
 			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
-				DRM_DEBUG_KMS("probing DP_B\n");
+			if (!found && SUPPORTS_INTEGRATED_DP(dev))
 				intel_dp_init(dev, DP_B, PORT_B);
-			}
 		}
 
 		/* Before G4X SDVOC doesn't have its own detect register */
@@ -8617,17 +8850,13 @@
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
 			}
-			if (SUPPORTS_INTEGRATED_DP(dev)) {
-				DRM_DEBUG_KMS("probing DP_C\n");
+			if (SUPPORTS_INTEGRATED_DP(dev))
 				intel_dp_init(dev, DP_C, PORT_C);
-			}
 		}
 
 		if (SUPPORTS_INTEGRATED_DP(dev) &&
-		    (I915_READ(DP_D) & DP_DETECTED)) {
-			DRM_DEBUG_KMS("probing DP_D\n");
+		    (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D, PORT_D);
-		}
 	} else if (IS_GEN2(dev))
 		intel_dvo_init(dev);
 
@@ -8712,7 +8941,8 @@
 	case DRM_FORMAT_XRGB1555:
 	case DRM_FORMAT_ARGB1555:
 		if (INTEL_INFO(dev)->gen > 3) {
-			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
 			return -EINVAL;
 		}
 		break;
@@ -8723,7 +8953,8 @@
 	case DRM_FORMAT_XBGR2101010:
 	case DRM_FORMAT_ABGR2101010:
 		if (INTEL_INFO(dev)->gen < 4) {
-			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
 			return -EINVAL;
 		}
 		break;
@@ -8732,12 +8963,14 @@
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
 		if (INTEL_INFO(dev)->gen < 5) {
-			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			DRM_DEBUG("unsupported pixel format: %s\n",
+				  drm_get_format_name(mode_cmd->pixel_format));
 			return -EINVAL;
 		}
 		break;
 	default:
-		DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+		DRM_DEBUG("unsupported pixel format: %s\n",
+			  drm_get_format_name(mode_cmd->pixel_format));
 		return -EINVAL;
 	}
 
@@ -8796,6 +9029,13 @@
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
 		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_plane = ironlake_update_plane;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.crtc_enable = valleyview_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		dev_priv->display.off = i9xx_crtc_off;
+		dev_priv->display.update_plane = i9xx_update_plane;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
@@ -9037,6 +9277,11 @@
 	mutex_unlock(&dev->struct_mutex);
 }
 
+void intel_modeset_suspend_hw(struct drm_device *dev)
+{
+	intel_suspend_hw(dev);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9082,8 +9327,8 @@
 		for (j = 0; j < dev_priv->num_plane; j++) {
 			ret = intel_plane_init(dev, i, j);
 			if (ret)
-				DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
-					      i, j, ret);
+				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
+					      pipe_name(i), sprite_name(i, j), ret);
 		}
 	}
 
@@ -9296,50 +9541,14 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
-	u32 tmp;
 	struct drm_plane *plane;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 
-	if (HAS_DDI(dev)) {
-		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
-
-		if (tmp & TRANS_DDI_FUNC_ENABLE) {
-			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
-			case TRANS_DDI_EDP_INPUT_A_ON:
-			case TRANS_DDI_EDP_INPUT_A_ONOFF:
-				pipe = PIPE_A;
-				break;
-			case TRANS_DDI_EDP_INPUT_B_ONOFF:
-				pipe = PIPE_B;
-				break;
-			case TRANS_DDI_EDP_INPUT_C_ONOFF:
-				pipe = PIPE_C;
-				break;
-			default:
-				/* A bogus value has been programmed, disable
-				 * the transcoder */
-				WARN(1, "Bogus eDP source %08x\n", tmp);
-				intel_ddi_disable_transcoder_func(dev_priv,
-						TRANSCODER_EDP);
-				goto setup_pipes;
-			}
-
-			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-			crtc->config.cpu_transcoder = TRANSCODER_EDP;
-
-			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
-				      pipe_name(pipe));
-		}
-	}
-
-setup_pipes:
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
 			    base.head) {
-		enum transcoder tmp = crtc->config.cpu_transcoder;
 		memset(&crtc->config, 0, sizeof(crtc->config));
-		crtc->config.cpu_transcoder = tmp;
 
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 &crtc->config);
@@ -9359,8 +9568,10 @@
 		pipe = 0;
 
 		if (encoder->get_hw_state(encoder, &pipe)) {
-			encoder->base.crtc =
-				dev_priv->pipe_to_crtc_mapping[pipe];
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			encoder->base.crtc = &crtc->base;
+			if (encoder->get_config)
+				encoder->get_config(encoder, &crtc->config);
 		} else {
 			encoder->base.crtc = NULL;
 		}
@@ -9398,6 +9609,7 @@
 	for_each_pipe(pipe) {
 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 		intel_sanitize_crtc(crtc);
+		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
 	}
 
 	if (force_restore) {
@@ -9440,12 +9652,23 @@
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
 
+	/*
+	 * Disable interrupts and polling as the first thing to avoid creating
+	 * havoc. Too much stuff here (turning off rps, connectors, ...) would
+	 * experience fancy races otherwise.
+	 */
+	drm_irq_uninstall(dev);
+	cancel_work_sync(&dev_priv->hotplug_work);
+	/*
+	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
+	 * poll handlers. Hence disable polling only after hpd handling is shut down.
+	 */
 	drm_kms_helper_poll_fini(dev);
+
 	mutex_lock(&dev->struct_mutex);
 
 	intel_unregister_dsm_handler();
 
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		/* Skip inactive CRTCs */
 		if (!crtc->fb)
@@ -9461,17 +9684,8 @@
 
 	ironlake_teardown_rc6(dev);
 
-	if (IS_VALLEYVIEW(dev))
-		vlv_init_dpio(dev);
-
 	mutex_unlock(&dev->struct_mutex);
 
-	/* Disable the irq before mode object teardown, for the irq might
-	 * enqueue unpin/hotplug work. */
-	drm_irq_uninstall(dev);
-	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps.work);
-
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
 
@@ -9520,6 +9734,9 @@
 #include <linux/seq_file.h>
 
 struct intel_display_error_state {
+
+	u32 power_well_driver;
+
 	struct intel_cursor_error_state {
 		u32 control;
 		u32 position;
@@ -9528,6 +9745,7 @@
 	} cursor[I915_MAX_PIPES];
 
 	struct intel_pipe_error_state {
+		enum transcoder cpu_transcoder;
 		u32 conf;
 		u32 source;
 
@@ -9562,8 +9780,12 @@
 	if (error == NULL)
 		return NULL;
 
+	if (HAS_POWER_WELL(dev))
+		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+
 	for_each_pipe(i) {
 		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+		error->pipe[i].cpu_transcoder = cpu_transcoder;
 
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
 			error->cursor[i].control = I915_READ(CURCNTR(i));
@@ -9598,46 +9820,60 @@
 		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
+	/* In the code above we read the registers without checking if the power
+	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
+	 * prevent the next I915_WRITE from detecting it and printing an error
+	 * message. */
+	if (HAS_POWER_WELL(dev))
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
 	return error;
 }
 
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+
 void
-intel_display_print_error_state(struct seq_file *m,
+intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 				struct drm_device *dev,
 				struct intel_display_error_state *error)
 {
 	int i;
 
-	seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	if (HAS_POWER_WELL(dev))
+		err_printf(m, "PWR_WELL_CTL2: %08x\n",
+			   error->power_well_driver);
 	for_each_pipe(i) {
-		seq_printf(m, "Pipe [%d]:\n", i);
-		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
-		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+		err_printf(m, "Pipe [%d]:\n", i);
+		err_printf(m, "  CPU transcoder: %c\n",
+			   transcoder_name(error->pipe[i].cpu_transcoder));
+		err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
 
-		seq_printf(m, "Plane [%d]:\n", i);
-		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
-		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		err_printf(m, "Plane [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
 		if (INTEL_INFO(dev)->gen <= 3) {
-			seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
-			seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
 		}
 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-			seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
 		if (INTEL_INFO(dev)->gen >= 4) {
-			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
-			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
 		}
 
-		seq_printf(m, "Cursor [%d]:\n", i);
-		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
-		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
-		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+		err_printf(m, "Cursor [%d]:\n", i);
+		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
 	}
 }
 #endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 70789b1..91a31b3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -52,30 +52,6 @@
 	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
-/**
- * is_pch_edp - is the port on the PCH and attached to an eDP panel?
- * @intel_dp: DP struct
- *
- * Returns true if the given DP struct corresponds to a PCH DP port attached
- * to an eDP panel, false otherwise.  Helpful for determining whether we
- * may need FDI resources for a given DP output or not.
- */
-static bool is_pch_edp(struct intel_dp *intel_dp)
-{
-	return intel_dp->is_pch_edp;
-}
-
-/**
- * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
- * @intel_dp: DP struct
- *
- * Returns true if the given DP struct corresponds to a CPU eDP port.
- */
-static bool is_cpu_edp(struct intel_dp *intel_dp)
-{
-	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
-}
-
 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -88,25 +64,6 @@
 	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 }
 
-/**
- * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
- * @encoder: DRM encoder
- *
- * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
- * by intel_display.c.
- */
-bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
-{
-	struct intel_dp *intel_dp;
-
-	if (!encoder)
-		return false;
-
-	intel_dp = enc_to_intel_dp(encoder);
-
-	return is_pch_edp(intel_dp);
-}
-
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 static int
@@ -344,11 +301,12 @@
 	 * Note that PCH attached eDP panels should use a 125MHz input
 	 * clock divider.
 	 */
-	if (is_cpu_edp(intel_dp)) {
+	if (IS_VALLEYVIEW(dev)) {
+		aux_clock_divider = 100;
+	} else if (intel_dig_port->port == PORT_A) {
 		if (HAS_DDI(dev))
-			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
-		else if (IS_VALLEYVIEW(dev))
-			aux_clock_divider = 100;
+			aux_clock_divider = DIV_ROUND_CLOSEST(
+				intel_ddi_get_cdclk_freq(dev_priv), 2000);
 		else if (IS_GEN6(dev) || IS_GEN7(dev))
 			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
 		else
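
With DDI the AUX clock divider is now derived from the CD clock via DIV_ROUND_CLOSEST() rather than a plain shift. Assuming intel_ddi_get_cdclk_freq() reports the CD clock in kHz (an assumption for this illustration, not shown in the diff), dividing by 2000 targets a roughly 2 MHz AUX reference clock: a hypothetical 450 MHz CD clock gives DIV_ROUND_CLOSEST(450000, 2000) = 225. A minimal sketch of that arithmetic:

    /* Sketch of the rounding division used above; the kernel's
     * DIV_ROUND_CLOSEST() also handles negative values, this one is for
     * positive operands only.  A CD clock reported in kHz is an
     * assumption here. */
    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

    int main(void)
    {
    	unsigned int cdclk_khz = 450000;	/* hypothetical 450 MHz CD clock */
    	unsigned int divider = DIV_ROUND_CLOSEST(cdclk_khz, 2000);

    	printf("aux clock divider: %u\n", divider);	/* prints 225 */
    	return 0;
    }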
@@ -660,6 +618,49 @@
 	return ret;
 }
 
+static void
+intel_dp_set_clock(struct intel_encoder *encoder,
+		   struct intel_crtc_config *pipe_config, int link_bw)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (IS_G4X(dev)) {
+		if (link_bw == DP_LINK_BW_1_62) {
+			pipe_config->dpll.p1 = 2;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.n = 2;
+			pipe_config->dpll.m1 = 23;
+			pipe_config->dpll.m2 = 8;
+		} else {
+			pipe_config->dpll.p1 = 1;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.n = 1;
+			pipe_config->dpll.m1 = 14;
+			pipe_config->dpll.m2 = 2;
+		}
+		pipe_config->clock_set = true;
+	} else if (IS_HASWELL(dev)) {
+		/* Haswell has special-purpose DP DDI clocks. */
+	} else if (HAS_PCH_SPLIT(dev)) {
+		if (link_bw == DP_LINK_BW_1_62) {
+			pipe_config->dpll.n = 1;
+			pipe_config->dpll.p1 = 2;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.m1 = 12;
+			pipe_config->dpll.m2 = 9;
+		} else {
+			pipe_config->dpll.n = 2;
+			pipe_config->dpll.p1 = 1;
+			pipe_config->dpll.p2 = 10;
+			pipe_config->dpll.m1 = 14;
+			pipe_config->dpll.m2 = 8;
+		}
+		pipe_config->clock_set = true;
+	} else if (IS_VALLEYVIEW(dev)) {
+		/* FIXME: Need to figure out optimized DP clocks for vlv. */
+	}
+}
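
The new intel_dp_set_clock() pins down fixed DPLL divider sets per platform family, keyed on the 1.62 vs 2.7 GHz link rate. Purely as an alternative way of organising the same data (not what the patch does), the values can also be kept in small lookup tables, which makes adding further link rates later a one-line change:

    /* Sketch only: the same divider values as the G4X and PCH-split
     * branches above, expressed as tables.  Types are simplified for
     * illustration; 0x06/0x0a are the DPCD codes for 1.62/2.7 GHz. */
    #include <stdio.h>

    struct demo_dpll { int p1, p2, n, m1, m2; };

    struct demo_dp_clock {
    	int link_bw;
    	struct demo_dpll dpll;
    };

    static const struct demo_dp_clock g4x_dp_clocks[] = {
    	{ 0x06, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
    	{ 0x0a, { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } },
    };

    static const struct demo_dp_clock pch_dp_clocks[] = {
    	{ 0x06, { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
    	{ 0x0a, { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } },
    };

    static const struct demo_dpll *
    demo_dp_clock_lookup(const struct demo_dp_clock *table, int count, int link_bw)
    {
    	int i;

    	for (i = 0; i < count; i++)
    		if (table[i].link_bw == link_bw)
    			return &table[i].dpll;
    	return NULL;	/* unknown link rate */
    }

    int main(void)
    {
    	const struct demo_dpll *d = demo_dp_clock_lookup(pch_dp_clocks, 2, 0x0a);

    	if (d)
    		printf("2.7 GHz on PCH: n=%d m1=%d m2=%d p1=%d p2=%d\n",
    		       d->n, d->m1, d->m2, d->p1, d->p2);
    	return 0;
    }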
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_config *pipe_config)
@@ -667,8 +668,9 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-	struct drm_display_mode *mode = &pipe_config->requested_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct intel_crtc *intel_crtc = encoder->new_crtc;
 	struct intel_connector *intel_connector = intel_dp->attached_connector;
 	int lane_count, clock;
 	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
@@ -677,7 +679,7 @@
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 	int target_clock, link_avail, link_clock;
 
-	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
 	pipe_config->has_dp_encoder = true;
@@ -685,9 +687,12 @@
 	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
 		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
 				       adjusted_mode);
-		intel_pch_panel_fitting(dev,
-					intel_connector->panel.fitting_mode,
-					mode, adjusted_mode);
+		if (!HAS_PCH_SPLIT(dev))
+			intel_gmch_panel_fitting(intel_crtc, pipe_config,
+						 intel_connector->panel.fitting_mode);
+		else
+			intel_pch_panel_fitting(intel_crtc, pipe_config,
+						intel_connector->panel.fitting_mode);
 	}
 	/* We need to take the panel's fixed mode into account. */
 	target_clock = adjusted_mode->clock;
@@ -702,8 +707,8 @@
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
-	if (is_edp(intel_dp) && dev_priv->edp.bpp)
-		bpp = min_t(int, bpp, dev_priv->edp.bpp);
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
 		mode_rate = intel_dp_link_required(target_clock, bpp);
@@ -755,6 +760,8 @@
 			       target_clock, adjusted_mode->clock,
 			       &pipe_config->dp_m_n);
 
+	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+
 	return true;
 }
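
The bpp loop in intel_dp_compute_config() walks down from the pipe bpp in 2-bpc steps until the mode's data rate fits on the link. As a back-of-the-envelope check (generic DisplayPort arithmetic, not the driver's exact helpers): a 2.7 Gbit/s lane carries 2.16 Gbit/s of payload after 8b/10b coding, so four lanes give 8.64 Gbit/s, while 1080p60 at 24 bpp needs about 148.5 MHz x 24, i.e. 3.56 Gbit/s, and fits easily; the same mode on a single 1.62 Gbit/s lane does not fit even at 18 bpp, which is the case where the function finally gives up. In code:

    /* Generic DP bandwidth estimate; clocks in kHz, rates in kbit/s.
     * 270000 kHz is the 2.7 Gbit/s link's symbol clock. */
    #include <stdio.h>

    static unsigned long dp_link_payload_kbps(unsigned long symbol_clock_khz, int lanes)
    {
    	/* 8b/10b: each 10-bit symbol carries 8 payload bits per lane. */
    	return symbol_clock_khz * 8UL * lanes;
    }

    int main(void)
    {
    	unsigned long avail = dp_link_payload_kbps(270000, 4);
    	unsigned long need = 148500UL * 24;	/* 1080p60 at 24 bpp */

    	printf("need %lu kbit/s, link carries %lu kbit/s -> %s\n",
    	       need, avail, need <= avail ? "fits" : "does not fit");
    	return 0;
    }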
 
@@ -806,6 +813,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
@@ -833,18 +841,8 @@
 
 	/* Handle DP bits in common between all three register formats */
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
-	switch (intel_dp->lane_count) {
-	case 1:
-		intel_dp->DP |= DP_PORT_WIDTH_1;
-		break;
-	case 2:
-		intel_dp->DP |= DP_PORT_WIDTH_2;
-		break;
-	case 4:
-		intel_dp->DP |= DP_PORT_WIDTH_4;
-		break;
-	}
 	if (intel_dp->has_audio) {
 		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
 				 pipe_name(intel_crtc->pipe));
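
The open-coded lane-count switch collapses into DP_PORT_WIDTH(). The macro's definition is not part of this diff; presumably it stores (lanes - 1) in the register's port-width field, which is consistent with the old DP_PORT_WIDTH_1/2/4 constants the switch used. A quick equivalence check under that assumption (field position and encoding are assumptions here, not taken from the patch):

    /* Assumed encoding, for illustration only. */
    #include <assert.h>

    #define DEMO_PORT_WIDTH_SHIFT	19
    #define DEMO_PORT_WIDTH(width)	(((width) - 1) << DEMO_PORT_WIDTH_SHIFT)

    int main(void)
    {
    	assert(DEMO_PORT_WIDTH(1) == (0 << DEMO_PORT_WIDTH_SHIFT));
    	assert(DEMO_PORT_WIDTH(2) == (1 << DEMO_PORT_WIDTH_SHIFT));
    	assert(DEMO_PORT_WIDTH(4) == (3 << DEMO_PORT_WIDTH_SHIFT));
    	return 0;
    }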
@@ -856,7 +854,7 @@
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -873,7 +871,7 @@
 			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
 			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
-	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
 		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
 			intel_dp->DP |= intel_dp->color_range;
 
@@ -889,7 +887,7 @@
 		if (intel_crtc->pipe == 1)
 			intel_dp->DP |= DP_PIPEB_SELECT;
 
-		if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+		if (port == PORT_A && !IS_VALLEYVIEW(dev)) {
 			/* don't miss out required setting for eDP */
 			if (adjusted_mode->clock < 200000)
 				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
@@ -900,7 +898,7 @@
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	}
 
-	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
+	if (port == PORT_A && !IS_VALLEYVIEW(dev))
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 }
 
@@ -1290,6 +1288,7 @@
 				  enum pipe *pipe)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp = I915_READ(intel_dp->output_reg);
@@ -1297,9 +1296,9 @@
 	if (!(tmp & DP_PORT_EN))
 		return false;
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		*pipe = PORT_TO_PIPE_CPT(tmp);
-	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
 		*pipe = PORT_TO_PIPE(tmp);
 	} else {
 		u32 trans_sel;
@@ -1335,9 +1334,33 @@
 	return true;
 }
 
+static void intel_dp_get_config(struct intel_encoder *encoder,
+				struct intel_crtc_config *pipe_config)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(intel_dp->output_reg);
+
+	if (tmp & DP_SYNC_HS_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (tmp & DP_SYNC_VS_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 static void intel_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
+	struct drm_device *dev = encoder->base.dev;
 
 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
@@ -1347,16 +1370,17 @@
 	ironlake_edp_panel_off(intel_dp);
 
 	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
-	if (!is_cpu_edp(intel_dp))
+	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
 		intel_dp_link_down(intel_dp);
 }
 
 static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_device *dev = encoder->base.dev;
 
-	if (is_cpu_edp(intel_dp)) {
+	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
 		intel_dp_link_down(intel_dp);
 		if (!IS_VALLEYVIEW(dev))
 			ironlake_edp_pll_off(intel_dp);
@@ -1381,15 +1405,73 @@
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		struct intel_digital_port *dport =
+			enc_to_dig_port(&encoder->base);
+		int channel = vlv_dport_to_channel(dport);
+
+		vlv_wait_port_ready(dev_priv, channel);
+	}
 }
 
 static void intel_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
+	if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
 		ironlake_edp_pll_on(intel_dp);
+
+	if (IS_VALLEYVIEW(dev)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(encoder->base.crtc);
+		int port = vlv_dport_to_channel(dport);
+		int pipe = intel_crtc->pipe;
+		u32 val;
+
+		val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+		val = 0;
+		if (pipe)
+			val |= (1<<21);
+		else
+			val &= ~(1<<21);
+		val |= 0x001000c4;
+		vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+
+		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+				 0x00760018);
+		vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+				 0x00400888);
+	}
+}
+
+static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int port = vlv_dport_to_channel(dport);
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/* Program Tx lane resets to default */
+	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+			 DPIO_PCS_TX_LANE2_RESET |
+			 DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+				 DPIO_PCS_CLK_SOFT_RESET);
+
+	/* Fix up inter-pair skew failure */
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
 }
 
 /*
@@ -1451,10 +1533,13 @@
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 
-	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+	if (IS_VALLEYVIEW(dev))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else if (IS_GEN7(dev) && port == PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_800;
-	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+	else if (HAS_PCH_CPT(dev) && port != PORT_A)
 		return DP_TRAIN_VOLTAGE_SWING_1200;
 	else
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1464,6 +1549,7 @@
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	enum port port = dp_to_dig_port(intel_dp)->port;
 
 	if (HAS_DDI(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
@@ -1477,7 +1563,19 @@
 		default:
 			return DP_TRAIN_PRE_EMPHASIS_0;
 		}
-	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_9_5;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_GEN7(dev) && port == PORT_A) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1502,6 +1600,101 @@
 	}
 }
 
+static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	unsigned long demph_reg_value, preemph_reg_value,
+		uniqtranscale_reg_value;
+	uint8_t train_set = intel_dp->train_set[0];
+	int port = vlv_dport_to_channel(dport);
+
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPHASIS_0:
+		preemph_reg_value = 0x0004000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x552AB83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5548B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			demph_reg_value = 0x2B245555;
+			uniqtranscale_reg_value = 0x5560B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+			demph_reg_value = 0x2B405555;
+			uniqtranscale_reg_value = 0x5598DA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_3_5:
+		preemph_reg_value = 0x0002000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x5552B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B404848;
+			uniqtranscale_reg_value = 0x5580B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			demph_reg_value = 0x2B404040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_6:
+		preemph_reg_value = 0x0000000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x2B305555;
+			uniqtranscale_reg_value = 0x5570B83A;
+			break;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			demph_reg_value = 0x2B2B4040;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_9_5:
+		preemph_reg_value = 0x0006000;
+		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			demph_reg_value = 0x1B405555;
+			uniqtranscale_reg_value = 0x55ADDA3A;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+			 uniqtranscale_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+
+	return 0;
+}
+
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
@@ -1669,6 +1862,7 @@
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	uint32_t signal_levels, mask;
 	uint8_t train_set = intel_dp->train_set[0];
@@ -1676,10 +1870,13 @@
 	if (HAS_DDI(dev)) {
 		signal_levels = intel_hsw_signal_levels(train_set);
 		mask = DDI_BUF_EMP_MASK;
-	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_VALLEYVIEW(dev)) {
+		signal_levels = intel_vlv_signal_levels(intel_dp);
+		mask = 0;
+	} else if (IS_GEN7(dev) && port == PORT_A) {
 		signal_levels = intel_gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
-	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+	} else if (IS_GEN6(dev) && port == PORT_A) {
 		signal_levels = intel_gen6_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
 	} else {
@@ -1729,8 +1926,7 @@
 		}
 		I915_WRITE(DP_TP_CTL(port), temp);
 
-	} else if (HAS_PCH_CPT(dev) &&
-		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
 		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1981,6 +2177,7 @@
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
@@ -2010,7 +2207,7 @@
 
 	DRM_DEBUG_KMS("\n");
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
 		DP &= ~DP_LINK_TRAIN_MASK_CPT;
 		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
 	} else {
@@ -2301,11 +2498,10 @@
 			return NULL;
 
 		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-		edid = kmalloc(size, GFP_KERNEL);
+		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
 		if (!edid)
 			return NULL;
 
-		memcpy(edid, intel_connector->edid, size);
 		return edid;
 	}
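
The EDID copy now uses kmemdup(), which allocates and copies in one call; the error handling is unchanged since kmemdup() also returns NULL on allocation failure. A userspace analogue of the same idiom, for illustration:

    /* Userspace stand-in for kmemdup(). */
    #include <stdlib.h>
    #include <string.h>

    static void *memdup(const void *src, size_t len)
    {
    	void *p = malloc(len);

    	if (p)
    		memcpy(p, src, len);
    	return p;
    }

    int main(void)
    {
    	const unsigned char edid_header[8] = { 0x00, 0xff, 0xff, 0xff,
    					       0xff, 0xff, 0xff, 0x00 };
    	unsigned char *copy = memdup(edid_header, sizeof(edid_header));
    	int ret = copy ? 0 : 1;

    	free(copy);
    	return ret;
    }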
 
@@ -2588,11 +2784,11 @@
 	struct child_device_config *p_child;
 	int i;
 
-	if (!dev_priv->child_dev_num)
+	if (!dev_priv->vbt.child_dev_num)
 		return false;
 
-	for (i = 0; i < dev_priv->child_dev_num; i++) {
-		p_child = dev_priv->child_dev + i;
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
 
 		if (p_child->dvo_port == PORT_IDPD &&
 		    p_child->device_type == DEVICE_TYPE_eDP)
@@ -2670,7 +2866,7 @@
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
-	vbt = dev_priv->edp.pps;
+	vbt = dev_priv->vbt.edp_pps;
 
 	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
 	 * our hw here, which are all in 100usec. */
@@ -2738,9 +2934,6 @@
 		pp_div_reg = PIPEA_PP_DIVISOR;
 	}
 
-	if (IS_VALLEYVIEW(dev))
-		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
-
 	/* And finally store the new values in the power sequencer. */
 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
 		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
@@ -2754,8 +2947,10 @@
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
-		if (is_cpu_edp(intel_dp))
+	if (IS_VALLEYVIEW(dev)) {
+		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (dp_to_dig_port(intel_dp)->port == PORT_A)
 			port_sel = PANEL_POWER_PORT_DP_A;
 		else
 			port_sel = PANEL_POWER_PORT_DP_D;
@@ -2792,28 +2987,39 @@
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
-		if (intel_dpd_is_edp(dev))
-			intel_dp->is_pch_edp = true;
-
+	type = DRM_MODE_CONNECTOR_DisplayPort;
 	/*
 	 * FIXME: We need to initialize built-in panels before external panels.
 	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
 	 */
-	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+	switch (port) {
+	case PORT_A:
 		type = DRM_MODE_CONNECTOR_eDP;
-		intel_encoder->type = INTEL_OUTPUT_EDP;
-	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
-		type = DRM_MODE_CONNECTOR_eDP;
-		intel_encoder->type = INTEL_OUTPUT_EDP;
-	} else {
-		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
-		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
-		 * rewrite it.
-		 */
-		type = DRM_MODE_CONNECTOR_DisplayPort;
+		break;
+	case PORT_C:
+		if (IS_VALLEYVIEW(dev))
+			type = DRM_MODE_CONNECTOR_eDP;
+		break;
+	case PORT_D:
+		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
+			type = DRM_MODE_CONNECTOR_eDP;
+		break;
+	default:	/* silence GCC warning */
+		break;
 	}
 
+	/*
+	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
+	 * for DP the encoder type can be set by the caller to
+	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
+	 */
+	if (type == DRM_MODE_CONNECTOR_eDP)
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
+			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+			port_name(port));
+
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
@@ -2929,8 +3135,8 @@
 		}
 
 		/* fallback to VBT if available for eDP */
-		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
-			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
+			fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
 			if (fixed_mode)
 				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 		}
@@ -2986,6 +3192,9 @@
 	intel_encoder->disable = intel_disable_dp;
 	intel_encoder->post_disable = intel_post_disable_dp;
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+	intel_encoder->get_config = intel_dp_get_config;
+	if (IS_VALLEYVIEW(dev))
+		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
 
 	intel_dig_port->port = port;
 	intel_dig_port->dp.output_reg = output_reg;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 624a9e6..fdf6303 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -120,7 +120,6 @@
 	struct intel_crtc *new_crtc;
 
 	int type;
-	bool needs_tv_clock;
 	/*
 	 * Intel hw has only one MUX where encoders could be clone, hence a
 	 * simple flag is enough to compute the possible_clones mask.
@@ -140,6 +139,10 @@
 	 * the encoder is active. If the encoder is enabled it also set the pipe
 	 * it is connected to in the pipe parameter. */
 	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+	/* Reconstructs the equivalent mode flags for the current hardware
+	 * state. */
+	void (*get_config)(struct intel_encoder *,
+			   struct intel_crtc_config *pipe_config);
 	int crtc_mask;
 	enum hpd_pin hpd_pin;
 };
@@ -177,6 +180,18 @@
 	u8 polled;
 };
 
+typedef struct dpll {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int	dot;
+	int	vco;
+	int	m;
+	int	p;
+} intel_clock_t;
+
 struct intel_crtc_config {
 	struct drm_display_mode requested_mode;
 	struct drm_display_mode adjusted_mode;
@@ -201,18 +216,30 @@
 	/* DP has a bunch of special cases unfortunately, so mark the pipe
 	 * accordingly. */
 	bool has_dp_encoder;
+
+	/*
+	 * Enable dithering, used when the selected pipe bpp doesn't match the
+	 * plane bpp.
+	 */
 	bool dither;
 
 	/* Controls for the clock computation, to override various stages. */
 	bool clock_set;
 
+	/* SDVO TV has a bunch of special cases. To make multifunction encoders
+	 * work correctly, we need to track this at runtime. */
+	bool sdvo_tv_clock;
+
+	/*
+	 * crtc bandwidth limit: don't increase pipe bpp or clock if not really
+	 * required. This is set in the second pass of calling the encoders'
+	 * ->compute_config callbacks if the first pick doesn't work out.
+	 */
+	bool bw_constrained;
+
 	/* Settings for the intel dpll used on pretty much everything but
 	 * haswell. */
-	struct dpll {
-		unsigned n;
-		unsigned m1, m2;
-		unsigned p1, p2;
-	} dpll;
+	struct dpll dpll;
 
 	int pipe_bpp;
 	struct intel_link_m_n dp_m_n;
@@ -224,6 +251,25 @@
 	int pixel_target_clock;
 	/* Used by SDVO (and if we ever fix it, HDMI). */
 	unsigned pixel_multiplier;
+
+	/* Panel fitter controls for gen2-gen4 + VLV */
+	struct {
+		u32 control;
+		u32 pgm_ratios;
+		u32 lvds_border_bits;
+	} gmch_pfit;
+
+	/* Panel fitter placement and size for Ironlake+ */
+	struct {
+		u32 pos;
+		u32 size;
+	} pch_pfit;
+
+	/* FDI configuration, only valid if has_pch_encoder is set. */
+	int fdi_lanes;
+	struct intel_link_m_n fdi_m_n;
+
+	bool ips_enabled;
 };
 
 struct intel_crtc {
@@ -242,7 +288,6 @@
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
-	int fdi_lanes;
 
 	atomic_t unpin_work_count;
 
@@ -265,6 +310,10 @@
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
+
+	/* Access to these should be protected by dev_priv->irq_lock. */
+	bool cpu_fifo_underrun_disabled;
+	bool pch_fifo_underrun_disabled;
 };
 
 struct intel_plane {
@@ -279,6 +328,18 @@
 	unsigned int crtc_w, crtc_h;
 	uint32_t src_x, src_y;
 	uint32_t src_w, src_h;
+
+	/* Since we need to change the watermarks before/after
+	 * enabling/disabling the planes, we need to store the parameters here
+	 * as the other pieces of the struct may not reflect the values we want
+	 * for the watermark calculations. Currently only Haswell uses this.
+	 */
+	struct {
+		bool enable;
+		uint8_t bytes_per_pixel;
+		uint32_t horiz_pixels;
+	} wm;
+
 	void (*update_plane)(struct drm_plane *plane,
 			     struct drm_framebuffer *fb,
 			     struct drm_i915_gem_object *obj,
@@ -411,7 +472,6 @@
 	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
 	struct i2c_adapter adapter;
 	struct i2c_algo_dp_aux_data algo;
-	bool is_pch_edp;
 	uint8_t train_set[4];
 	int panel_power_up_delay;
 	int panel_power_down_delay;
@@ -431,6 +491,19 @@
 	struct intel_hdmi hdmi;
 };
 
+static inline int
+vlv_dport_to_channel(struct intel_digital_port *dport)
+{
+	switch (dport->port) {
+	case PORT_B:
+		return 0;
+	case PORT_C:
+		return 1;
+	default:
+		BUG();
+	}
+}
+
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
@@ -474,6 +547,7 @@
 extern void intel_attach_force_audio_property(struct drm_connector *connector);
 extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
+extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
 			    int hdmi_reg, enum port port);
@@ -512,7 +586,6 @@
 extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
 extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);
@@ -524,12 +597,14 @@
 
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct drm_device *dev,
-				    int fitting_mode,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode);
-extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
+				    struct intel_crtc_config *pipe_config,
+				    int fitting_mode);
+extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+				     struct intel_crtc_config *pipe_config,
+				     int fitting_mode);
+extern void intel_panel_set_backlight(struct drm_device *dev,
+				      u32 level, u32 max);
 extern int intel_panel_setup_backlight(struct drm_connector *connector);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
 					 enum pipe pipe);
@@ -565,19 +640,17 @@
 	return to_intel_connector(connector)->encoder;
 }
 
-static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
-{
-	struct intel_digital_port *intel_dig_port =
-		container_of(encoder, struct intel_digital_port, base.base);
-	return &intel_dig_port->dp;
-}
-
 static inline struct intel_digital_port *
 enc_to_dig_port(struct drm_encoder *encoder)
 {
 	return container_of(encoder, struct intel_digital_port, base.base);
 }
 
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->dp;
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {
@@ -607,6 +680,7 @@
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
 
 struct intel_load_detect_pipe {
 	struct drm_framebuffer *release_fb;
@@ -660,13 +734,9 @@
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
 extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_suspend_hw(struct drm_device *dev);
 extern void intel_write_eld(struct drm_encoder *encoder,
 			    struct drm_display_mode *mode);
-extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
-extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n);
-extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n);
 extern void intel_prepare_ddi(struct drm_device *dev);
 extern void hsw_fdi_link_train(struct drm_crtc *crtc);
 extern void intel_ddi_init(struct drm_device *dev, enum port port);
@@ -675,9 +745,7 @@
 extern void intel_update_watermarks(struct drm_device *dev);
 extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 					   uint32_t sprite_width,
-					   int pixel_size);
-extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
-			 struct drm_display_mode *mode);
+					   int pixel_size, bool enable);
 
 extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
 						    unsigned int tiling_mode,
@@ -689,8 +757,6 @@
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 
-extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
-
 /* Power-related functions, located in intel_pm.c */
 extern void intel_init_pm(struct drm_device *dev);
 /* FBC */
@@ -701,7 +767,8 @@
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 extern void intel_gpu_ips_teardown(void);
 
-extern bool intel_using_power_well(struct drm_device *dev);
+extern bool intel_display_power_enabled(struct drm_device *dev,
+					enum intel_display_power_domain domain);
 extern void intel_init_power_well(struct drm_device *dev);
 extern void intel_set_power_well(struct drm_device *dev, bool enable);
 extern void intel_enable_gt_powersave(struct drm_device *dev);
@@ -728,5 +795,11 @@
 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
 extern void intel_display_handle_reset(struct drm_device *dev);
+extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+						  enum pipe pipe,
+						  bool enable);
+extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+						 enum transcoder pch_transcoder,
+						 bool enable);
 
 #endif /* __INTEL_DRV_H__ */
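
The header now exports intel_set_cpu_fifo_underrun_reporting() and intel_set_pch_fifo_underrun_reporting() together with the per-crtc *_fifo_underrun_disabled flags, so callers can mask underrun reporting around operations that are expected to trigger spurious underruns. The bool return is assumed here to be the previous state (only the declarations appear in this diff), which enables a simple save/restore pattern, sketched standalone below:

    /* Save/restore pattern for a toggling helper that returns the
     * previous state; the semantics of the kernel helpers are an
     * assumption in this sketch. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool reporting_enabled = true;

    static bool set_underrun_reporting(bool enable)
    {
    	bool was_enabled = reporting_enabled;

    	reporting_enabled = enable;
    	return was_enabled;
    }

    int main(void)
    {
    	/* Mask reporting around a noisy operation, then restore it. */
    	bool old = set_underrun_reporting(false);

    	/* ... operation known to cause spurious underruns ... */

    	set_underrun_reporting(old);
    	printf("reporting enabled again: %d\n", reporting_enabled);
    	return 0;
    }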
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index cc70b16..eb2020e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -54,6 +54,13 @@
 		.dev_ops = &ch7xxx_ops,
 	},
 	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ch7xxx",
+		.dvo_reg = DVOC,
+		.slave_addr = 0x75, /* For some ch7010 */
+		.dev_ops = &ch7xxx_ops,
+	},
+	{
 		.type = INTEL_DVO_CHIP_LVDS,
 		.name = "ivch",
 		.dvo_reg = DVOA,
@@ -129,6 +136,26 @@
 	return true;
 }
 
+static void intel_dvo_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+	if (tmp & DVO_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (tmp & DVO_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 static void intel_disable_dvo(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -153,6 +180,7 @@
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
 static void intel_dvo_dpms(struct drm_connector *connector, int mode)
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -174,6 +202,8 @@
 		return;
 	}
 
+	/* We call connector dpms manually below in case pipe dpms doesn't
+	 * change due to cloning. */
 	if (mode == DRM_MODE_DPMS_ON) {
 		intel_dvo->base.connectors_active = true;
 
@@ -440,6 +470,7 @@
 	intel_encoder->disable = intel_disable_dvo;
 	intel_encoder->enable = intel_enable_dvo;
 	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+	intel_encoder->get_config = intel_dvo_get_config;
 	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 
 	/* Now, try to find a controller */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6b7c3ca..3b03c3c6 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -292,8 +292,6 @@
 {
 	int ret;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_plane *plane;
 
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return;
@@ -304,10 +302,5 @@
 	if (ret)
 		DRM_DEBUG("failed to restore crtc mode\n");
 
-	/* Be sure to shut off any planes that may be active */
-	list_for_each_entry(plane, &config->plane_list, head)
-		if (plane->enabled)
-			plane->funcs->disable_plane(plane);
-
 	drm_modeset_unlock_all(dev);
 }
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a905793..8062a92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -658,6 +658,28 @@
 	return true;
 }
 
+static void intel_hdmi_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_config *pipe_config)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp, flags = 0;
+
+	tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+	if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (tmp & SDVO_VSYNC_ACTIVE_HIGH)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 static void intel_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
@@ -697,6 +719,14 @@
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
+
+	if (IS_VALLEYVIEW(dev)) {
+		struct intel_digital_port *dport =
+			enc_to_dig_port(&encoder->base);
+		int channel = vlv_dport_to_channel(dport);
+
+		vlv_wait_port_ready(dev_priv, channel);
+	}
 }
 
 static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -775,6 +805,8 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+	int desired_bpp;
 
 	if (intel_hdmi->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -794,14 +826,31 @@
 	/*
 	 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
 	 * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
-	 * outputs.
+	 * outputs. We also need to check that the higher clock still fits
+	 * within limits.
 	 */
-	if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
-		DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
-		pipe_config->pipe_bpp = 12*3;
+	if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+	    && HAS_PCH_SPLIT(dev)) {
+		DRM_DEBUG_KMS("picking 12 bpc for HDMI output\n");
+		desired_bpp = 12*3;
+
+		/* Need to adjust the port link by 1.5x for 12bpc. */
+		adjusted_mode->clock = clock_12bpc;
+		pipe_config->pixel_target_clock =
+			pipe_config->requested_mode.clock;
 	} else {
-		DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
-		pipe_config->pipe_bpp = 8*3;
+		DRM_DEBUG_KMS("picking 8 bpc for HDMI output\n");
+		desired_bpp = 8*3;
+	}
+
+	if (!pipe_config->bw_constrained) {
+		DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+		pipe_config->pipe_bpp = desired_bpp;
+	}
+
+	if (adjusted_mode->clock > 225000) {
+		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+		return false;
 	}
 
 	return true;
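
The 12bpc path now also requires the 1.5x port clock to stay within the 225 MHz limit the function enforces. For example, 1080p60 with a 148500 kHz pixel clock yields a 222750 kHz 12bpc port clock and still qualifies, whereas any pixel clock above 150000 kHz falls back to 8 bpc, and a mode whose resulting clock ends up above 225000 kHz is rejected outright. The arithmetic in isolation:

    /* Re-statement of the 12bpc clock check above; clocks in kHz. */
    #include <stdio.h>

    int main(void)
    {
    	int requested_clock = 148500;		/* 1080p60 */
    	int clock_12bpc = requested_clock * 3 / 2;

    	if (clock_12bpc <= 225000)
    		printf("12bpc ok, port clock %d kHz\n", clock_12bpc);
    	else
    		printf("fall back to 8bpc, port clock %d kHz\n", requested_clock);
    	return 0;
    }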
@@ -955,6 +1004,97 @@
 	return 0;
 }
 
+static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
+	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
+	u32 val;
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/* Enable clock channels for this port */
+	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = 0;
+	if (pipe)
+		val |= (1<<21);
+	else
+		val &= ~(1<<21);
+	val |= 0x001000c4;
+	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+
+	/* HDMI 1.0V-2dB */
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+			 0x2b245f5f);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+			 0x5578b83a);
+	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+			 0x0c782040);
+	vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+			 0x2b247878);
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+			 0x00002000);
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+			 DPIO_TX_OCALINIT_EN);
+
+	/* Program lane clock */
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+			 0x00760018);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+			 0x00400888);
+}
+
+static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int port = vlv_dport_to_channel(dport);
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/* Program Tx lane resets to default */
+	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+			 DPIO_PCS_TX_LANE2_RESET |
+			 DPIO_PCS_TX_LANE1_RESET);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+			 DPIO_PCS_CLK_SOFT_RESET);
+
+	/* Fix up inter-pair skew failure */
+	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+
+	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+			 0x00002000);
+	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+			 DPIO_TX_OCALINIT_EN);
+}
+
+static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+{
+	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	int port = vlv_dport_to_channel(dport);
+
+	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
+	mutex_lock(&dev_priv->dpio_lock);
+	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
+	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
 	drm_sysfs_connector_remove(connector);
@@ -1094,6 +1234,12 @@
 	intel_encoder->enable = intel_enable_hdmi;
 	intel_encoder->disable = intel_disable_hdmi;
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+	intel_encoder->get_config = intel_hdmi_get_config;
+	if (IS_VALLEYVIEW(dev)) {
+		intel_encoder->pre_enable = intel_hdmi_pre_enable;
+		intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+		intel_encoder->post_disable = intel_hdmi_post_disable;
+	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 29412cc..0ef8b4d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -49,8 +49,6 @@
 struct intel_lvds_encoder {
 	struct intel_encoder base;
 
-	u32 pfit_control;
-	u32 pfit_pgm_ratios;
 	bool is_dual_link;
 	u32 reg;
 
@@ -88,6 +86,31 @@
 	return true;
 }
 
+static void intel_lvds_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lvds_reg, tmp, flags = 0;
+
+	if (HAS_PCH_SPLIT(dev))
+		lvds_reg = PCH_LVDS;
+	else
+		lvds_reg = LVDS;
+
+	tmp = I915_READ(lvds_reg);
+	if (tmp & LVDS_HSYNC_POLARITY)
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	if (tmp & LVDS_VSYNC_POLARITY)
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
  * This is an exception to the general rule that mode_set doesn't turn
  * things on.
@@ -118,7 +141,8 @@
 	}
 
 	/* set the corresponsding LVDS_BORDER bit */
-	temp |= dev_priv->lvds_border_bits;
+	temp &= ~LVDS_BORDER_ENABLE;
+	temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
 	/* Set the B0-B3 data pairs corresponding to whether we're going to
 	 * set the DPLLs for dual-channel mode or not.
 	 */
@@ -136,7 +160,10 @@
 	 * special lvds dither control bit on pch-split platforms, dithering is
 	 * only controlled through the PIPECONF reg. */
 	if (INTEL_INFO(dev)->gen == 4) {
-		if (dev_priv->lvds_dither)
+		/* Bspec wording suggests that LVDS port dithering only exists
+		 * for 18bpp panels. */
+		if (intel_crtc->config.dither &&
+		    intel_crtc->config.pipe_bpp == 18)
 			temp |= LVDS_ENABLE_DITHER;
 		else
 			temp &= ~LVDS_ENABLE_DITHER;
@@ -150,29 +177,6 @@
 	I915_WRITE(lvds_encoder->reg, temp);
 }
 
-static void intel_pre_enable_lvds(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
-		return;
-
-	/*
-	 * Enable automatic panel scaling so that non-native modes
-	 * fill the screen.  The panel fitter should only be
-	 * adjusted whilst the pipe is disabled, according to
-	 * register description and PRM.
-	 */
-	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-		      enc->pfit_control,
-		      enc->pfit_pgm_ratios);
-
-	I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, enc->pfit_control);
-}
-
 /**
  * Sets the power state for the panel.
  */
@@ -241,62 +245,6 @@
 	return MODE_OK;
 }
 
-static void
-centre_horizontally(struct drm_display_mode *mode,
-		    int width)
-{
-	u32 border, sync_pos, blank_width, sync_width;
-
-	/* keep the hsync and hblank widths constant */
-	sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
-	blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
-	sync_pos = (blank_width - sync_width + 1) / 2;
-
-	border = (mode->hdisplay - width + 1) / 2;
-	border += border & 1; /* make the border even */
-
-	mode->crtc_hdisplay = width;
-	mode->crtc_hblank_start = width + border;
-	mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
-
-	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
-	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
-}
-
-static void
-centre_vertically(struct drm_display_mode *mode,
-		  int height)
-{
-	u32 border, sync_pos, blank_width, sync_width;
-
-	/* keep the vsync and vblank widths constant */
-	sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
-	blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
-	sync_pos = (blank_width - sync_width + 1) / 2;
-
-	border = (mode->vdisplay - height + 1) / 2;
-
-	mode->crtc_vdisplay = height;
-	mode->crtc_vblank_start = height + border;
-	mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
-
-	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
-	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
-}
-
-static inline u32 panel_fitter_scaling(u32 source, u32 target)
-{
-	/*
-	 * Floating point operation is not supported. So the FACTOR
-	 * is defined, which can avoid the floating point computation
-	 * when calculating the panel ratio.
-	 */
-#define ACCURACY 12
-#define FACTOR (1 << ACCURACY)
-	u32 ratio = source * FACTOR / target;
-	return (FACTOR * ratio + FACTOR/2) / FACTOR;
-}
-
 static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 				      struct intel_crtc_config *pipe_config)
 {
@@ -307,11 +255,8 @@
 	struct intel_connector *intel_connector =
 		&lvds_encoder->attached_connector->base;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-	struct drm_display_mode *mode = &pipe_config->requested_mode;
 	struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
-	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
 	unsigned int lvds_bpp;
-	int pipe;
 
 	/* Should never happen!! */
 	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -328,11 +273,12 @@
 	else
 		lvds_bpp = 6*3;
 
-	if (lvds_bpp != pipe_config->pipe_bpp) {
+	if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
 		DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
 			      pipe_config->pipe_bpp, lvds_bpp);
 		pipe_config->pipe_bpp = lvds_bpp;
 	}
+
 	/*
 	 * We have timings from the BIOS for the panel, put them in
 	 * to the adjusted mode.  The CRTC will be set up for this mode,
@@ -345,139 +291,17 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		pipe_config->has_pch_encoder = true;
 
-		intel_pch_panel_fitting(dev,
-					intel_connector->panel.fitting_mode,
-					mode, adjusted_mode);
+		intel_pch_panel_fitting(intel_crtc, pipe_config,
+					intel_connector->panel.fitting_mode);
 		return true;
+	} else {
+		intel_gmch_panel_fitting(intel_crtc, pipe_config,
+					 intel_connector->panel.fitting_mode);
 	}
 
-	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
-		goto out;
-
-	/* 965+ wants fuzzy fitting */
-	if (INTEL_INFO(dev)->gen >= 4)
-		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
-				 PFIT_FILTER_FUZZY);
-
-	/*
-	 * Enable automatic panel scaling for non-native modes so that they fill
-	 * the screen.  Should be enabled before the pipe is enabled, according
-	 * to register description and PRM.
-	 * Change the value here to see the borders for debugging
-	 */
-	for_each_pipe(pipe)
-		I915_WRITE(BCLRPAT(pipe), 0);
-
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 	pipe_config->timings_set = true;
 
-	switch (intel_connector->panel.fitting_mode) {
-	case DRM_MODE_SCALE_CENTER:
-		/*
-		 * For centered modes, we have to calculate border widths &
-		 * heights and modify the values programmed into the CRTC.
-		 */
-		centre_horizontally(adjusted_mode, mode->hdisplay);
-		centre_vertically(adjusted_mode, mode->vdisplay);
-		border = LVDS_BORDER_ENABLE;
-		break;
-
-	case DRM_MODE_SCALE_ASPECT:
-		/* Scale but preserve the aspect ratio */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-
-			/* 965+ is easy, it does everything in hw */
-			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
-			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
-			else if (adjusted_mode->hdisplay != mode->hdisplay)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-		} else {
-			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
-			/*
-			 * For earlier chips we have to calculate the scaling
-			 * ratio by hand and program it into the
-			 * PFIT_PGM_RATIO register
-			 */
-			if (scaled_width > scaled_height) { /* pillar */
-				centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->vdisplay != adjusted_mode->vdisplay) {
-					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else if (scaled_width < scaled_height) { /* letter */
-				centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->hdisplay != adjusted_mode->hdisplay) {
-					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else
-				/* Aspects match, Let hw scale both directions */
-				pfit_control |= (PFIT_ENABLE |
-						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_INTERP_BILINEAR);
-		}
-		break;
-
-	case DRM_MODE_SCALE_FULLSCREEN:
-		/*
-		 * Full scaling, even if it changes the aspect ratio.
-		 * Fortunately this is all done for us in hw.
-		 */
-		if (mode->vdisplay != adjusted_mode->vdisplay ||
-		    mode->hdisplay != adjusted_mode->hdisplay) {
-			pfit_control |= PFIT_ENABLE;
-			if (INTEL_INFO(dev)->gen >= 4)
-				pfit_control |= PFIT_SCALING_AUTO;
-			else
-				pfit_control |= (VERT_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_AUTO_SCALE |
-						 HORIZ_INTERP_BILINEAR);
-		}
-		break;
-
-	default:
-		break;
-	}
-
-out:
-	/* If not enabling scaling, be consistent and always use 0. */
-	if ((pfit_control & PFIT_ENABLE) == 0) {
-		pfit_control = 0;
-		pfit_pgm_ratios = 0;
-	}
-
-	/* Make sure pre-965 set dither correctly */
-	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
-	if (pfit_control != lvds_encoder->pfit_control ||
-	    pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
-		lvds_encoder->pfit_control = pfit_control;
-		lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
-	}
-	dev_priv->lvds_border_bits = border;
-
 	/*
 	 * XXX: It would be nice to support lower refresh rates on the
 	 * panels to reduce power consumption, and perhaps match the
@@ -937,11 +761,11 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	if (!dev_priv->child_dev_num)
+	if (!dev_priv->vbt.child_dev_num)
 		return true;
 
-	for (i = 0; i < dev_priv->child_dev_num; i++) {
-		struct child_device_config *child = dev_priv->child_dev + i;
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		struct child_device_config *child = dev_priv->vbt.child_dev + i;
 
 		/* If the device type is not LFP, continue.
 		 * We have to check both the new identifiers as well as the
@@ -1029,7 +853,7 @@
 	 */
 	val = I915_READ(lvds_encoder->reg);
 	if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
-		val = dev_priv->bios_lvds_val;
+		val = dev_priv->vbt.bios_lvds_val;
 
 	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
 }
@@ -1089,7 +913,7 @@
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return false;
-		if (dev_priv->edp.support) {
+		if (dev_priv->vbt.edp_support) {
 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			return false;
 		}
@@ -1107,10 +931,6 @@
 
 	lvds_encoder->attached_connector = lvds_connector;
 
-	if (!HAS_PCH_SPLIT(dev)) {
-		lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
-	}
-
 	intel_encoder = &lvds_encoder->base;
 	encoder = &intel_encoder->base;
 	intel_connector = &lvds_connector->base;
@@ -1122,11 +942,11 @@
 			 DRM_MODE_ENCODER_LVDS);
 
 	intel_encoder->enable = intel_enable_lvds;
-	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
 	intel_encoder->disable = intel_disable_lvds;
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+	intel_encoder->get_config = intel_lvds_get_config;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -1212,11 +1032,11 @@
 	}
 
 	/* Failed to get EDID, what about VBT? */
-	if (dev_priv->lfp_lvds_vbt_mode) {
+	if (dev_priv->vbt.lfp_lvds_vbt_mode) {
 		DRM_DEBUG_KMS("using mode from VBT: ");
-		drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+		drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
 
-		fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
 		if (fixed_mode) {
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 			goto out;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index a8117e6..5c2d693 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -110,6 +110,10 @@
 	u8 rsvd[102];
 } __attribute__((packed));
 
+/* Driver readiness indicator */
+#define ASLE_ARDY_READY		(1 << 0)
+#define ASLE_ARDY_NOT_READY	(0 << 0)
+
 /* ASLE irq request bits */
 #define ASLE_SET_ALS_ILLUM     (1 << 0)
 #define ASLE_SET_BACKLIGHT     (1 << 1)
@@ -123,6 +127,12 @@
 #define ASLE_PFIT_FAILED	(1<<14)
 #define ASLE_PWM_FREQ_FAILED	(1<<16)
 
+/* Technology enabled indicator */
+#define ASLE_TCHE_ALS_EN	(1 << 0)
+#define ASLE_TCHE_BLC_EN	(1 << 1)
+#define ASLE_TCHE_PFIT_EN	(1 << 2)
+#define ASLE_TCHE_PFMB_EN	(1 << 3)
+
 /* ASLE backlight brightness to set */
 #define ASLE_BCLP_VALID                (1<<31)
 #define ASLE_BCLP_MSK          (~(1<<31))
@@ -152,7 +162,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 max;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -163,8 +172,7 @@
 	if (bclp > 255)
 		return ASLE_BACKLIGHT_FAILED;
 
-	max = intel_panel_get_max_backlight(dev);
-	intel_panel_set_backlight(dev, bclp * max / 255);
+	intel_panel_set_backlight(dev, bclp, 255);
 	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
 
 	return 0;
@@ -174,29 +182,22 @@
 {
 	/* alsi is the current ALS reading in lux. 0 indicates below sensor
 	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
-	return 0;
+	DRM_DEBUG_DRIVER("Illum is not supported\n");
+	return ASLE_ALS_ILLUM_FAILED;
 }
 
 static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	if (pfmb & ASLE_PFMB_PWM_VALID) {
-		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
-		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
-		pwm = pwm >> 9;
-		/* FIXME - what do we do with the PWM? */
-	}
-	return 0;
+	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+	return ASLE_PWM_FREQ_FAILED;
 }
 
 static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
 {
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
-	if (!(pfit & ASLE_PFIT_VALID))
-		return ASLE_PFIT_FAILED;
-	return 0;
+	DRM_DEBUG_DRIVER("Pfit is not supported\n");
+	return ASLE_PFIT_FAILED;
 }
 
 void intel_opregion_asle_intr(struct drm_device *dev)
@@ -231,64 +232,6 @@
 	iowrite32(asle_stat, &asle->aslc);
 }
 
-void intel_opregion_gse_intr(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 asle_stat = 0;
-	u32 asle_req;
-
-	if (!asle)
-		return;
-
-	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
-
-	if (!asle_req) {
-		DRM_DEBUG_DRIVER("non asle set request??\n");
-		return;
-	}
-
-	if (asle_req & ASLE_SET_ALS_ILLUM) {
-		DRM_DEBUG_DRIVER("Illum is not supported\n");
-		asle_stat |= ASLE_ALS_ILLUM_FAILED;
-	}
-
-	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
-
-	if (asle_req & ASLE_SET_PFIT) {
-		DRM_DEBUG_DRIVER("Pfit is not supported\n");
-		asle_stat |= ASLE_PFIT_FAILED;
-	}
-
-	if (asle_req & ASLE_SET_PWM_FREQ) {
-		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-		asle_stat |= ASLE_PWM_FREQ_FAILED;
-	}
-
-	iowrite32(asle_stat, &asle->aslc);
-}
-#define ASLE_ALS_EN    (1<<0)
-#define ASLE_BLC_EN    (1<<1)
-#define ASLE_PFIT_EN   (1<<2)
-#define ASLE_PFMB_EN   (1<<3)
-
-void intel_opregion_enable_asle(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-
-	if (asle) {
-		if (IS_MOBILE(dev))
-			intel_enable_asle(dev);
-
-		iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
-			  ASLE_PFMB_EN,
-			  &asle->tche);
-		iowrite32(1, &asle->ardy);
-	}
-}
-
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
@@ -472,8 +415,10 @@
 		register_acpi_notifier(&intel_opregion_notifier);
 	}
 
-	if (opregion->asle)
-		intel_opregion_enable_asle(dev);
+	if (opregion->asle) {
+		iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
+		iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
+	}
 }
 
 void intel_opregion_fini(struct drm_device *dev)
@@ -484,6 +429,9 @@
 	if (!opregion->header)
 		return;
 
+	if (opregion->asle)
+		iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
+
 	if (opregion->acpi) {
 		iowrite32(0, &opregion->acpi->drdy);
 
@@ -546,6 +494,8 @@
 	if (mboxes & MBOX_ASLE) {
 		DRM_DEBUG_DRIVER("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
+
+		iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 67a2501..836794b 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1485,14 +1485,15 @@
 }
 
 void
-intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
+				struct intel_overlay_error_state *error)
 {
-	seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
-		   error->dovsta, error->isr);
-	seq_printf(m, "  Register file at 0x%08lx:\n",
-		   error->base);
+	i915_error_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+			  error->dovsta, error->isr);
+	i915_error_printf(m, "  Register file at 0x%08lx:\n",
+			  error->base);
 
-#define P(x) seq_printf(m, "    " #x ":	0x%08x\n", error->regs.x)
+#define P(x) i915_error_printf(m, "    " #x ":	0x%08x\n", error->regs.x)
 	P(OBUF_0Y);
 	P(OBUF_1Y);
 	P(OBUF_0U);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index eb5e6e9..80bea1d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -54,14 +54,16 @@
 
 /* adjusted_mode has been preset to be the panel's fixed mode */
 void
-intel_pch_panel_fitting(struct drm_device *dev,
-			int fitting_mode,
-			const struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode)
+intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
+			struct intel_crtc_config *pipe_config,
+			int fitting_mode)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *mode, *adjusted_mode;
 	int x, y, width, height;
 
+	mode = &pipe_config->requested_mode;
+	adjusted_mode = &pipe_config->adjusted_mode;
+
 	x = y = width = height = 0;
 
 	/* Native modes don't need fitting */
@@ -104,17 +106,209 @@
 		}
 		break;
 
-	default:
 	case DRM_MODE_SCALE_FULLSCREEN:
 		x = y = 0;
 		width = adjusted_mode->hdisplay;
 		height = adjusted_mode->vdisplay;
 		break;
+
+	default:
+		WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+		return;
 	}
 
 done:
-	dev_priv->pch_pf_pos = (x << 16) | y;
-	dev_priv->pch_pf_size = (width << 16) | height;
+	pipe_config->pch_pfit.pos = (x << 16) | y;
+	pipe_config->pch_pfit.size = (width << 16) | height;
+}
+
+static void
+centre_horizontally(struct drm_display_mode *mode,
+		    int width)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the hsync and hblank widths constant */
+	sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (mode->hdisplay - width + 1) / 2;
+	border += border & 1; /* make the border even */
+
+	mode->crtc_hdisplay = width;
+	mode->crtc_hblank_start = width + border;
+	mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+		  int height)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the vsync and vblank widths constant */
+	sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (mode->vdisplay - height + 1) / 2;
+
+	mode->crtc_vdisplay = height;
+	mode->crtc_vblank_start = height + border;
+	mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+	/*
+	 * Floating point operation is not supported. So the FACTOR
+	 * is defined, which can avoid the floating point computation
+	 * when calculating the panel ratio.
+	 */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+	u32 ratio = source * FACTOR / target;
+	return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
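
The helper above exists so the pre-965 panel-fitter ratio can be computed without floating point: ACCURACY 12 gives a ratio with 12 fractional bits, which is what later gets programmed into PFIT_PGM_RATIOS. A standalone userspace sketch of the same arithmetic, with hypothetical source and panel sizes (the helper name is local to the sketch):

#include <stdio.h>
#include <stdint.h>

#define ACCURACY 12
#define FACTOR (1 << ACCURACY)

/* Same arithmetic as panel_fitter_scaling(): a ratio with 12 fractional bits. */
static uint32_t scale_ratio(uint32_t source, uint32_t target)
{
	uint32_t ratio = source * FACTOR / target;

	return (FACTOR * ratio + FACTOR / 2) / FACTOR;
}

int main(void)
{
	/* Hypothetical: a 768-line source mode scaled onto a 1024-line panel. */
	printf("ratio = 0x%x\n", scale_ratio(768, 1024));	/* prints 0xc00, i.e. 0.75 */
	return 0;
}
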
+
+void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
+			      struct intel_crtc_config *pipe_config,
+			      int fitting_mode)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+	struct drm_display_mode *mode, *adjusted_mode;
+
+	mode = &pipe_config->requested_mode;
+	adjusted_mode = &pipe_config->adjusted_mode;
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->hdisplay == mode->hdisplay &&
+	    adjusted_mode->vdisplay == mode->vdisplay)
+		goto out;
+
+	switch (fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		/*
+		 * For centered modes, we have to calculate border widths &
+		 * heights and modify the values programmed into the CRTC.
+		 */
+		centre_horizontally(adjusted_mode, mode->hdisplay);
+		centre_vertically(adjusted_mode, mode->vdisplay);
+		border = LVDS_BORDER_ENABLE;
+		break;
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		if (INTEL_INFO(dev)->gen >= 4) {
+			u32 scaled_width = adjusted_mode->hdisplay *
+				mode->vdisplay;
+			u32 scaled_height = mode->hdisplay *
+				adjusted_mode->vdisplay;
+
+			/* 965+ is easy, it does everything in hw */
+			if (scaled_width > scaled_height)
+				pfit_control |= PFIT_ENABLE |
+					PFIT_SCALING_PILLAR;
+			else if (scaled_width < scaled_height)
+				pfit_control |= PFIT_ENABLE |
+					PFIT_SCALING_LETTER;
+			else if (adjusted_mode->hdisplay != mode->hdisplay)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+		} else {
+			u32 scaled_width = adjusted_mode->hdisplay *
+				mode->vdisplay;
+			u32 scaled_height = mode->hdisplay *
+				adjusted_mode->vdisplay;
+			/*
+			 * For earlier chips we have to calculate the scaling
+			 * ratio by hand and program it into the
+			 * PFIT_PGM_RATIO register
+			 */
+			if (scaled_width > scaled_height) { /* pillar */
+				centre_horizontally(adjusted_mode,
+						    scaled_height /
+						    mode->vdisplay);
+
+				border = LVDS_BORDER_ENABLE;
+				if (mode->vdisplay != adjusted_mode->vdisplay) {
+					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+							    bits << PFIT_VERT_SCALE_SHIFT);
+					pfit_control |= (PFIT_ENABLE |
+							 VERT_INTERP_BILINEAR |
+							 HORIZ_INTERP_BILINEAR);
+				}
+			} else if (scaled_width < scaled_height) { /* letter */
+				centre_vertically(adjusted_mode,
+						  scaled_width /
+						  mode->hdisplay);
+
+				border = LVDS_BORDER_ENABLE;
+				if (mode->hdisplay != adjusted_mode->hdisplay) {
+					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+							    bits << PFIT_VERT_SCALE_SHIFT);
+					pfit_control |= (PFIT_ENABLE |
+							 VERT_INTERP_BILINEAR |
+							 HORIZ_INTERP_BILINEAR);
+				}
+			} else {
+				/* Aspects match, let hw scale both directions */
+				pfit_control |= (PFIT_ENABLE |
+						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_INTERP_BILINEAR);
+			}
+		}
+		break;
+	case DRM_MODE_SCALE_FULLSCREEN:
+		/*
+		 * Full scaling, even if it changes the aspect ratio.
+		 * Fortunately this is all done for us in hw.
+		 */
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
+		break;
+	default:
+		WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+		return;
+	}
+
+	/* 965+ wants fuzzy fitting */
+	/* FIXME: handle multiple panels by failing gracefully */
+	if (INTEL_INFO(dev)->gen >= 4)
+		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+				 PFIT_FILTER_FUZZY);
+
+out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
+
+	/* Make sure pre-965 set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+	pipe_config->gmch_pfit.control = pfit_control;
+	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+	pipe_config->gmch_pfit.lvds_border_bits = border;
 }
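
In the DRM_MODE_SCALE_ASPECT branch above the driver decides between pillarboxing and letterboxing by cross-multiplying the two aspect ratios, which avoids a division. A standalone sketch with hypothetical mode and panel sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mode_w = 1920, mode_h = 1080;		/* hypothetical requested mode */
	uint32_t panel_w = 1280, panel_h = 1024;	/* hypothetical native panel mode */

	/* Cross-multiply to compare aspect ratios without division,
	 * as the DRM_MODE_SCALE_ASPECT case above does. */
	uint32_t scaled_width = panel_w * mode_h;	/* adjusted_w * mode_h */
	uint32_t scaled_height = mode_w * panel_h;	/* mode_w * adjusted_h */

	if (scaled_width > scaled_height)
		printf("pillarbox (borders left/right)\n");
	else if (scaled_width < scaled_height)
		printf("letterbox (borders top/bottom)\n");
	else
		printf("aspects match\n");
	return 0;
}
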
 
 static int is_backlight_combination_mode(struct drm_device *dev)
@@ -130,11 +324,16 @@
 	return 0;
 }
 
+/* XXX: query mode clock or hardware clock and program max PWM appropriately
+ * when it's 0.
+ */
 static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 
+	WARN_ON_SMP(!spin_is_locked(&dev_priv->backlight.lock));
+
 	/* Restore the CTL value if it got lost, e.g. after a GPU reset */
 
 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
@@ -164,7 +363,7 @@
 	return val;
 }
 
-static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	u32 max;
 
@@ -182,23 +381,8 @@
 			max *= 0xff;
 	}
 
-	return max;
-}
-
-u32 intel_panel_get_max_backlight(struct drm_device *dev)
-{
-	u32 max;
-
-	max = _intel_panel_get_max_backlight(dev);
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		pr_warn_once("fixme: max PWM is zero\n");
-		return 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+
 	return max;
 }
 
@@ -217,8 +401,11 @@
 		return val;
 
 	if (i915_panel_invert_brightness > 0 ||
-	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
-		return intel_panel_get_max_backlight(dev) - val;
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+		u32 max = intel_panel_get_max_backlight(dev);
+		if (max)
+			return max - val;
+	}
 
 	return val;
 }
@@ -227,6 +414,9 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@@ -244,6 +434,9 @@
 	}
 
 	val = intel_panel_compute_brightness(dev, val);
+
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
 	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
 	return val;
 }
@@ -270,6 +463,10 @@
 		u32 max = intel_panel_get_max_backlight(dev);
 		u8 lbpc;
 
+		/* we're screwed, but keep behaviour backwards compatible */
+		if (!max)
+			max = 1;
+
 		lbpc = level * 0xfe / max + 1;
 		level /= lbpc;
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
@@ -282,9 +479,23 @@
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
-void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+/* set backlight brightness to level in range [0..max] */
+void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+
+	freq = intel_panel_get_max_backlight(dev);
+	if (!freq) {
+		/* we are screwed, bail out */
+		goto out;
+	}
+
+	/* scale to hardware */
+	level = level * freq / max;
 
 	dev_priv->backlight.level = level;
 	if (dev_priv->backlight.device)
@@ -292,11 +503,16 @@
 
 	if (dev_priv->backlight.enabled)
 		intel_panel_actually_set_backlight(dev, level);
+out:
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
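
With the new level/max interface the caller passes brightness in its own range and intel_panel_set_backlight() rescales it onto the hardware PWM range before programming it; the ASLE handler uses a 0..255 range and the backlight class device passes bd->props.max_brightness. A standalone sketch of that rescaling, with a made-up PWM maximum:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t level = 192;	/* caller's brightness request */
	uint32_t max = 255;	/* caller's range, e.g. the ASLE bclp range */
	uint32_t freq = 7812;	/* hypothetical max PWM value read back from the hardware */

	/* Same scaling as intel_panel_set_backlight(): map [0..max] onto [0..freq]. */
	uint32_t hw_level = level * freq / max;

	printf("hw level = %u of %u\n", hw_level, freq);
	return 0;
}
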
 
 void intel_panel_disable_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	dev_priv->backlight.enabled = false;
 	intel_panel_actually_set_backlight(dev, 0);
@@ -314,12 +530,19 @@
 			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
 		}
 	}
+
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
 void intel_panel_enable_backlight(struct drm_device *dev,
 				  enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder =
+		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (dev_priv->backlight.level == 0) {
 		dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
@@ -347,7 +570,10 @@
 		else
 			tmp &= ~BLM_PIPE_SELECT;
 
-		tmp |= BLM_PIPE(pipe);
+		if (cpu_transcoder == TRANSCODER_EDP)
+			tmp |= BLM_TRANSCODER_EDP;
+		else
+			tmp |= BLM_PIPE(cpu_transcoder);
 		tmp &= ~BLM_PWM_ENABLE;
 
 		I915_WRITE(reg, tmp);
@@ -369,6 +595,8 @@
 	 */
 	dev_priv->backlight.enabled = true;
 	intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
@@ -405,7 +633,8 @@
 static int intel_panel_update_status(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
-	intel_panel_set_backlight(dev, bd->props.brightness);
+	intel_panel_set_backlight(dev, bd->props.brightness,
+				  bd->props.max_brightness);
 	return 0;
 }
 
@@ -425,6 +654,7 @@
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct backlight_properties props;
+	unsigned long flags;
 
 	intel_panel_init_backlight(dev);
 
@@ -434,7 +664,11 @@
 	memset(&props, 0, sizeof(props));
 	props.type = BACKLIGHT_RAW;
 	props.brightness = dev_priv->backlight.level;
-	props.max_brightness = _intel_panel_get_max_backlight(dev);
+
+	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
+	props.max_brightness = intel_panel_get_max_backlight(dev);
+	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
+
 	if (props.max_brightness == 0) {
 		DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
 		return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa01128..49a1887 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -113,8 +113,8 @@
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
-	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
-		      cfb_pitch, crtc->y, intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ",
+		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
 }
 
 static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -148,7 +148,7 @@
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 
 static void g4x_disable_fbc(struct drm_device *dev)
@@ -228,7 +228,7 @@
 		sandybridge_blit_fbc_update(dev);
 	}
 
-	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
 }
 
 static void ironlake_disable_fbc(struct drm_device *dev)
@@ -242,6 +242,18 @@
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
+		if (IS_IVYBRIDGE(dev))
+			/* WaFbcDisableDpfcClockGating:ivb */
+			I915_WRITE(ILK_DSPCLK_GATE_D,
+				   I915_READ(ILK_DSPCLK_GATE_D) &
+				   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
+
+		if (IS_HASWELL(dev))
+			/* WaFbcDisableDpfcClockGating:hsw */
+			I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
+				   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
+				   ~HSW_DPFC_GATING_DISABLE);
+
 		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
@@ -253,6 +265,47 @@
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+
+	I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
+		   IVB_DPFC_CTL_FENCE_EN |
+		   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);
+
+	if (IS_IVYBRIDGE(dev)) {
+		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
+		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
+		/* WaFbcDisableDpfcClockGating:ivb */
+		I915_WRITE(ILK_DSPCLK_GATE_D,
+			   I915_READ(ILK_DSPCLK_GATE_D) |
+			   ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
+	} else {
+		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
+		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
+			   HSW_BYPASS_FBC_QUEUE);
+		/* WaFbcDisableDpfcClockGating:hsw */
+		I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
+			   I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
+			   HSW_DPFC_GATING_DISABLE);
+	}
+
+	I915_WRITE(SNB_DPFC_CTL_SA,
+		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+
+	sandybridge_blit_fbc_update(dev);
+
+	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
 bool intel_fbc_enabled(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -439,7 +492,7 @@
 	if (enable_fbc < 0) {
 		DRM_DEBUG_KMS("fbc set to per-chip default\n");
 		enable_fbc = 1;
-		if (INTEL_INFO(dev)->gen <= 6)
+		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
 			enable_fbc = 0;
 	}
 	if (!enable_fbc) {
@@ -460,7 +513,8 @@
 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
 		goto out_disable;
 	}
-	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+	if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
+	    intel_crtc->plane != 0) {
 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 		goto out_disable;
@@ -481,8 +535,6 @@
 		goto out_disable;
 
 	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
-		DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
-		DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
 		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
 		goto out_disable;
@@ -1633,6 +1685,10 @@
 		I915_WRITE(DISP_ARB_CTL,
 			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
 		return false;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		/* enable FBC WM (except on ILK, where it must remain off) */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
 	}
 
 	if (display_wm > display->max_wm) {
@@ -2016,31 +2072,561 @@
 		   cursor_wm);
 }
 
-static void
-haswell_update_linetime_wm(struct drm_device *dev, int pipe,
-				 struct drm_display_mode *mode)
+static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
+				      struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pixel_rate, pfit_size;
+
+	if (intel_crtc->config.pixel_target_clock)
+		pixel_rate = intel_crtc->config.pixel_target_clock;
+	else
+		pixel_rate = intel_crtc->config.adjusted_mode.clock;
+
+	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
+	 * adjust the pixel_rate here. */
+
+	pfit_size = intel_crtc->config.pch_pfit.size;
+	if (pfit_size) {
+		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+
+		pipe_w = intel_crtc->config.requested_mode.hdisplay;
+		pipe_h = intel_crtc->config.requested_mode.vdisplay;
+		pfit_w = (pfit_size >> 16) & 0xFFFF;
+		pfit_h = pfit_size & 0xFFFF;
+		if (pipe_w < pfit_w)
+			pipe_w = pfit_w;
+		if (pipe_h < pfit_h)
+			pipe_h = pfit_h;
+
+		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+				     pfit_w * pfit_h);
+	}
+
+	return pixel_rate;
+}
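
hsw_wm_get_pixel_rate() scales the pixel clock up by the panel-fitter downscaling ratio and never below the original rate, presumably because a downscaling fitter has to fetch more source pixels per output pixel. A standalone sketch of the adjustment, with hypothetical pipe and fitter sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pixel_rate = 148500;			/* kHz, hypothetical */
	uint64_t pipe_w = 1920, pipe_h = 1080;		/* pipe source size */
	uint64_t pfit_w = 1280, pfit_h = 720;		/* hypothetical pfit window (downscale) */

	/* Mirror hsw_wm_get_pixel_rate(): clamp so the rate is never scaled down. */
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	pixel_rate = pixel_rate * pipe_w * pipe_h / (pfit_w * pfit_h);

	printf("adjusted pixel rate = %llu kHz\n", (unsigned long long)pixel_rate);
	return 0;
}
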
+
+static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+			       uint32_t latency)
+{
+	uint64_t ret;
+
+	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
+
+	return ret;
+}
+
+static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+			       uint32_t latency)
+{
+	uint32_t ret;
+
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64) + 2;
+	return ret;
+}
+
+static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+			   uint8_t bytes_per_pixel)
+{
+	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+}
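
The divides by 10000 in the two methods above are consistent with the pixel rate being in kHz and the latency in 0.1 us units; read that as an inference from the code rather than documentation. Method 1 then counts 64-byte blocks fetched during the latency window, while method 2 counts whole display lines covered by the latency and sizes the fetch per line. A standalone sketch of both formulas with hypothetical 1080p numbers:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint32_t wm_method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
	/* 64-byte blocks fetched during the latency window, + 2 as in hsw_wm_method1() */
	return DIV_ROUND_UP((uint64_t)pixel_rate * bpp * latency, 64 * 10000) + 2;
}

static uint32_t wm_method2(uint32_t pixel_rate, uint32_t htotal,
			   uint32_t width, uint8_t bpp, uint32_t latency)
{
	/* whole lines covered by the latency, then bytes per line in 64-byte blocks */
	uint32_t lines = latency * pixel_rate / (htotal * 10000);

	return DIV_ROUND_UP((lines + 1) * width * bpp, 64) + 2;
}

int main(void)
{
	/* Hypothetical: 148500 kHz, htotal 2200, 1920 wide, 32bpp, latency "20" */
	printf("method1 = %u\n", wm_method1(148500, 4, 20));
	printf("method2 = %u\n", wm_method2(148500, 2200, 1920, 4, 20));
	return 0;
}
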
+
+struct hsw_pipe_wm_parameters {
+	bool active;
+	bool sprite_enabled;
+	uint8_t pri_bytes_per_pixel;
+	uint8_t spr_bytes_per_pixel;
+	uint8_t cur_bytes_per_pixel;
+	uint32_t pri_horiz_pixels;
+	uint32_t spr_horiz_pixels;
+	uint32_t cur_horiz_pixels;
+	uint32_t pipe_htotal;
+	uint32_t pixel_rate;
+};
+
+struct hsw_wm_maximums {
+	uint16_t pri;
+	uint16_t spr;
+	uint16_t cur;
+	uint16_t fbc;
+};
+
+struct hsw_lp_wm_result {
+	bool enable;
+	bool fbc_enable;
+	uint32_t pri_val;
+	uint32_t spr_val;
+	uint32_t cur_val;
+	uint32_t fbc_val;
+};
+
+struct hsw_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+};
+
+enum hsw_data_buf_partitioning {
+	HSW_DATA_BUF_PART_1_2,
+	HSW_DATA_BUF_PART_5_6,
+};
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+				   uint32_t mem_value,
+				   bool is_lp)
+{
+	uint32_t method1, method2;
+
+	/* TODO: for now, assume the primary plane is always enabled. */
+	if (!params->active)
+		return 0;
+
+	method1 = hsw_wm_method1(params->pixel_rate,
+				 params->pri_bytes_per_pixel,
+				 mem_value);
+
+	if (!is_lp)
+		return method1;
+
+	method2 = hsw_wm_method2(params->pixel_rate,
+				 params->pipe_htotal,
+				 params->pri_horiz_pixels,
+				 params->pri_bytes_per_pixel,
+				 mem_value);
+
+	return min(method1, method2);
+}
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+				   uint32_t mem_value)
+{
+	uint32_t method1, method2;
+
+	if (!params->active || !params->sprite_enabled)
+		return 0;
+
+	method1 = hsw_wm_method1(params->pixel_rate,
+				 params->spr_bytes_per_pixel,
+				 mem_value);
+	method2 = hsw_wm_method2(params->pixel_rate,
+				 params->pipe_htotal,
+				 params->spr_horiz_pixels,
+				 params->spr_bytes_per_pixel,
+				 mem_value);
+	return min(method1, method2);
+}
+
+/* For both WM_PIPE and WM_LP. */
+static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+				   uint32_t mem_value)
+{
+	if (!params->active)
+		return 0;
+
+	return hsw_wm_method2(params->pixel_rate,
+			      params->pipe_htotal,
+			      params->cur_horiz_pixels,
+			      params->cur_bytes_per_pixel,
+			      mem_value);
+}
+
+/* Only for WM_LP. */
+static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+				   uint32_t pri_val,
+				   uint32_t mem_value)
+{
+	if (!params->active)
+		return 0;
+
+	return hsw_wm_fbc(pri_val,
+			  params->pri_horiz_pixels,
+			  params->pri_bytes_per_pixel);
+}
+
+static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
+			      struct hsw_pipe_wm_parameters *params,
+			      struct hsw_lp_wm_result *result)
+{
+	enum pipe pipe;
+	uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+
+	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
+		struct hsw_pipe_wm_parameters *p = &params[pipe];
+
+		pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
+		spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
+		cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
+		fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
+	}
+
+	result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
+	result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
+	result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
+	result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+
+	if (result->fbc_val > max->fbc) {
+		result->fbc_enable = false;
+		result->fbc_val = 0;
+	} else {
+		result->fbc_enable = true;
+	}
+
+	result->enable = result->pri_val <= max->pri &&
+			 result->spr_val <= max->spr &&
+			 result->cur_val <= max->cur;
+	return result->enable;
+}
+
+static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
+				    uint32_t mem_value, enum pipe pipe,
+				    struct hsw_pipe_wm_parameters *params)
+{
+	uint32_t pri_val, cur_val, spr_val;
+
+	pri_val = hsw_compute_pri_wm(params, mem_value, false);
+	spr_val = hsw_compute_spr_wm(params, mem_value);
+	cur_val = hsw_compute_cur_wm(params, mem_value);
+
+	WARN(pri_val > 127,
+	     "Primary WM error, mode not supported for pipe %c\n",
+	     pipe_name(pipe));
+	WARN(spr_val > 127,
+	     "Sprite WM error, mode not supported for pipe %c\n",
+	     pipe_name(pipe));
+	WARN(cur_val > 63,
+	     "Cursor WM error, mode not supported for pipe %c\n",
+	     pipe_name(pipe));
+
+	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
+	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
+	       cur_val;
+}
+
+static uint32_t
+hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	u32 linetime, ips_linetime;
 
-	temp = I915_READ(PIPE_WM_LINETIME(pipe));
-	temp &= ~PIPE_WM_LINETIME_MASK;
+	if (!intel_crtc_active(crtc))
+		return 0;
 
 	/* The WMs are computed based on how long it takes to fill a single
 	 * row at the given clock rate, multiplied by 8.
 	 */
-	temp |= PIPE_WM_LINETIME_TIME(
-		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+	linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
+	ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+					 intel_ddi_get_cdclk_freq(dev_priv));
 
-	/* IPS watermarks are only used by pipe A, and are ignored by
-	 * pipes B and C.  They are calculated similarly to the common
-	 * linetime values, except that we are using CD clock frequency
-	 * in MHz instead of pixel rate for the division.
-	 *
-	 * This is a placeholder for the IPS watermark calculation code.
-	 */
+	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
+	       PIPE_WM_LINETIME_TIME(linetime);
+}
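
Multiplying htotal * 1000 by 8 before dividing by the clock in kHz yields the line time in eighths of a microsecond, matching the "multiplied by 8" remark in the comment. A standalone sketch with a hypothetical 1080p timing:

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 2200, clock = 148500;	/* kHz, hypothetical */

	/* line time in 1/8 us units, rounded to nearest as DIV_ROUND_CLOSEST does */
	unsigned int linetime = (htotal * 1000 * 8 + clock / 2) / clock;

	printf("linetime = %u (%.2f us)\n", linetime, linetime / 8.0);
	return 0;
}
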
 
-	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+static void hsw_compute_wm_parameters(struct drm_device *dev,
+				      struct hsw_pipe_wm_parameters *params,
+				      uint32_t *wm,
+				      struct hsw_wm_maximums *lp_max_1_2,
+				      struct hsw_wm_maximums *lp_max_5_6)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	uint64_t sskpd = I915_READ64(MCH_SSKPD);
+	enum pipe pipe;
+	int pipes_active = 0, sprites_enabled = 0;
+
+	if ((sskpd >> 56) & 0xFF)
+		wm[0] = (sskpd >> 56) & 0xFF;
+	else
+		wm[0] = sskpd & 0xF;
+	wm[1] = ((sskpd >> 4) & 0xFF) * 5;
+	wm[2] = ((sskpd >> 12) & 0xFF) * 5;
+	wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
+	wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct hsw_pipe_wm_parameters *p;
+
+		pipe = intel_crtc->pipe;
+		p = &params[pipe];
+
+		p->active = intel_crtc_active(crtc);
+		if (!p->active)
+			continue;
+
+		pipes_active++;
+
+		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
+		p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
+		p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+		p->cur_bytes_per_pixel = 4;
+		p->pri_horiz_pixels =
+			intel_crtc->config.requested_mode.hdisplay;
+		p->cur_horiz_pixels = 64;
+	}
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+		struct hsw_pipe_wm_parameters *p;
+
+		pipe = intel_plane->pipe;
+		p = &params[pipe];
+
+		p->sprite_enabled = intel_plane->wm.enable;
+		p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
+		p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+
+		if (p->sprite_enabled)
+			sprites_enabled++;
+	}
+
+	if (pipes_active > 1) {
+		lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
+		lp_max_1_2->spr = lp_max_5_6->spr = 128;
+		lp_max_1_2->cur = lp_max_5_6->cur = 64;
+	} else {
+		lp_max_1_2->pri = sprites_enabled ? 384 : 768;
+		lp_max_5_6->pri = sprites_enabled ? 128 : 768;
+		lp_max_1_2->spr = 384;
+		lp_max_5_6->spr = 640;
+		lp_max_1_2->cur = lp_max_5_6->cur = 255;
+	}
+	lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+}
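
The five wm[] latencies are unpacked from the 64-bit MCH_SSKPD read at the top of the function: WM0 comes from bits 63:56, falling back to the low nibble when that byte is zero, and the higher levels come from 8- and 9-bit fields scaled by 5, presumably to bring them into the same units as WM0. A decoding sketch with a made-up register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sskpd = 0x0200000405060708ULL;	/* hypothetical MCH_SSKPD value */
	unsigned int wm[5];

	/* Same field extraction as hsw_compute_wm_parameters() */
	if ((sskpd >> 56) & 0xFF)
		wm[0] = (sskpd >> 56) & 0xFF;
	else
		wm[0] = sskpd & 0xF;
	wm[1] = ((sskpd >> 4) & 0xFF) * 5;
	wm[2] = ((sskpd >> 12) & 0xFF) * 5;
	wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
	wm[4] = ((sskpd >> 32) & 0x1FF) * 5;

	for (int i = 0; i < 5; i++)
		printf("wm[%d] = %u\n", i, wm[i]);
	return 0;
}
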
+
+static void hsw_compute_wm_results(struct drm_device *dev,
+				   struct hsw_pipe_wm_parameters *params,
+				   uint32_t *wm,
+				   struct hsw_wm_maximums *lp_maximums,
+				   struct hsw_wm_values *results)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct hsw_lp_wm_result lp_results[4] = {};
+	enum pipe pipe;
+	int level, max_level, wm_lp;
+
+	for (level = 1; level <= 4; level++)
+		if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+				       &lp_results[level - 1]))
+			break;
+	max_level = level - 1;
+
+	memset(results, 0, sizeof(*results));
+
+	/* The spec says it is preferred to disable FBC WMs instead of disabling
+	 * a WM level. */
+	results->enable_fbc_wm = true;
+	for (level = 1; level <= max_level; level++) {
+		if (!lp_results[level - 1].fbc_enable) {
+			results->enable_fbc_wm = false;
+			break;
+		}
+	}
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		const struct hsw_lp_wm_result *r;
+
+		level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
+		if (level > max_level)
+			break;
+
+		r = &lp_results[level - 1];
+		results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
+							  r->fbc_val,
+							  r->pri_val,
+							  r->cur_val);
+		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+	}
+
+	for_each_pipe(pipe)
+		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
+							     pipe,
+							     &params[pipe]);
+
+	for_each_pipe(pipe) {
+		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+		results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+	}
+}
+
+/* Find the result with the highest level enabled. Check for enable_fbc_wm in
+ * case both are at the same level. Prefer r1 in case they're the same. */
+struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+					   struct hsw_wm_values *r2)
+{
+	int i, val_r1 = 0, val_r2 = 0;
+
+	for (i = 0; i < 3; i++) {
+		if (r1->wm_lp[i] & WM3_LP_EN)
+			val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
+		if (r2->wm_lp[i] & WM3_LP_EN)
+			val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+	}
+
+	if (val_r1 == val_r2) {
+		if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+			return r2;
+		else
+			return r1;
+	} else if (val_r1 > val_r2) {
+		return r1;
+	} else {
+		return r2;
+	}
+}
+
+/*
+ * The spec says we shouldn't write when we don't need to, because every write
+ * causes WMs to be re-evaluated, expending some power.
+ */
+static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
+				struct hsw_wm_values *results,
+				enum hsw_data_buf_partitioning partitioning)
+{
+	struct hsw_wm_values previous;
+	uint32_t val;
+	enum hsw_data_buf_partitioning prev_partitioning;
+	bool prev_enable_fbc_wm;
+
+	previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
+	previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
+	previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
+	previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
+	previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
+	previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
+	previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+	previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+	previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
+	previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
+	previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
+
+	prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+			    HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+
+	prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+
+	if (memcmp(results->wm_pipe, previous.wm_pipe,
+		   sizeof(results->wm_pipe)) == 0 &&
+	    memcmp(results->wm_lp, previous.wm_lp,
+		   sizeof(results->wm_lp)) == 0 &&
+	    memcmp(results->wm_lp_spr, previous.wm_lp_spr,
+		   sizeof(results->wm_lp_spr)) == 0 &&
+	    memcmp(results->wm_linetime, previous.wm_linetime,
+		   sizeof(results->wm_linetime)) == 0 &&
+	    partitioning == prev_partitioning &&
+	    results->enable_fbc_wm == prev_enable_fbc_wm)
+		return;
+
+	if (previous.wm_lp[2] != 0)
+		I915_WRITE(WM3_LP_ILK, 0);
+	if (previous.wm_lp[1] != 0)
+		I915_WRITE(WM2_LP_ILK, 0);
+	if (previous.wm_lp[0] != 0)
+		I915_WRITE(WM1_LP_ILK, 0);
+
+	if (previous.wm_pipe[0] != results->wm_pipe[0])
+		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
+	if (previous.wm_pipe[1] != results->wm_pipe[1])
+		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
+	if (previous.wm_pipe[2] != results->wm_pipe[2])
+		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
+
+	if (previous.wm_linetime[0] != results->wm_linetime[0])
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
+	if (previous.wm_linetime[1] != results->wm_linetime[1])
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
+	if (previous.wm_linetime[2] != results->wm_linetime[2])
+		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
+
+	if (prev_partitioning != partitioning) {
+		val = I915_READ(WM_MISC);
+		if (partitioning == HSW_DATA_BUF_PART_1_2)
+			val &= ~WM_MISC_DATA_PARTITION_5_6;
+		else
+			val |= WM_MISC_DATA_PARTITION_5_6;
+		I915_WRITE(WM_MISC, val);
+	}
+
+	if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+		val = I915_READ(DISP_ARB_CTL);
+		if (results->enable_fbc_wm)
+			val &= ~DISP_FBC_WM_DIS;
+		else
+			val |= DISP_FBC_WM_DIS;
+		I915_WRITE(DISP_ARB_CTL, val);
+	}
+
+	if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
+	if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
+	if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
+
+	if (results->wm_lp[0] != 0)
+		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
+	if (results->wm_lp[1] != 0)
+		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
+	if (results->wm_lp[2] != 0)
+		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+}
+
+static void haswell_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
+	struct hsw_pipe_wm_parameters params[3];
+	struct hsw_wm_values results_1_2, results_5_6, *best_results;
+	uint32_t wm[5];
+	enum hsw_data_buf_partitioning partitioning;
+
+	hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+
+	hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+	if (lp_max_1_2.pri != lp_max_5_6.pri) {
+		hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
+				       &results_5_6);
+		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+	} else {
+		best_results = &results_1_2;
+	}
+
+	partitioning = (best_results == &results_1_2) ?
+		       HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+
+	hsw_write_wm_values(dev_priv, best_results, partitioning);
+}
+
+static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+				     uint32_t sprite_width, int pixel_size,
+				     bool enable)
+{
+	struct drm_plane *plane;
+
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		struct intel_plane *intel_plane = to_intel_plane(plane);
+
+		if (intel_plane->pipe == pipe) {
+			intel_plane->wm.enable = enable;
+			intel_plane->wm.horiz_pixels = sprite_width + 1;
+			intel_plane->wm.bytes_per_pixel = pixel_size;
+			break;
+		}
+	}
+
+	haswell_update_wm(dev);
 }
 
 static bool
@@ -2120,7 +2706,8 @@
 }
 
 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
-					 uint32_t sprite_width, int pixel_size)
+					 uint32_t sprite_width, int pixel_size,
+					 bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
@@ -2128,6 +2715,9 @@
 	int sprite_wm, reg;
 	int ret;
 
+	if (!enable)
+		return;
+
 	switch (pipe) {
 	case 0:
 		reg = WM0_PIPEA_ILK;
@@ -2146,15 +2736,15 @@
 					    &sandybridge_display_wm_info,
 					    latency, &sprite_wm);
 	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
-			      pipe);
+		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
+			      pipe_name(pipe));
 		return;
 	}
 
 	val = I915_READ(reg);
 	val &= ~WM0_PIPE_SPRITE_MASK;
 	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
-	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+	DRM_DEBUG_KMS("sprite watermarks For pipe %c - %d\n", pipe_name(pipe), sprite_wm);
 
 
 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
@@ -2163,8 +2753,8 @@
 					      SNB_READ_WM1_LATENCY() * 500,
 					      &sprite_wm);
 	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
-			      pipe);
+		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
+			      pipe_name(pipe));
 		return;
 	}
 	I915_WRITE(WM1S_LP_ILK, sprite_wm);
@@ -2179,8 +2769,8 @@
 					      SNB_READ_WM2_LATENCY() * 500,
 					      &sprite_wm);
 	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
-			      pipe);
+		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
+			      pipe_name(pipe));
 		return;
 	}
 	I915_WRITE(WM2S_LP_IVB, sprite_wm);
@@ -2191,8 +2781,8 @@
 					      SNB_READ_WM3_LATENCY() * 500,
 					      &sprite_wm);
 	if (!ret) {
-		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
-			      pipe);
+		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
+			      pipe_name(pipe));
 		return;
 	}
 	I915_WRITE(WM3S_LP_IVB, sprite_wm);
@@ -2238,23 +2828,15 @@
 		dev_priv->display.update_wm(dev);
 }
 
-void intel_update_linetime_watermarks(struct drm_device *dev,
-		int pipe, struct drm_display_mode *mode)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->display.update_linetime_wm)
-		dev_priv->display.update_linetime_wm(dev, pipe, mode);
-}
-
 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
-				    uint32_t sprite_width, int pixel_size)
+				    uint32_t sprite_width, int pixel_size,
+				    bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->display.update_sprite_wm)
 		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
-						   pixel_size);
+						   pixel_size, enable);
 }
 
 static struct drm_i915_gem_object *
@@ -2481,6 +3063,52 @@
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+	u32 limits = gen6_rps_limits(dev_priv, &val);
+	u32 pval;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_delay);
+	WARN_ON(val < dev_priv->rps.min_delay);
+
+	DRM_DEBUG_DRIVER("gpu freq request from %d to %d\n",
+			 vlv_gpu_freq(dev_priv->mem_freq,
+				      dev_priv->rps.cur_delay),
+			 vlv_gpu_freq(dev_priv->mem_freq, val));
+
+	if (val == dev_priv->rps.cur_delay)
+		return;
+
+	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+	do {
+		pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+		if (time_after(jiffies, timeout)) {
+			DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
+			break;
+		}
+		udelay(10);
+	} while (pval & 1);
+
+	pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	if ((pval >> 8) != val)
+		DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n",
+			  val, pval >> 8);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	dev_priv->rps.cur_delay = pval >> 8;
+
+	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
+}
+
+
 static void gen6_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2488,6 +3116,25 @@
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
+
+	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+}
+
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
 	I915_WRITE(GEN6_PMIER, 0);
 	/* Complete PM interrupt masking here doesn't race with the rps work
 	 * item again unmasking PM interrupts because that is using a different
@@ -2499,6 +3146,11 @@
 	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+
+	if (dev_priv->vlv_pctx) {
+		drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
+		dev_priv->vlv_pctx = NULL;
+	}
 }
 
 int intel_enable_rc6(const struct drm_device *dev)
@@ -2655,12 +3307,15 @@
 	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
 	/* requires MSI enabled */
-	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
 	spin_lock_irq(&dev_priv->rps.lock);
-	WARN_ON(dev_priv->rps.pm_iir != 0);
-	I915_WRITE(GEN6_PMIMR, 0);
+	/* FIXME: Our interrupt enabling sequence is bonghits.
+	 * dev_priv->rps.pm_iir really should be 0 here. */
+	dev_priv->rps.pm_iir = 0;
+	I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+	I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 	spin_unlock_irq(&dev_priv->rps.lock);
-	/* enable all PM interrupts */
+	/* unmask all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
 	rc6vids = 0;
@@ -2742,6 +3397,198 @@
 	}
 }
 
+int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rp0;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+	/* Clamp to max */
+	rp0 = min_t(u32, rp0, 0xea);
+
+	return rp0;
+}
+
+static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+	u32 val, rpe;
+
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+	return rpe;
+}
+
+int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
+}
+
+static void vlv_rps_timer_work(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    rps.vlv_work.work);
+
+	/*
+	 * Timer fired, we must be idle.  Drop to min voltage state.
+	 * Note: we use RPe here since it should match the
+	 * Vmin we were shooting for.  That should give us better
+	 * perf when we come back out of RC6 than if we used the
+	 * min freq available.
+	 */
+	mutex_lock(&dev_priv->rps.hw_lock);
+	valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+static void valleyview_setup_pctx(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *pctx;
+	unsigned long pctx_paddr;
+	u32 pcbr;
+	int pctx_size = 24*1024;
+
+	pcbr = I915_READ(VLV_PCBR);
+	if (pcbr) {
+		/* BIOS set it up already, grab the pre-alloc'd space */
+		int pcbr_offset;
+
+		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+								      pcbr_offset,
+								      -1,
+								      pctx_size);
+		goto out;
+	}
+
+	/*
+	 * From the Gunit register HAS:
+	 * The Gfx driver is expected to program this register and ensure
+	 * proper allocation within Gfx stolen memory.  For example, this
+	 * register should be programmed such that the PCBR range does not
+	 * overlap with other ranges, such as the frame buffer, protected
+	 * memory, or any other relevant ranges.
+	 */
+	pctx = i915_gem_object_create_stolen(dev, pctx_size);
+	if (!pctx) {
+		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+		return;
+	}
+
+	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+	I915_WRITE(VLV_PCBR, pctx_paddr);
+
+out:
+	dev_priv->vlv_pctx = pctx;
+}
+
+static void valleyview_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	u32 gtfifodbg, val, rpe;
+	int i;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	valleyview_setup_pctx(dev);
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   GEN6_RP_DOWN_IDLE_CONT);
+
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
+
+	/* allows RC6 residency counter to work */
+	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
+	I915_WRITE(GEN6_RC_CONTROL,
+		   GEN7_RC_CTL_TO_MODE);
+
+	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+	switch ((val >> 6) & 3) {
+	case 0:
+	case 1:
+		dev_priv->mem_freq = 800;
+		break;
+	case 2:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 3:
+		dev_priv->mem_freq = 1333;
+		break;
+	}
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+	DRM_DEBUG_DRIVER("current GPU freq: %d\n",
+			 vlv_gpu_freq(dev_priv->mem_freq, (val >> 8) & 0xff));
+	dev_priv->rps.cur_delay = (val >> 8) & 0xff;
+
+	dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
+	dev_priv->rps.hw_max = dev_priv->rps.max_delay;
+	DRM_DEBUG_DRIVER("max GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq,
+						     dev_priv->rps.max_delay));
+
+	rpe = valleyview_rps_rpe_freq(dev_priv);
+	DRM_DEBUG_DRIVER("RPe GPU freq: %d\n",
+			 vlv_gpu_freq(dev_priv->mem_freq, rpe));
+	dev_priv->rps.rpe_delay = rpe;
+
+	val = valleyview_rps_min_freq(dev_priv);
+	DRM_DEBUG_DRIVER("min GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq,
+							    val));
+	dev_priv->rps.min_delay = val;
+
+	DRM_DEBUG_DRIVER("setting GPU freq to %d\n",
+			 vlv_gpu_freq(dev_priv->mem_freq, rpe));
+
+	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
+
+	valleyview_set_rps(dev_priv->dev, rpe);
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
+	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps.lock);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
 void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3465,13 +4312,22 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* Interrupts should be disabled already to avoid re-arming. */
+	WARN_ON(dev->irq_enabled);
+
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
-	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+		cancel_work_sync(&dev_priv->rps.work);
+		if (IS_VALLEYVIEW(dev))
+			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
-		gen6_disable_rps(dev);
+		if (IS_VALLEYVIEW(dev))
+			valleyview_disable_rps(dev);
+		else
+			gen6_disable_rps(dev);
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
 }
@@ -3484,8 +4340,13 @@
 	struct drm_device *dev = dev_priv->dev;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	gen6_enable_rps(dev);
-	gen6_update_ring_freq(dev);
+
+	if (IS_VALLEYVIEW(dev)) {
+		valleyview_enable_rps(dev);
+	} else {
+		gen6_enable_rps(dev);
+		gen6_update_ring_freq(dev);
+	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
@@ -3497,7 +4358,7 @@
 		ironlake_enable_drps(dev);
 		ironlake_enable_rc6(dev);
 		intel_init_emon(dev);
-	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/*
 		 * PCU communication is slow and this doesn't need to be
 		 * done at any specific time, so do this out of our fast path
@@ -3579,7 +4440,7 @@
 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
 		   _3D_CHICKEN2_WM_READ_PIPELINED);
 
-	/* WaDisableRenderCachePipelinedFlush */
+	/* WaDisableRenderCachePipelinedFlush:ilk */
 	I915_WRITE(CACHE_MODE_0,
 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
@@ -3607,7 +4468,7 @@
 		val = I915_READ(TRANS_CHICKEN2(pipe));
 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
-		if (dev_priv->fdi_rx_polarity_inverted)
+		if (dev_priv->vbt.fdi_rx_polarity_inverted)
 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
@@ -3646,11 +4507,11 @@
 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
 		   ILK_ELPIN_409_SELECT);
 
-	/* WaDisableHiZPlanesWhenMSAAEnabled */
+	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
 	I915_WRITE(_3D_CHICKEN,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
-	/* WaSetupGtModeTdRowDispatch */
+	/* WaSetupGtModeTdRowDispatch:snb */
 	if (IS_SNB_GT1(dev))
 		I915_WRITE(GEN6_GT_MODE,
 			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
@@ -3677,8 +4538,8 @@
 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
 	 * but we didn't debug actual testcases to find it out.
 	 *
-	 * Also apply WaDisableVDSUnitClockGating and
-	 * WaDisableRCPBUnitClockGating.
+	 * Also apply WaDisableVDSUnitClockGating:snb and
+	 * WaDisableRCPBUnitClockGating:snb.
 	 */
 	I915_WRITE(GEN6_UCGCTL2,
 		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@@ -3709,7 +4570,7 @@
 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
-	/* WaMbcDriverBootEnable */
+	/* WaMbcDriverBootEnable:snb */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3739,7 +4600,6 @@
 	reg |= GEN7_FF_VS_SCHED_HW;
 	reg |= GEN7_FF_DS_SCHED_HW;
 
-	/* WaVSRefCountFullforceMissDisable */
 	if (IS_HASWELL(dev_priv->dev))
 		reg &= ~GEN7_FF_VS_REF_CNT_FFME;
 
@@ -3758,6 +4618,23 @@
 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
 			   PCH_LP_PARTITION_LEVEL_DISABLE);
+
+	/* WADPOClockGatingDisable:hsw */
+	I915_WRITE(_TRANSA_CHICKEN1,
+		   I915_READ(_TRANSA_CHICKEN1) |
+		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+}
+
+static void lpt_suspend_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+
+		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
@@ -3770,21 +4647,21 @@
 	I915_WRITE(WM1_LP_ILK, 0);
 
 	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 * This implements the WaDisableRCZUnitClockGating:hsw workaround.
 	 */
 	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	/* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
-	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	/* WaApplyL3ControlAndL3ChickenMode:hsw */
 	I915_WRITE(GEN7_L3CNTLREG1,
 			GEN7_WA_FOR_GEN7_L3_CONTROL);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
 			GEN7_WA_L3_CHICKEN_MODE);
 
-	/* This is required by WaCatErrorRejectionIssue */
+	/* This is required by WaCatErrorRejectionIssue:hsw */
 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
@@ -3796,27 +4673,23 @@
 		intel_flush_display_plane(dev_priv, pipe);
 	}
 
+	/* WaVSRefCountFullforceMissDisable:hsw */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
-	/* WaDisable4x2SubspanOptimization */
+	/* WaDisable4x2SubspanOptimization:hsw */
 	I915_WRITE(CACHE_MODE_1,
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
-	/* WaMbcDriverBootEnable */
+	/* WaMbcDriverBootEnable:hsw */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
-	/* WaSwitchSolVfFArbitrationPriority */
+	/* WaSwitchSolVfFArbitrationPriority:hsw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
-	/* XXX: This is a workaround for early silicon revisions and should be
-	 * removed later.
-	 */
-	I915_WRITE(WM_DBG,
-			I915_READ(WM_DBG) |
-			WM_DBG_DISALLOW_MULTIPLE_LP |
-			WM_DBG_DISALLOW_SPRITE |
-			WM_DBG_DISALLOW_MAXFIFO);
+	/* WaRsPkgCStateDisplayPMReq:hsw */
+	I915_WRITE(CHICKEN_PAR1_1,
+		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
 
 	lpt_init_clock_gating(dev);
 }
@@ -3833,16 +4706,16 @@
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
-	/* WaDisableEarlyCull */
+	/* WaDisableEarlyCull:ivb */
 	I915_WRITE(_3D_CHICKEN3,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
-	/* WaDisableBackToBackFlipFix */
+	/* WaDisableBackToBackFlipFix:ivb */
 	I915_WRITE(IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-	/* WaDisablePSDDualDispatchEnable */
+	/* WaDisablePSDDualDispatchEnable:ivb */
 	if (IS_IVB_GT1(dev))
 		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
@@ -3850,11 +4723,11 @@
 		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
 			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
-	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
-	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	/* WaApplyL3ControlAndL3ChickenMode:ivb */
 	I915_WRITE(GEN7_L3CNTLREG1,
 			GEN7_WA_FOR_GEN7_L3_CONTROL);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
@@ -3867,7 +4740,7 @@
 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
 
-	/* WaForceL3Serialization */
+	/* WaForceL3Serialization:ivb */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
@@ -3882,13 +4755,13 @@
 	 * but we didn't debug actual testcases to find it out.
 	 *
 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
 	 */
 	I915_WRITE(GEN6_UCGCTL2,
 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
-	/* This is required by WaCatErrorRejectionIssue */
+	/* This is required by WaCatErrorRejectionIssue:ivb */
 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
@@ -3900,13 +4773,14 @@
 		intel_flush_display_plane(dev_priv, pipe);
 	}
 
-	/* WaMbcDriverBootEnable */
+	/* WaMbcDriverBootEnable:ivb */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
+	/* WaVSRefCountFullforceMissDisable:ivb */
 	gen7_setup_fixed_func_scheduler(dev_priv);
 
-	/* WaDisable4x2SubspanOptimization */
+	/* WaDisable4x2SubspanOptimization:ivb */
 	I915_WRITE(CACHE_MODE_1,
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
@@ -3932,46 +4806,46 @@
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
-	/* WaDisableEarlyCull */
+	/* WaDisableEarlyCull:vlv */
 	I915_WRITE(_3D_CHICKEN3,
 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
-	/* WaDisableBackToBackFlipFix */
+	/* WaDisableBackToBackFlipFix:vlv */
 	I915_WRITE(IVB_CHICKEN3,
 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-	/* WaDisablePSDDualDispatchEnable */
+	/* WaDisablePSDDualDispatchEnable:vlv */
 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
 		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
 				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
-	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	/* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
-	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	/* WaApplyL3ControlAndL3ChickenMode:vlv */
 	I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
-	/* WaForceL3Serialization */
+	/* WaForceL3Serialization:vlv */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-	/* WaDisableDopClockGating */
+	/* WaDisableDopClockGating:vlv */
 	I915_WRITE(GEN7_ROW_CHICKEN2,
 		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
-	/* WaForceL3Serialization */
+	/* WaForceL3Serialization:vlv */
 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
-	/* This is required by WaCatErrorRejectionIssue */
+	/* This is required by WaCatErrorRejectionIssue:vlv */
 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-	/* WaMbcDriverBootEnable */
+	/* WaMbcDriverBootEnable:vlv */
 	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
 		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3987,10 +4861,10 @@
 	 * but we didn't debug actual testcases to find it out.
 	 *
 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
-	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
 	 *
-	 * Also apply WaDisableVDSUnitClockGating and
-	 * WaDisableRCPBUnitClockGating.
+	 * Also apply WaDisableVDSUnitClockGating:vlv and
+	 * WaDisableRCPBUnitClockGating:vlv.
 	 */
 	I915_WRITE(GEN6_UCGCTL2,
 		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
@@ -4012,7 +4886,7 @@
 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
 	/*
-	 * WaDisableVLVClockGating_VBIIssue
+	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
 	 * in the reporting of vblank events.
 	 */
@@ -4110,20 +4984,42 @@
 	dev_priv->display.init_clock_gating(dev);
 }
 
+void intel_suspend_hw(struct drm_device *dev)
+{
+	if (HAS_PCH_LPT(dev))
+		lpt_suspend_hw(dev);
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
  * be enabled.
  */
-bool intel_using_power_well(struct drm_device *dev)
+bool intel_display_power_enabled(struct drm_device *dev,
+				 enum intel_display_power_domain domain)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_HASWELL(dev))
+	if (!HAS_POWER_WELL(dev))
+		return true;
+
+	switch (domain) {
+	case POWER_DOMAIN_PIPE_A:
+	case POWER_DOMAIN_TRANSCODER_EDP:
+		return true;
+	case POWER_DOMAIN_PIPE_B:
+	case POWER_DOMAIN_PIPE_C:
+	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+	case POWER_DOMAIN_TRANSCODER_A:
+	case POWER_DOMAIN_TRANSCODER_B:
+	case POWER_DOMAIN_TRANSCODER_C:
 		return I915_READ(HSW_PWR_WELL_DRIVER) ==
 		       (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
-	else
-		return true;
+	default:
+		BUG();
+	}
 }
 
 void intel_set_power_well(struct drm_device *dev, bool enable)
@@ -4190,7 +5086,12 @@
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
-			dev_priv->display.enable_fbc = ironlake_enable_fbc;
+			if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+				dev_priv->display.enable_fbc =
+					gen7_enable_fbc;
+			else
+				dev_priv->display.enable_fbc =
+					ironlake_enable_fbc;
 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
 		} else if (IS_GM45(dev)) {
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
@@ -4242,10 +5143,10 @@
 			}
 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 		} else if (IS_HASWELL(dev)) {
-			if (SNB_READ_WM0_LATENCY()) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+			if (I915_READ64(MCH_SSKPD)) {
+				dev_priv->display.update_wm = haswell_update_wm;
+				dev_priv->display.update_sprite_wm =
+					haswell_update_sprite_wm;
 			} else {
 				DRM_DEBUG_KMS("Failed to read display plane latency. "
 					      "Disable CxSR\n");
@@ -4340,6 +5241,7 @@
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
+	/* WaRsForcewakeWaitTC0:snb */
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
@@ -4371,6 +5273,7 @@
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
+	/* WaRsForcewakeWaitTC0:ivb,hsw */
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
@@ -4474,6 +5377,7 @@
 			    FORCEWAKE_ACK_TIMEOUT_MS))
 		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
 
+	/* WaRsForcewakeWaitTC0:vlv */
 	__gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
@@ -4568,55 +5472,58 @@
 	return 0;
 }
 
-static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
-			u8 addr, u32 *val)
+int vlv_gpu_freq(int ddr_freq, int val)
 {
-	u32 cmd, devfn, port, be, bar;
+	int mult, base;
 
-	bar = 0;
-	be = 0xf;
-	port = IOSF_PORT_PUNIT;
-	devfn = PCI_DEVFN(2, 0);
-
-	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
-		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
-		(bar << IOSF_BAR_SHIFT);
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
-	if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
-		DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
-				 opcode == PUNIT_OPCODE_REG_READ ?
-				 "read" : "write");
-		return -EAGAIN;
+	switch (ddr_freq) {
+	case 800:
+		mult = 20;
+		base = 120;
+		break;
+	case 1066:
+		mult = 22;
+		base = 133;
+		break;
+	case 1333:
+		mult = 21;
+		base = 125;
+		break;
+	default:
+		return -1;
 	}
 
-	I915_WRITE(VLV_IOSF_ADDR, addr);
-	if (opcode == PUNIT_OPCODE_REG_WRITE)
-		I915_WRITE(VLV_IOSF_DATA, *val);
-	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+	return ((val - 0xbd) * mult) + base;
+}
 
-	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
-		     500)) {
-		DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
-			  opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
-			  addr);
-		return -ETIMEDOUT;
+int vlv_freq_opcode(int ddr_freq, int val)
+{
+	int mult, base;
+
+	switch (ddr_freq) {
+	case 800:
+		mult = 20;
+		base = 120;
+		break;
+	case 1066:
+		mult = 22;
+		base = 133;
+		break;
+	case 1333:
+		mult = 21;
+		base = 125;
+		break;
+	default:
+		return -1;
 	}
 
-	if (opcode == PUNIT_OPCODE_REG_READ)
-		*val = I915_READ(VLV_IOSF_DATA);
-	I915_WRITE(VLV_IOSF_DATA, 0);
+	val /= mult;
+	val -= base / mult;
+	val += 0xbd;
 
-	return 0;
+	if (val > 0xea)
+		val = 0xea;
+
+	return val;
 }
 
-int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
-{
-	return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
-}
-
-int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
-{
-	return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
-}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1d5d613..0e72da6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,9 +464,11 @@
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page =  kmap(sg_page(obj->pages->sgl));
-	if (pc->cpu_page == NULL)
+	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
+	if (pc->cpu_page == NULL) {
+		ret = -ENOMEM;
 		goto err_unpin;
+	}
 
 	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
 			 ring->name, pc->gtt_offset);
@@ -515,6 +517,8 @@
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
+	 *
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -556,7 +560,7 @@
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 
 	return ret;
 }
@@ -578,9 +582,16 @@
 update_mboxes(struct intel_ring_buffer *ring,
 	      u32 mmio_offset)
 {
+/* NB: In order to be able to do semaphore MBOX updates for varying number
+ * of rings, it's easiest if we round up each individual update to a
+ * multiple of 2 (since ring updates must always be a multiple of 2)
+ * even though the actual update only requires 3 dwords.
+ */
+#define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, MI_NOOP);
 }
 
 /**
@@ -595,19 +606,24 @@
 static int
 gen6_add_request(struct intel_ring_buffer *ring)
 {
-	u32 mbox1_reg;
-	u32 mbox2_reg;
-	int ret;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *useless;
+	int i, ret;
 
-	ret = intel_ring_begin(ring, 10);
+	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
+				      MBOX_UPDATE_DWORDS) +
+				      4);
 	if (ret)
 		return ret;
+#undef MBOX_UPDATE_DWORDS
 
-	mbox1_reg = ring->signal_mbox[0];
-	mbox2_reg = ring->signal_mbox[1];
+	for_each_ring(useless, dev_priv, i) {
+		u32 mbox_reg = ring->signal_mbox[i];
+		if (mbox_reg != GEN6_NOSYNC)
+			update_mboxes(ring, mbox_reg);
+	}
 
-	update_mboxes(ring, mbox1_reg);
-	update_mboxes(ring, mbox2_reg);
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, ring->outstanding_lazy_request);
@@ -779,7 +795,7 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -797,7 +813,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -816,7 +832,7 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -834,7 +850,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -853,7 +869,7 @@
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -871,7 +887,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -899,6 +915,9 @@
 		case VCS:
 			mmio = BSD_HWS_PGA_GEN7;
 			break;
+		case VECS:
+			mmio = VEBOX_HWS_PGA_GEN7;
+			break;
 		}
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
@@ -961,10 +980,11 @@
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
+	if (ring->irq_refcount.gt++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
-						GEN6_RENDER_L3_PARITY_ERROR));
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
@@ -984,9 +1004,10 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
+	if (--ring->irq_refcount.gt == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
@@ -998,6 +1019,48 @@
 	gen6_gt_force_wake_put(dev_priv);
 }
 
+static bool
+hsw_vebox_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	if (ring->irq_refcount.pm++ == 0) {
+		u32 pm_imr = I915_READ(GEN6_PMIMR);
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
+		POSTING_READ(GEN6_PMIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+	return true;
+}
+
+static void
+hsw_vebox_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	if (--ring->irq_refcount.pm == 0) {
+		u32 pm_imr = I915_READ(GEN6_PMIMR);
+		I915_WRITE_IMR(ring, ~0);
+		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
+		POSTING_READ(GEN6_PMIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+}
+
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
@@ -1500,6 +1563,7 @@
 	}
 
 	ring->set_seqno(ring, seqno);
+	ring->hangcheck.seqno = seqno;
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1546,8 +1610,8 @@
 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
+			       u32 invalidate, u32 flush)
 {
 	uint32_t cmd;
 	int ret;
@@ -1618,8 +1682,8 @@
 
 /* Blitter support (SandyBridge+) */
 
-static int blt_ring_flush(struct intel_ring_buffer *ring,
-			  u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate, u32 flush)
 {
 	uint32_t cmd;
 	int ret;
@@ -1662,15 +1726,18 @@
 			ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
-		ring->irq_enable_mask = GT_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
-		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
-		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
-		ring->signal_mbox[0] = GEN6_VRSYNC;
-		ring->signal_mbox[1] = GEN6_BRSYNC;
+		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
+		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
+		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
+		ring->signal_mbox[RCS] = GEN6_NOSYNC;
+		ring->signal_mbox[VCS] = GEN6_VRSYNC;
+		ring->signal_mbox[BCS] = GEN6_BRSYNC;
+		ring->signal_mbox[VECS] = GEN6_VERSYNC;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
@@ -1678,7 +1745,8 @@
 		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
-		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
 	} else {
 		ring->add_request = i9xx_add_request;
 		if (INTEL_INFO(dev)->gen < 4)
@@ -1816,20 +1884,23 @@
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
 			ring->write_tail = gen6_bsd_ring_write_tail;
-		ring->flush = gen6_ring_flush;
+		ring->flush = gen6_bsd_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		ring->sync_to = gen6_ring_sync;
-		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
-		ring->signal_mbox[0] = GEN6_RVSYNC;
-		ring->signal_mbox[1] = GEN6_BVSYNC;
+		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
+		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
+		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
+		ring->signal_mbox[RCS] = GEN6_RVSYNC;
+		ring->signal_mbox[VCS] = GEN6_NOSYNC;
+		ring->signal_mbox[BCS] = GEN6_BVSYNC;
+		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
@@ -1837,7 +1908,7 @@
 		ring->get_seqno = ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
-			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
 			ring->irq_put = gen5_ring_put_irq;
 		} else {
@@ -1862,20 +1933,56 @@
 
 	ring->mmio_base = BLT_RING_BASE;
 	ring->write_tail = ring_write_tail;
-	ring->flush = blt_ring_flush;
+	ring->flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 	ring->irq_get = gen6_ring_get_irq;
 	ring->irq_put = gen6_ring_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	ring->sync_to = gen6_ring_sync;
-	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->signal_mbox[0] = GEN6_RBSYNC;
-	ring->signal_mbox[1] = GEN6_VBSYNC;
+	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
+	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
+	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
+	ring->signal_mbox[RCS] = GEN6_RBSYNC;
+	ring->signal_mbox[VCS] = GEN6_VBSYNC;
+	ring->signal_mbox[BCS] = GEN6_NOSYNC;
+	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+	ring->init = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_vebox_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+
+	ring->name = "video enhancement ring";
+	ring->id = VECS;
+
+	ring->mmio_base = VEBOX_RING_BASE;
+	ring->write_tail = ring_write_tail;
+	ring->flush = gen6_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
+		PM_VEBOX_CS_ERROR_INTERRUPT;
+	ring->irq_get = hsw_vebox_get_irq;
+	ring->irq_put = hsw_vebox_put_irq;
+	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	ring->sync_to = gen6_ring_sync;
+	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
+	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
+	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
+	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->signal_mbox[RCS] = GEN6_RVESYNC;
+	ring->signal_mbox[VCS] = GEN6_VVESYNC;
+	ring->signal_mbox[BCS] = GEN6_BVESYNC;
+	ring->signal_mbox[VECS] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d66208c..022d07e4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -37,14 +37,19 @@
 #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
 #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
 
+struct intel_ring_hangcheck {
+	u32 seqno;
+};
+
 struct  intel_ring_buffer {
 	const char	*name;
 	enum intel_ring_id {
 		RCS = 0x0,
 		VCS,
 		BCS,
+		VECS,
 	} id;
-#define I915_NUM_RINGS 3
+#define I915_NUM_RINGS 4
 	u32		mmio_base;
 	void		__iomem *virtual_start;
 	struct		drm_device *dev;
@@ -67,7 +72,10 @@
 	 */
 	u32		last_retired_head;
 
-	u32		irq_refcount;		/* protected by dev_priv->irq_lock */
+	struct {
+		u32	gt; /*  protected by dev_priv->irq_lock */
+		u32	pm; /*  protected by dev_priv->rps.lock (sucks) */
+	} irq_refcount;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		trace_irq_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
@@ -102,8 +110,11 @@
 				   struct intel_ring_buffer *to,
 				   u32 seqno);
 
-	u32		semaphore_register[3]; /*our mbox written by others */
-	u32		signal_mbox[2]; /* mboxes this ring signals to */
+	/* our mbox written by others */
+	u32		semaphore_register[I915_NUM_RINGS];
+	/* mboxes this ring signals to */
+	u32		signal_mbox[I915_NUM_RINGS];
+
 	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
@@ -135,7 +146,9 @@
 	 */
 	bool itlb_before_ctx_switch;
 	struct i915_hw_context *default_context;
-	struct drm_i915_gem_object *last_context_obj;
+	struct i915_hw_context *last_context;
+
+	struct intel_ring_hangcheck hangcheck;
 
 	void *private;
 };
@@ -224,6 +237,7 @@
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
+int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d4ea6c2..c558419 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -712,6 +712,13 @@
 		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
 }
 
+static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+				  struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_get_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_get_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
 static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
 					 struct intel_sdvo_dtd *dtd)
 {
@@ -726,6 +733,13 @@
 				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
 }
 
+static bool intel_sdvo_get_input_timing(struct intel_sdvo *intel_sdvo,
+					struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_get_timing(intel_sdvo,
+				     SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
+}
+
 static bool
 intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
 					 uint16_t clock,
@@ -1041,6 +1055,32 @@
 	return true;
 }
 
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
+{
+	unsigned dotclock = pipe_config->adjusted_mode.clock;
+	struct dpll *clock = &pipe_config->dpll;
+
+	/* SDVO TV has fixed PLL values which depend on its clock range,
+	   this mirrors the VBIOS setting. */
+	if (dotclock >= 100000 && dotclock < 140500) {
+		clock->p1 = 2;
+		clock->p2 = 10;
+		clock->n = 3;
+		clock->m1 = 16;
+		clock->m2 = 8;
+	} else if (dotclock >= 140500 && dotclock <= 200000) {
+		clock->p1 = 1;
+		clock->p2 = 10;
+		clock->n = 6;
+		clock->m1 = 12;
+		clock->m2 = 8;
+	} else {
+		WARN(1, "SDVO TV clock out of range: %i\n", dotclock);
+	}
+
+	pipe_config->clock_set = true;
+}
+
 static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 				      struct intel_crtc_config *pipe_config)
 {
@@ -1066,6 +1106,7 @@
 		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
 							   mode,
 							   adjusted_mode);
+		pipe_config->sdvo_tv_clock = true;
 	} else if (intel_sdvo->is_lvds) {
 		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
 							     intel_sdvo->sdvo_lvds_fixed_mode))
@@ -1097,6 +1138,10 @@
 	if (intel_sdvo->color_range)
 		pipe_config->limited_color_range = true;
 
+	/* Clock computation needs to happen after pixel multiplier. */
+	if (intel_sdvo->is_tv)
+		i9xx_adjust_sdvo_tv_clock(pipe_config);
+
 	return true;
 }
 
@@ -1264,6 +1309,33 @@
 	return true;
 }
 
+static void intel_sdvo_get_config(struct intel_encoder *encoder,
+				  struct intel_crtc_config *pipe_config)
+{
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_sdvo_dtd dtd;
+	u32 flags = 0;
+	bool ret;
+
+	ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
+	if (!ret) {
+		DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+		return;
+	}
+
+	if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+
+	pipe_config->adjusted_mode.flags |= flags;
+}
+
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -1344,6 +1416,7 @@
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 }
 
+/* Special dpms function to support cloning between dvo/sdvo/crt. */
 static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_crtc *crtc;
@@ -1365,6 +1438,8 @@
 		return;
 	}
 
+	/* We set active outputs manually below in case pipe dpms doesn't change
+	 * due to cloning. */
 	if (mode != DRM_MODE_DPMS_ON) {
 		intel_sdvo_set_active_outputs(intel_sdvo, 0);
 		if (0)
@@ -1495,7 +1570,7 @@
 
 	return drm_get_edid(connector,
 			    intel_gmbus_get_adapter(dev_priv,
-						    dev_priv->crt_ddc_pin));
+						    dev_priv->vbt.crt_ddc_pin));
 }
 
 static enum drm_connector_status
@@ -1625,12 +1700,9 @@
 	if (ret == connector_status_connected) {
 		intel_sdvo->is_tv = false;
 		intel_sdvo->is_lvds = false;
-		intel_sdvo->base.needs_tv_clock = false;
 
-		if (response & SDVO_TV_MASK) {
+		if (response & SDVO_TV_MASK)
 			intel_sdvo->is_tv = true;
-			intel_sdvo->base.needs_tv_clock = true;
-		}
 		if (response & SDVO_LVDS_MASK)
 			intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
 	}
@@ -1772,21 +1844,12 @@
 	struct drm_display_mode *newmode;
 
 	/*
-	 * Attempt to get the mode list from DDC.
-	 * Assume that the preferred modes are
-	 * arranged in priority order.
-	 */
-	intel_ddc_get_modes(connector, &intel_sdvo->ddc);
-
-	/*
 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
-	 * SDVO->LVDS transcoders can't cope with the EDID mode. Since
-	 * drm_mode_probed_add adds the mode at the head of the list we add it
-	 * last.
+	 * SDVO->LVDS transcoders can't cope with the EDID mode.
 	 */
-	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+	if (dev_priv->vbt.sdvo_lvds_vbt_mode != NULL) {
 		newmode = drm_mode_duplicate(connector->dev,
-					     dev_priv->sdvo_lvds_vbt_mode);
+					     dev_priv->vbt.sdvo_lvds_vbt_mode);
 		if (newmode != NULL) {
 			/* Guarantee the mode is preferred */
 			newmode->type = (DRM_MODE_TYPE_PREFERRED |
@@ -1795,6 +1858,13 @@
 		}
 	}
 
+	/*
+	 * Attempt to get the mode list from DDC.
+	 * Assume that the preferred modes are
+	 * arranged in priority order.
+	 */
+	intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
 	list_for_each_entry(newmode, &connector->probed_modes, head) {
 		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
 			intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2329,7 +2399,6 @@
 	intel_sdvo_connector->output_flag = type;
 
 	intel_sdvo->is_tv = true;
-	intel_sdvo->base.needs_tv_clock = true;
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
@@ -2417,7 +2486,6 @@
 intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
 {
 	intel_sdvo->is_tv = false;
-	intel_sdvo->base.needs_tv_clock = false;
 	intel_sdvo->is_lvds = false;
 
 	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
@@ -2797,6 +2865,7 @@
 	intel_encoder->mode_set = intel_sdvo_mode_set;
 	intel_encoder->enable = intel_enable_sdvo;
 	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+	intel_encoder->get_config = intel_sdvo_get_config;
 
 	/* In default case sdvo lvds is false */
 	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
new file mode 100644
index 0000000..9a0e6c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* IOSF sideband */
+static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
+			   u32 port, u32 opcode, u32 addr, u32 *val)
+{
+	u32 cmd, be = 0xf, bar = 0;
+	bool is_read = (opcode == PUNIT_OPCODE_REG_READ ||
+			opcode == DPIO_OPCODE_REG_READ);
+
+	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+		(bar << IOSF_BAR_SHIFT);
+
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
+				 is_read ? "read" : "write");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(VLV_IOSF_ADDR, addr);
+	if (!is_read)
+		I915_WRITE(VLV_IOSF_DATA, *val);
+	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
+				 is_read ? "read" : "write");
+		return -ETIMEDOUT;
+	}
+
+	if (is_read)
+		*val = I915_READ(VLV_IOSF_DATA);
+	I915_WRITE(VLV_IOSF_DATA, 0);
+
+	return 0;
+}
+
+u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+	u32 val = 0;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->dpio_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+			PUNIT_OPCODE_REG_READ, addr, &val);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	return val;
+}
+
+void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->dpio_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+			PUNIT_OPCODE_REG_WRITE, addr, &val);
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
+{
+	u32 val = 0;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	mutex_lock(&dev_priv->dpio_lock);
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+			PUNIT_OPCODE_REG_READ, addr, &val);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	return val;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+			DPIO_OPCODE_REG_READ, reg, &val);
+
+	return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+			DPIO_OPCODE_REG_WRITE, reg, &val);
+}
+
+/* SBI access */
+u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+		   enum intel_sbi_destination destination)
+{
+	u32 value = 0;
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return 0;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+
+	if (destination == SBI_ICLK)
+		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+	else
+		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+		return 0;
+	}
+
+	return I915_READ(SBI_DATA);
+}
+
+void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		     enum intel_sbi_destination destination)
+{
+	u32 tmp;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+	I915_WRITE(SBI_DATA, value);
+
+	if (destination == SBI_ICLK)
+		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	else
+		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+		return;
+	}
+}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index c7d25c5..04d38d4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -32,6 +32,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -113,7 +114,7 @@
 	crtc_w--;
 	crtc_h--;
 
-	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
 
 	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
 	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -267,7 +268,7 @@
 	crtc_w--;
 	crtc_h--;
 
-	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
 
 	/*
 	 * IVB workaround: must disable low power watermarks for at least
@@ -334,6 +335,8 @@
 
 	dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
+	intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
+
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
 		intel_update_watermarks(dev);
@@ -452,7 +455,7 @@
 	crtc_w--;
 	crtc_h--;
 
-	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
 
 	dvsscale = 0;
 	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
@@ -583,6 +586,20 @@
 		key->flags = I915_SET_COLORKEY_NONE;
 }
 
+static bool
+format_is_yuv(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YVYU:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static int
 intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -600,9 +617,29 @@
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
 	int ret = 0;
-	int x = src_x >> 16, y = src_y >> 16;
-	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
 	bool disable_primary = false;
+	bool visible;
+	int hscale, vscale;
+	int max_scale, min_scale;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	struct drm_rect src = {
+		/* sample coordinates in 16.16 fixed point */
+		.x1 = src_x,
+		.x2 = src_x + src_w,
+		.y1 = src_y,
+		.y2 = src_y + src_h,
+	};
+	struct drm_rect dst = {
+		/* integer pixels */
+		.x1 = crtc_x,
+		.x2 = crtc_x + crtc_w,
+		.y1 = crtc_y,
+		.y2 = crtc_y + crtc_h,
+	};
+	const struct drm_rect clip = {
+		.x2 = crtc->mode.hdisplay,
+		.y2 = crtc->mode.vdisplay,
+	};
 
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
@@ -618,19 +655,23 @@
 	intel_plane->src_w = src_w;
 	intel_plane->src_h = src_h;
 
-	src_w = src_w >> 16;
-	src_h = src_h >> 16;
-
 	/* Pipe must be running... */
-	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
+	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
+		DRM_DEBUG_KMS("Pipe disabled\n");
 		return -EINVAL;
-
-	if (crtc_x >= primary_w || crtc_y >= primary_h)
-		return -EINVAL;
+	}
 
 	/* Don't modify another pipe's plane */
-	if (intel_plane->pipe != intel_crtc->pipe)
+	if (intel_plane->pipe != intel_crtc->pipe) {
+		DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
 		return -EINVAL;
+	}
+
+	/* FIXME check all gen limits */
+	if (fb->width < 3 || fb->height < 3 || fb->pitches[0] > 16384) {
+		DRM_DEBUG_KMS("Unsuitable framebuffer for plane\n");
+		return -EINVAL;
+	}
 
 	/* Sprite planes can be linear or x-tiled surfaces */
 	switch (obj->tiling_mode) {
@@ -638,55 +679,123 @@
 		case I915_TILING_X:
 			break;
 		default:
+			DRM_DEBUG_KMS("Unsupported tiling mode\n");
 			return -EINVAL;
 	}
 
 	/*
-	 * Clamp the width & height into the visible area.  Note we don't
-	 * try to scale the source if part of the visible region is offscreen.
-	 * The caller must handle that by adjusting source offset and size.
+	 * FIXME the following code does a bunch of fuzzy adjustments to the
+	 * coordinates and sizes. We probably need some way to decide whether
+	 * more strict checking should be done instead.
 	 */
-	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
-		crtc_w += crtc_x;
-		crtc_x = 0;
+	max_scale = intel_plane->max_downscale << 16;
+	min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+
+	hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
+	BUG_ON(hscale < 0);
+
+	vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
+	BUG_ON(vscale < 0);
+
+	visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
+
+	crtc_x = dst.x1;
+	crtc_y = dst.y1;
+	crtc_w = drm_rect_width(&dst);
+	crtc_h = drm_rect_height(&dst);
+
+	if (visible) {
+		/* check again in case clipping clamped the results */
+		hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
+		if (hscale < 0) {
+			DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
+			drm_rect_debug_print(&src, true);
+			drm_rect_debug_print(&dst, false);
+
+			return hscale;
+		}
+
+		vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
+		if (vscale < 0) {
+			DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
+			drm_rect_debug_print(&src, true);
+			drm_rect_debug_print(&dst, false);
+
+			return vscale;
+		}
+
+		/* Make the source viewport size an exact multiple of the scaling factors. */
+		drm_rect_adjust_size(&src,
+				     drm_rect_width(&dst) * hscale - drm_rect_width(&src),
+				     drm_rect_height(&dst) * vscale - drm_rect_height(&src));
+
+		/* sanity check to make sure the src viewport wasn't enlarged */
+		WARN_ON(src.x1 < (int) src_x ||
+			src.y1 < (int) src_y ||
+			src.x2 > (int) (src_x + src_w) ||
+			src.y2 > (int) (src_y + src_h));
+
+		/*
+		 * Hardware doesn't handle subpixel coordinates.
+		 * Adjust to (macro)pixel boundary, but be careful not to
+		 * increase the source viewport size, because that could
+		 * push the downscaling factor out of bounds.
+		 */
+		src_x = src.x1 >> 16;
+		src_w = drm_rect_width(&src) >> 16;
+		src_y = src.y1 >> 16;
+		src_h = drm_rect_height(&src) >> 16;
+
+		if (format_is_yuv(fb->pixel_format)) {
+			src_x &= ~1;
+			src_w &= ~1;
+
+			/*
+			 * Must keep src and dst the
+			 * same if we can't scale.
+			 */
+			if (!intel_plane->can_scale)
+				crtc_w &= ~1;
+
+			if (crtc_w == 0)
+				visible = false;
+		}
 	}
-	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
-		goto out;
-	if ((crtc_x + crtc_w) > primary_w)
-		crtc_w = primary_w - crtc_x;
 
-	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
-		crtc_h += crtc_y;
-		crtc_y = 0;
+	/* Check size restrictions when scaling */
+	if (visible && (src_w != crtc_w || src_h != crtc_h)) {
+		unsigned int width_bytes;
+
+		WARN_ON(!intel_plane->can_scale);
+
+		/* FIXME interlacing min height is 6 */
+
+		if (crtc_w < 3 || crtc_h < 3)
+			visible = false;
+
+		if (src_w < 3 || src_h < 3)
+			visible = false;
+
+		width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
+
+		if (src_w > 2048 || src_h > 2048 ||
+		    width_bytes > 4096 || fb->pitches[0] > 4096) {
+			DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
+			return -EINVAL;
+		}
 	}
-	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
-		goto out;
-	if (crtc_y + crtc_h > primary_h)
-		crtc_h = primary_h - crtc_y;
 
-	if (!crtc_w || !crtc_h) /* Again, nothing to display */
-		goto out;
-
-	/*
-	 * We may not have a scaler, eg. HSW does not have it any more
-	 */
-	if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
-		return -EINVAL;
-
-	/*
-	 * We can take a larger source and scale it down, but
-	 * only so much...  16x is the max on SNB.
-	 */
-	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
-		return -EINVAL;
+	dst.x1 = crtc_x;
+	dst.x2 = crtc_x + crtc_w;
+	dst.y1 = crtc_y;
+	dst.y2 = crtc_y + crtc_h;
 
 	/*
 	 * If the sprite is completely covering the primary plane,
 	 * we can disable the primary and save power.
 	 */
-	if ((crtc_x == 0) && (crtc_y == 0) &&
-	    (crtc_w == primary_w) && (crtc_h == primary_h))
-		disable_primary = true;
+	disable_primary = drm_rect_equals(&dst, &clip);
+	WARN_ON(disable_primary && !visible);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -708,8 +817,12 @@
 	if (!disable_primary)
 		intel_enable_primary(crtc);
 
-	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
-				  crtc_w, crtc_h, x, y, src_w, src_h);
+	if (visible)
+		intel_plane->update_plane(plane, fb, obj,
+					  crtc_x, crtc_y, crtc_w, crtc_h,
+					  src_x, src_y, src_w, src_h);
+	else
+		intel_plane->disable_plane(plane);
 
 	if (disable_primary)
 		intel_disable_primary(crtc);
@@ -732,7 +845,6 @@
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
-out:
 	return ret;
 }
 
@@ -918,13 +1030,15 @@
 		break;
 
 	case 7:
-		if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
-			intel_plane->can_scale = false;
-		else
+		if (IS_IVYBRIDGE(dev)) {
 			intel_plane->can_scale = true;
+			intel_plane->max_downscale = 2;
+		} else {
+			intel_plane->can_scale = false;
+			intel_plane->max_downscale = 1;
+		}
 
 		if (IS_VALLEYVIEW(dev)) {
-			intel_plane->max_downscale = 1;
 			intel_plane->update_plane = vlv_update_plane;
 			intel_plane->disable_plane = vlv_disable_plane;
 			intel_plane->update_colorkey = vlv_update_colorkey;
@@ -933,7 +1047,6 @@
 			plane_formats = vlv_plane_formats;
 			num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
 		} else {
-			intel_plane->max_downscale = 2;
 			intel_plane->update_plane = ivb_update_plane;
 			intel_plane->disable_plane = ivb_disable_plane;
 			intel_plane->update_colorkey = ivb_update_colorkey;
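
The sprite update path above now carries the source rectangle in 16.16 fixed point and derives the scaling factors from the clipped source and destination sizes. A minimal, self-contained sketch of that arithmetic (hypothetical helper, not part of the driver; the real code uses the drm_rect helpers shown above):

	/*
	 * 16.16 fixed-point horizontal scale factor: source width (16.16)
	 * divided by destination width (integer pixels).  For example,
	 * src_w = 1280 << 16 and crtc_w = 640 give 2 << 16, i.e. a 2x
	 * downscale, which matches the Ivy Bridge sprite limit set below
	 * (max_downscale = 2).
	 */
	static int example_calc_hscale(int src_w_fixed, int dst_w)
	{
		return src_w_fixed / dst_w;	/* result is again 16.16 */
	}
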
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b945bc5..7d11a5a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1521,12 +1521,12 @@
 	struct child_device_config *p_child;
 	int i, ret;
 
-	if (!dev_priv->child_dev_num)
+	if (!dev_priv->vbt.child_dev_num)
 		return 1;
 
 	ret = 0;
-	for (i = 0; i < dev_priv->child_dev_num; i++) {
-		p_child = dev_priv->child_dev + i;
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		p_child = dev_priv->vbt.child_dev + i;
 		/*
 		 * If the device type is not TV, continue.
 		 */
@@ -1564,7 +1564,7 @@
 		return;
 	}
 	/* Even if we have an encoder we may not have a connector */
-	if (!dev_priv->int_tv_support)
+	if (!dev_priv->vbt.int_tv_support)
 		return;
 
 	/*
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 7db592e..a9a0300 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm
-mgag200-y   := mgag200_main.o mgag200_mode.o \
+mgag200-y   := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
 	mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
 
 obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
new file mode 100644
index 0000000..801731a
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2013 Matrox Graphics
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Author: Christopher Harvey <charvey@matrox.com>
+ */
+
+#include <drm/drmP.h>
+#include "mgag200_drv.h"
+
+static bool warn_transparent = true;
+static bool warn_palette = true;
+
+/*
+  Hide the cursor off screen. We can't disable the cursor hardware because it
+  takes too long to re-activate and causes momentary corruption
+*/
+static void mga_hide_cursor(struct mga_device *mdev)
+{
+	WREG8(MGA_CURPOSXL, 0);
+	WREG8(MGA_CURPOSXH, 0);
+	mgag200_bo_unpin(mdev->cursor.pixels_1);
+	mgag200_bo_unpin(mdev->cursor.pixels_2);
+}
+
+int mga_crtc_cursor_set(struct drm_crtc *crtc,
+			struct drm_file *file_priv,
+			uint32_t handle,
+			uint32_t width,
+			uint32_t height)
+{
+	struct drm_device *dev = (struct drm_device *)file_priv->minor->dev;
+	struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+	struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1;
+	struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2;
+	struct mgag200_bo *pixels_current = mdev->cursor.pixels_current;
+	struct mgag200_bo *pixels_prev = mdev->cursor.pixels_prev;
+	struct drm_gem_object *obj;
+	struct mgag200_bo *bo = NULL;
+	int ret = 0;
+	unsigned int i, row, col;
+	uint32_t colour_set[16];
+	uint32_t *next_space = &colour_set[0];
+	uint32_t *palette_iter;
+	uint32_t this_colour;
+	bool found = false;
+	int colour_count = 0;
+	u64 gpu_addr;
+	u8 reg_index;
+	u8 this_row[48];
+
+	if (!pixels_1 || !pixels_2) {
+		WREG8(MGA_CURPOSXL, 0);
+		WREG8(MGA_CURPOSXH, 0);
+		return -ENOTSUPP; /* Didn't allocate space for cursors */
+	}
+
+	if ((width != 64 || height != 64) && handle) {
+		WREG8(MGA_CURPOSXL, 0);
+		WREG8(MGA_CURPOSXH, 0);
+		return -EINVAL;
+	}
+
+	BUG_ON(pixels_1 != pixels_current && pixels_1 != pixels_prev);
+	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
+	BUG_ON(pixels_current == pixels_prev);
+
+	ret = mgag200_bo_reserve(pixels_1, true);
+	if (ret) {
+		WREG8(MGA_CURPOSXL, 0);
+		WREG8(MGA_CURPOSXH, 0);
+		return ret;
+	}
+	ret = mgag200_bo_reserve(pixels_2, true);
+	if (ret) {
+		WREG8(MGA_CURPOSXL, 0);
+		WREG8(MGA_CURPOSXH, 0);
+		mgag200_bo_unreserve(pixels_1);
+		return ret;
+	}
+
+	if (!handle) {
+		mga_hide_cursor(mdev);
+		ret = 0;
+		goto out1;
+	}
+
+	/* Move cursor buffers into VRAM if they aren't already */
+	if (!pixels_1->pin_count) {
+		ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
+				     &mdev->cursor.pixels_1_gpu_addr);
+		if (ret)
+			goto out1;
+	}
+	if (!pixels_2->pin_count) {
+		ret = mgag200_bo_pin(pixels_2, TTM_PL_FLAG_VRAM,
+				     &mdev->cursor.pixels_2_gpu_addr);
+		if (ret) {
+			mgag200_bo_unpin(pixels_1);
+			goto out1;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -ENOENT;
+		goto out1;
+	}
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	bo = gem_to_mga_bo(obj);
+	ret = mgag200_bo_reserve(bo, true);
+	if (ret) {
+		dev_err(&dev->pdev->dev, "failed to reserve user bo\n");
+		goto out1;
+	}
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			dev_err(&dev->pdev->dev, "failed to kmap user buffer updates\n");
+			goto out2;
+		}
+	}
+
+	memset(&colour_set[0], 0, sizeof(uint32_t)*16);
+	/* width*height*4 = 16384 */
+	for (i = 0; i < 16384; i += 4) {
+		this_colour = ioread32(bo->kmap.virtual + i);
+		/* No transparency */
+		if (this_colour>>24 != 0xff &&
+			this_colour>>24 != 0x0) {
+			if (warn_transparent) {
+				dev_info(&dev->pdev->dev, "Video card doesn't support cursors with partial transparency.\n");
+				dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
+				warn_transparent = false; /* Only tell the user once. */
+			}
+			ret = -EINVAL;
+			goto out3;
+		}
+		/* Don't need to store transparent pixels as colours */
+		if (this_colour>>24 == 0x0)
+			continue;
+		found = false;
+		for (palette_iter = &colour_set[0]; palette_iter != next_space; palette_iter++) {
+			if (*palette_iter == this_colour) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			continue;
+		/* We only support 4bit paletted cursors */
+		if (colour_count >= 16) {
+			if (warn_palette) {
+				dev_info(&dev->pdev->dev, "Video card only supports cursors with up to 16 colours.\n");
+				dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
+				warn_palette = false; /* Only tell the user once. */
+			}
+			ret = -EINVAL;
+			goto out3;
+		}
+		*next_space = this_colour;
+		next_space++;
+		colour_count++;
+	}
+
+	/* Program colours from cursor icon into palette */
+	for (i = 0; i < colour_count; i++) {
+		if (i <= 2)
+			reg_index = 0x8 + i*0x4;
+		else
+			reg_index = 0x60 + i*0x3;
+		WREG_DAC(reg_index, colour_set[i] & 0xff);
+		WREG_DAC(reg_index+1, colour_set[i]>>8 & 0xff);
+		WREG_DAC(reg_index+2, colour_set[i]>>16 & 0xff);
+		BUG_ON((colour_set[i]>>24 & 0xff) != 0xff);
+	}
+
+	/* Map up-coming buffer to write colour indices */
+	if (!pixels_prev->kmap.virtual) {
+		ret = ttm_bo_kmap(&pixels_prev->bo, 0,
+				  pixels_prev->bo.num_pages,
+				  &pixels_prev->kmap);
+		if (ret) {
+			dev_err(&dev->pdev->dev, "failed to kmap cursor updates\n");
+			goto out3;
+		}
+	}
+
+	/* now write colour indices into hardware cursor buffer */
+	for (row = 0; row < 64; row++) {
+		memset(&this_row[0], 0, 48);
+		for (col = 0; col < 64; col++) {
+			this_colour = ioread32(bo->kmap.virtual + 4*(col + 64*row));
+			/* write transparent pixels */
+			if (this_colour>>24 == 0x0) {
+				this_row[47 - col/8] |= 0x80>>(col%8);
+				continue;
+			}
+
+			/* write colour index here */
+			for (i = 0; i < colour_count; i++) {
+				if (colour_set[i] == this_colour) {
+					if (col % 2)
+						this_row[col/2] |= i<<4;
+					else
+						this_row[col/2] |= i;
+					break;
+				}
+			}
+		}
+		memcpy_toio(pixels_prev->kmap.virtual + row*48, &this_row[0], 48);
+	}
+
+	/* Program gpu address of cursor buffer */
+	if (pixels_prev == pixels_1)
+		gpu_addr = mdev->cursor.pixels_1_gpu_addr;
+	else
+		gpu_addr = mdev->cursor.pixels_2_gpu_addr;
+	WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((gpu_addr>>10) & 0xff));
+	WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((gpu_addr>>18) & 0x3f));
+
+	/* Adjust cursor control register to turn on the cursor */
+	WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palettized cursor mode */
+
+	/* Now swap internal buffer pointers */
+	if (mdev->cursor.pixels_1 == mdev->cursor.pixels_prev) {
+		mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+		mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+	} else if (mdev->cursor.pixels_1 == mdev->cursor.pixels_current) {
+		mdev->cursor.pixels_prev = mdev->cursor.pixels_1;
+		mdev->cursor.pixels_current = mdev->cursor.pixels_2;
+	} else {
+		BUG();
+	}
+	ret = 0;
+
+	ttm_bo_kunmap(&pixels_prev->kmap);
+ out3:
+	ttm_bo_kunmap(&bo->kmap);
+ out2:
+	mgag200_bo_unreserve(bo);
+ out1:
+	if (ret)
+		mga_hide_cursor(mdev);
+	mgag200_bo_unreserve(pixels_1);
+	mgag200_bo_unreserve(pixels_2);
+	return ret;
+}
+
+int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+	/* Our origin is at (64,64) */
+	x += 64;
+	y += 64;
+
+	BUG_ON(x <= 0);
+	BUG_ON(y <= 0);
+	BUG_ON(x & ~0xffff);
+	BUG_ON(y & ~0xffff);
+
+	WREG8(MGA_CURPOSXL, x & 0xff);
+	WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+	WREG8(MGA_CURPOSYL, y & 0xff);
+	WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+	return 0;
+}
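
The inner loop in mga_crtc_cursor_set() packs each 64-pixel cursor row into 48 bytes: bytes 0..31 hold 4-bit palette indices (two pixels per byte), and bytes 40..47 hold the 1-bit transparency mask, filled from byte 47 downwards. A restatement of that packing as a standalone helper (hypothetical name; it mirrors the loop above rather than adding behaviour):

	static void example_pack_cursor_pixel(u8 row[48], unsigned int col,
					      unsigned int palette_index,
					      bool transparent)
	{
		if (transparent) {
			/* 1 bit per pixel, byte 47 holds columns 0-7 */
			row[47 - col / 8] |= 0x80 >> (col % 8);
			return;
		}
		/* 4 bits per pixel: even columns use the low nibble */
		if (col % 2)
			row[col / 2] |= palette_index << 4;
		else
			row[col / 2] |= palette_index;
	}
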
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index bf29b2f..364a05a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -149,6 +149,21 @@
 	struct mga_i2c_chan *i2c;
 };
 
+struct mga_cursor {
+	/*
+	   We have to have 2 buffers for the cursor to avoid occasional
+	   corruption while switching cursor icons.
+	   If either of these is NULL, then don't do hardware cursors, and
+	   fall back to software.
+	*/
+	struct mgag200_bo *pixels_1;
+	struct mgag200_bo *pixels_2;
+	u64 pixels_1_gpu_addr, pixels_2_gpu_addr;
+	/* The currently displayed icon; this points to one of pixels_1 or pixels_2 */
+	struct mgag200_bo *pixels_current;
+	/* The previously displayed icon */
+	struct mgag200_bo *pixels_prev;
+};
 
 struct mga_mc {
 	resource_size_t			vram_size;
@@ -181,6 +196,7 @@
 	struct mga_mode_info		mode_info;
 
 	struct mga_fbdev *mfbdev;
+	struct mga_cursor cursor;
 
 	bool				suspended;
 	int				num_crtc;
@@ -273,4 +289,9 @@
 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
 int mgag200_bo_unpin(struct mgag200_bo *bo);
 int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+			   /* mgag200_cursor.c */
+int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+						uint32_t handle, uint32_t width, uint32_t height);
+int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+
 #endif				/* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9905923..6d6b598 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -209,7 +209,7 @@
 	r = mgag200_device_init(dev, flags);
 	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
-		goto out;
+		return r;
 	}
 	r = mgag200_mm_init(mdev);
 	if (r)
@@ -221,8 +221,27 @@
 	dev->mode_config.prefer_shadow = 1;
 
 	r = mgag200_modeset_init(mdev);
-	if (r)
+	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+		goto out;
+	}
+
+	/* Make small buffers to store a hardware cursor (double buffered icon updates) */
+	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
+					  &mdev->cursor.pixels_1);
+	mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
+					  &mdev->cursor.pixels_2);
+	if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
+		goto cursor_nospace;
+	mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+	mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+	goto cursor_done;
+ cursor_nospace:
+	mdev->cursor.pixels_1 = NULL;
+	mdev->cursor.pixels_2 = NULL;
+	dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
+ cursor_done:
+
 out:
 	if (r)
 		mgag200_driver_unload(dev);
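
A size note on the two cursor buffers allocated above (assuming 4 KiB pages): each icon needs 48 bytes per row for 64 rows, so

	roundup(48 * 64, PAGE_SIZE) = roundup(3072, 4096) = 4096 bytes

i.e. one page per icon, with two icons kept resident so updates can be double buffered.
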
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ee66bad..5b1a9e7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1253,6 +1253,8 @@
 
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs mga_crtc_funcs = {
+	.cursor_set = mga_crtc_cursor_set,
+	.cursor_move = mga_crtc_cursor_move,
 	.gamma_set = mga_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = mga_crtc_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index fb24d86..3ae442a6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -235,7 +235,11 @@
 #define MGAREG_CRTCEXT_INDEX	0x1fde
 #define MGAREG_CRTCEXT_DATA	0x1fdf
 
-
+/* Cursor X and Y position */
+#define MGA_CURPOSXL 0x3c0c
+#define MGA_CURPOSXH 0x3c0d
+#define MGA_CURPOSYL 0x3c0e
+#define MGA_CURPOSYH 0x3c0f
 
 /* MGA bits for registers PCI_OPTION_REG */
 #define MGA1064_OPT_SYS_CLK_PCI   		( 0x00 << 0 )
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 401c989..0004f77 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -270,26 +270,20 @@
 		return ret;
 	}
 
-	mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
-				    pci_resource_len(dev->pdev, 0),
-				    DRM_MTRR_WC);
+	mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+					 pci_resource_len(dev->pdev, 0));
 
 	return 0;
 }
 
 void mgag200_mm_fini(struct mga_device *mdev)
 {
-	struct drm_device *dev = mdev->dev;
 	ttm_bo_device_release(&mdev->ttm.bdev);
 
 	mgag200_ttm_global_release(mdev);
 
-	if (mdev->fb_mtrr >= 0) {
-		drm_mtrr_del(mdev->fb_mtrr,
-			     pci_resource_start(dev->pdev, 0),
-			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
-		mdev->fb_mtrr = -1;
-	}
+	arch_phys_wc_del(mdev->fb_mtrr);
+	mdev->fb_mtrr = 0;
 }
 
 void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
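
The mgag200 conversion here, and the nouveau and radeon hunks that follow, all make the same substitution: request a write-combining handle at init time, then hand the stored handle back at teardown, with no error handling in the driver. A self-contained sketch of that pattern with hypothetical names (the pairing with an ioremap_wc() mapping is an assumption, not something these hunks show):

	struct example_vram {
		void __iomem *mmio;
		int wc_handle;
	};

	static int example_map_vram(struct example_vram *v, struct pci_dev *pdev)
	{
		resource_size_t base = pci_resource_start(pdev, 0);
		resource_size_t size = pci_resource_len(pdev, 0);

		v->mmio = ioremap_wc(base, size);	/* WC mapping */
		if (!v->mmio)
			return -ENOMEM;

		/* MTRR fallback; designed to be a no-op where PAT covers WC */
		v->wc_handle = arch_phys_wc_add(base, size);
		return 0;
	}

	static void example_unmap_vram(struct example_vram *v)
	{
		arch_phys_wc_del(v->wc_handle);		/* accepts any handle */
		iounmap(v->mmio);
	}
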
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f19a15a3b..3da985e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -396,9 +396,8 @@
 		return ret;
 	}
 
-	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
-				     pci_resource_len(dev->pdev, 1),
-				     DRM_MTRR_WC);
+	drm->ttm.mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 1),
+					 pci_resource_len(dev->pdev, 1));
 
 	/* GART init */
 	if (drm->agp.stat != ENABLED) {
@@ -433,10 +432,6 @@
 
 	nouveau_ttm_global_release(drm);
 
-	if (drm->ttm.mtrr >= 0) {
-		drm_mtrr_del(drm->ttm.mtrr,
-			     pci_resource_start(drm->dev->pdev, 1),
-			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
-		drm->ttm.mtrr = -1;
-	}
+	arch_phys_wc_del(drm->ttm.mtrr);
+	drm->ttm.mtrr = 0;
 }
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 79b200a..ef161ea9 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -253,10 +253,6 @@
 			NULL, NULL);
 }
 
-static void omap_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
 static void vblank_cb(void *arg)
 {
 	struct drm_crtc *crtc = arg;
@@ -366,7 +362,6 @@
 	.prepare = omap_crtc_prepare,
 	.commit = omap_crtc_commit,
 	.mode_set_base = omap_crtc_mode_set_base,
-	.load_lut = omap_crtc_load_lut,
 };
 
 const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b11ce60..002988d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,21 +281,7 @@
 	return ret;
 }
 
-static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
-		u16 red, u16 green, u16 blue, int regno)
-{
-	DBG("fbdev: set gamma");
-}
-
-static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
-		u16 *red, u16 *green, u16 *blue, int regno)
-{
-	DBG("fbdev: get gamma");
-}
-
 static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
-	.gamma_set = omap_crtc_fb_gamma_set,
-	.gamma_get = omap_crtc_fb_gamma_get,
 	.fb_probe = omap_fbdev_create,
 };
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index be7cd97..3256693 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -136,10 +136,6 @@
 	kunmap(pages[page_num]);
 }
 
-/*
- * TODO maybe we can split up drm_gem_mmap to avoid duplicating
- * some here.. or at least have a drm_dmabuf_mmap helper.
- */
 static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
 		struct vm_area_struct *vma)
 {
@@ -149,31 +145,9 @@
 	if (WARN_ON(!obj->filp))
 		return -EINVAL;
 
-	/* Check for valid size. */
-	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = obj;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	vma->vm_ops->open(vma);
-
-out_unlock:
+	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
+	if (ret < 0)
+		return ret;
 
 	return omap_gem_mmap_obj(obj, vma);
 }
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 823d29e..5a6bfa2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -222,12 +222,6 @@
 	return i - 1;
 }
 
-static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
-{
-	/* TODO */
-}
-
 static void qxl_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
@@ -399,7 +393,6 @@
 static const struct drm_crtc_funcs qxl_crtc_funcs = {
 	.cursor_set = qxl_crtc_cursor_set,
 	.cursor_move = qxl_crtc_cursor_move,
-	.gamma_set = qxl_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = qxl_crtc_destroy,
 };
@@ -619,18 +612,12 @@
 	DRM_DEBUG("\n");
 }
 
-static void qxl_crtc_load_lut(struct drm_crtc *crtc)
-{
-	DRM_DEBUG("\n");
-}
-
 static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
 	.dpms = qxl_crtc_dpms,
 	.mode_fixup = qxl_crtc_mode_fixup,
 	.mode_set = qxl_crtc_mode_set,
 	.prepare = qxl_crtc_prepare,
 	.commit = qxl_crtc_commit,
-	.load_lut = qxl_crtc_load_lut,
 };
 
 static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index b3c5127..4b955b0 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -520,10 +520,6 @@
 }
 
 static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
-	/* TODO
-	.gamma_set = qxl_crtc_fb_gamma_set,
-	.gamma_get = qxl_crtc_fb_gamma_get,
-	*/
 	.fb_probe = qxl_fb_find_or_create_single,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1424ccd..07af5a9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -322,8 +322,8 @@
 {
 	/* Add an MTRR for the VRAM */
 	if (!rdev->fastfb_working) {
-		rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
-			MTRR_TYPE_WRCOMB, 1);
+		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
+						      rdev->mc.aper_size);
 	}
 	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
 		rdev->mc.mc_vram_size >> 20,
@@ -336,6 +336,7 @@
 void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
+	arch_phys_wc_del(rdev->mc.vram_mtrr);
 }
 
 void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
new file mode 100644
index 0000000..72887df
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -0,0 +1,9 @@
+config DRM_RCAR_DU
+	tristate "DRM Support for R-Car Display Unit"
+	depends on DRM && ARM
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  Choose this option if you have an R-Car chipset.
+	  If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
new file mode 100644
index 0000000..7333c00
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -0,0 +1,8 @@
+rcar-du-drm-y := rcar_du_crtc.o \
+		 rcar_du_drv.o \
+		 rcar_du_kms.o \
+		 rcar_du_lvds.o \
+		 rcar_du_plane.o \
+		 rcar_du_vga.o
+
+obj-$(CONFIG_DRM_RCAR_DU)	+= rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
new file mode 100644
index 0000000..24183fb
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -0,0 +1,595 @@
+/*
+ * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvds.h"
+#include "rcar_du_plane.h"
+#include "rcar_du_regs.h"
+#include "rcar_du_vga.h"
+
+#define to_rcar_crtc(c)	container_of(c, struct rcar_du_crtc, crtc)
+
+static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
+}
+
+static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
+}
+
+static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
+		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
+}
+
+static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
+		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
+}
+
+static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
+				 u32 clr, u32 set)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+	u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
+
+	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
+}
+
+static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+{
+	struct drm_crtc *crtc = &rcrtc->crtc;
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	const struct drm_display_mode *mode = &crtc->mode;
+	unsigned long clk;
+	u32 value;
+	u32 div;
+
+	/* Dot clock */
+	clk = clk_get_rate(rcdu->clock);
+	div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
+	div = clamp(div, 1U, 64U) - 1;
+
+	rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR,
+		      ESCR_DCLKSEL_CLKS | div);
+	rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0);
+
+	/* Signal polarities */
+	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
+	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+	      | DSMR_DIPM_DE;
+	rcar_du_crtc_write(rcrtc, DSMR, value);
+
+	/* Display timings */
+	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
+	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
+					mode->hdisplay - 19);
+	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
+					mode->hsync_start - 1);
+	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);
+
+	rcar_du_crtc_write(rcrtc, VDSR, mode->vtotal - mode->vsync_end - 2);
+	rcar_du_crtc_write(rcrtc, VDER, mode->vtotal - mode->vsync_end +
+					mode->vdisplay - 2);
+	rcar_du_crtc_write(rcrtc, VSPR, mode->vtotal - mode->vsync_end +
+					mode->vsync_start - 1);
+	rcar_du_crtc_write(rcrtc, VCR,  mode->vtotal - 1);
+
+	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start);
+	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
+}
+
+static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+	u32 dorcr = rcar_du_read(rcdu, DORCR);
+
+	dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+
+	/* Set the DU1 pin sources. Select CRTC 0 if explicitly requested and
+	 * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
+	 * default.
+	 */
+	if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
+		dorcr |= DORCR_PG2D_DS1;
+	else
+		dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+
+	rcar_du_write(rcdu, DORCR, dorcr);
+}
+
+static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
+{
+	rcar_du_write(rcdu, DSYSR,
+		      (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
+		      (start ? DSYSR_DEN : DSYSR_DRES));
+}
+
+static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
+{
+	/* Many of the configuration bits are only updated when the display
+	 * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
+	 * of those bits could be pre-configured, but others (especially the
+	 * bits related to plane assignment to display timing controllers) need
+	 * to be modified at runtime.
+	 *
+	 * Restart the display controller if a start is requested. Sorry for the
+	 * flicker. It should be possible to move most of the "DRES-update" bits
+	 * setup to driver initialization time and minimize the number of cases
+	 * when the display controller will have to be restarted.
+	 */
+	if (start) {
+		if (rcdu->used_crtcs++ != 0)
+			__rcar_du_start_stop(rcdu, false);
+		__rcar_du_start_stop(rcdu, true);
+	} else {
+		if (--rcdu->used_crtcs == 0)
+			__rcar_du_start_stop(rcdu, false);
+	}
+}
+
+void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
+{
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	/* Store the route from the CRTC output to the DU output. The DU will be
+	 * configured when starting the CRTC.
+	 */
+	rcrtc->outputs |= 1 << output;
+}
+
+void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
+{
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
+	unsigned int num_planes = 0;
+	unsigned int prio = 0;
+	unsigned int i;
+	u32 dptsr = 0;
+	u32 dspr = 0;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
+		struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+		unsigned int j;
+
+		if (plane->crtc != &rcrtc->crtc || !plane->enabled)
+			continue;
+
+		/* Insert the plane in the sorted planes array. */
+		for (j = num_planes++; j > 0; --j) {
+			if (planes[j-1]->zpos <= plane->zpos)
+				break;
+			planes[j] = planes[j-1];
+		}
+
+		planes[j] = plane;
+		prio += plane->format->planes * 4;
+	}
+
+	for (i = 0; i < num_planes; ++i) {
+		struct rcar_du_plane *plane = planes[i];
+		unsigned int index = plane->hwindex;
+
+		prio -= 4;
+		dspr |= (index + 1) << prio;
+		dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);
+
+		if (plane->format->planes == 2) {
+			index = (index + 1) % 8;
+
+			prio -= 4;
+			dspr |= (index + 1) << prio;
+			dptsr |= DPTSR_PnDK(index) |  DPTSR_PnTS(index);
+		}
+	}
+
+	/* Select display timing and dot clock generator 2 for planes associated
+	 * with superposition controller 2.
+	 */
+	if (rcrtc->index) {
+		u32 value = rcar_du_read(rcdu, DPTSR);
+
+		/* The DPTSR register is updated when the display controller is
+		 * stopped. We thus need to restart the DU. Once again, sorry
+		 * for the flicker. One way to mitigate the issue would be to
+		 * pre-associate planes with CRTCs (either with a fixed 4/4
+		 * split, or through a module parameter). Flicker would then
+		 * occur only if we need to break the pre-association.
+		 */
+		if (value != dptsr) {
+			rcar_du_write(rcdu, DPTSR, dptsr);
+			if (rcdu->used_crtcs) {
+				__rcar_du_start_stop(rcdu, false);
+				__rcar_du_start_stop(rcdu, true);
+			}
+		}
+	}
+
+	rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr);
+}
+
+static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
+{
+	struct drm_crtc *crtc = &rcrtc->crtc;
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	unsigned int i;
+
+	if (rcrtc->started)
+		return;
+
+	if (WARN_ON(rcrtc->plane->format == NULL))
+		return;
+
+	/* Set display off and background to black */
+	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
+	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
+
+	/* Configure display timings and output routing */
+	rcar_du_crtc_set_display_timing(rcrtc);
+	rcar_du_crtc_set_routing(rcrtc);
+
+	mutex_lock(&rcdu->planes.lock);
+	rcrtc->plane->enabled = true;
+	rcar_du_crtc_update_planes(crtc);
+	mutex_unlock(&rcdu->planes.lock);
+
+	/* Setup planes. */
+	for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
+		struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+
+		if (plane->crtc != crtc || !plane->enabled)
+			continue;
+
+		rcar_du_plane_setup(plane);
+	}
+
+	/* Select master sync mode. This enables display operation in master
+	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
+	 * actively driven).
+	 */
+	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
+
+	rcar_du_start_stop(rcdu, true);
+
+	rcrtc->started = true;
+}
+
+static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
+{
+	struct drm_crtc *crtc = &rcrtc->crtc;
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+
+	if (!rcrtc->started)
+		return;
+
+	mutex_lock(&rcdu->planes.lock);
+	rcrtc->plane->enabled = false;
+	rcar_du_crtc_update_planes(crtc);
+	mutex_unlock(&rcdu->planes.lock);
+
+	/* Select switch sync mode. This stops display operation and configures
+	 * the HSYNC and VSYNC signals as inputs.
+	 */
+	rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
+
+	rcar_du_start_stop(rcdu, false);
+
+	rcrtc->started = false;
+}
+
+void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	rcar_du_crtc_stop(rcrtc);
+	rcar_du_put(rcdu);
+}
+
+void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
+{
+	struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+
+	if (rcrtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	rcar_du_get(rcdu);
+	rcar_du_crtc_start(rcrtc);
+}
+
+static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
+{
+	struct drm_crtc *crtc = &rcrtc->crtc;
+
+	rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+	rcar_du_plane_update_base(rcrtc->plane);
+}
+
+static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	if (rcrtc->dpms == mode)
+		return;
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		rcar_du_get(rcdu);
+		rcar_du_crtc_start(rcrtc);
+	} else {
+		rcar_du_crtc_stop(rcrtc);
+		rcar_du_put(rcdu);
+	}
+
+	rcrtc->dpms = mode;
+}
+
+static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	/* TODO Fixup modes */
+	return true;
+}
+
+static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
+{
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	/* We need to access the hardware during mode set, acquire a reference
+	 * to the DU.
+	 */
+	rcar_du_get(rcdu);
+
+	/* Stop the CRTC and release the plane. Force the DPMS mode to off as a
+	 * result.
+	 */
+	rcar_du_crtc_stop(rcrtc);
+	rcar_du_plane_release(rcrtc->plane);
+
+	rcrtc->dpms = DRM_MODE_DPMS_OFF;
+}
+
+static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y,
+				 struct drm_framebuffer *old_fb)
+{
+	struct rcar_du_device *rcdu = crtc->dev->dev_private;
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	const struct rcar_du_format_info *format;
+	int ret;
+
+	format = rcar_du_format_info(crtc->fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(rcdu->dev, "mode_set: unsupported format %08x\n",
+			crtc->fb->pixel_format);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = rcar_du_plane_reserve(rcrtc->plane, format);
+	if (ret < 0)
+		goto error;
+
+	rcrtc->plane->format = format;
+	rcrtc->plane->pitch = crtc->fb->pitches[0];
+
+	rcrtc->plane->src_x = x;
+	rcrtc->plane->src_y = y;
+	rcrtc->plane->width = mode->hdisplay;
+	rcrtc->plane->height = mode->vdisplay;
+
+	rcar_du_plane_compute_base(rcrtc->plane, crtc->fb);
+
+	rcrtc->outputs = 0;
+
+	return 0;
+
+error:
+	/* There's no rollback/abort operation to clean up in case of error. We
+	 * thus need to release the reference to the DU acquired in prepare()
+	 * here.
+	 */
+	rcar_du_put(rcdu);
+	return ret;
+}
+
+static void rcar_du_crtc_mode_commit(struct drm_crtc *crtc)
+{
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	/* We're done, restart the CRTC and set the DPMS mode to on. The
+	 * reference to the DU acquired at prepare() time will thus be released
+	 * by the DPMS handler (possibly called by the disable() handler).
+	 */
+	rcar_du_crtc_start(rcrtc);
+	rcrtc->dpms = DRM_MODE_DPMS_ON;
+}
+
+static int rcar_du_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				      struct drm_framebuffer *old_fb)
+{
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	rcrtc->plane->src_x = x;
+	rcrtc->plane->src_y = y;
+
+	rcar_du_crtc_update_base(to_rcar_crtc(crtc));
+
+	return 0;
+}
+
+static void rcar_du_crtc_disable(struct drm_crtc *crtc)
+{
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+	rcar_du_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	rcar_du_plane_release(rcrtc->plane);
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+	.dpms = rcar_du_crtc_dpms,
+	.mode_fixup = rcar_du_crtc_mode_fixup,
+	.prepare = rcar_du_crtc_mode_prepare,
+	.commit = rcar_du_crtc_mode_commit,
+	.mode_set = rcar_du_crtc_mode_set,
+	.mode_set_base = rcar_du_crtc_mode_set_base,
+	.disable = rcar_du_crtc_disable,
+};
+
+void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+				   struct drm_file *file)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = rcrtc->crtc.dev;
+	unsigned long flags;
+
+	/* Destroy the pending vertical blanking event associated with the
+	 * pending page flip, if any, and disable vertical blanking interrupts.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = rcrtc->event;
+	if (event && event->base.file_priv == file) {
+		rcrtc->event = NULL;
+		event->base.destroy(&event->base);
+		drm_vblank_put(dev, rcrtc->index);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = rcrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = rcrtc->event;
+	rcrtc->event = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (event == NULL)
+		return;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	drm_send_vblank_event(dev, rcrtc->index, event);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	drm_vblank_put(dev, rcrtc->index);
+}
+
+static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_pending_vblank_event *event)
+{
+	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+	struct drm_device *dev = rcrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (rcrtc->event != NULL) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	crtc->fb = fb;
+	rcar_du_crtc_update_base(rcrtc);
+
+	if (event) {
+		event->pipe = rcrtc->index;
+		drm_vblank_get(dev, rcrtc->index);
+		spin_lock_irqsave(&dev->event_lock, flags);
+		rcrtc->event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static const struct drm_crtc_funcs crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = rcar_du_crtc_page_flip,
+};
+
+int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
+{
+	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
+	struct drm_crtc *crtc = &rcrtc->crtc;
+	int ret;
+
+	rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0;
+	rcrtc->index = index;
+	rcrtc->dpms = DRM_MODE_DPMS_OFF;
+	rcrtc->plane = &rcdu->planes.planes[index];
+
+	rcrtc->plane->crtc = crtc;
+
+	ret = drm_crtc_init(rcdu->ddev, crtc, &crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+	return 0;
+}
+
+void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
+{
+	if (enable) {
+		rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
+		rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
+	} else {
+		rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
+	}
+}
+
+void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
+{
+	u32 status;
+
+	status = rcar_du_crtc_read(rcrtc, DSSR);
+	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
+
+	if (status & DSSR_VBK) {
+		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+		rcar_du_crtc_finish_page_flip(rcrtc);
+	}
+}
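
For the dot clock programmed in rcar_du_crtc_set_display_timing() above, a worked example of the divider computation (hypothetical numbers; only the arithmetic from the code above is assumed):

	/*
	 * div = DIV_ROUND_CLOSEST(du_clk_hz, mode->clock * 1000), clamped to
	 * 1..64 and then decremented.  With a 130 MHz functional clock and a
	 * 65000 kHz pixel clock:
	 *   DIV_ROUND_CLOSEST(130000000, 65000000) = 2  ->  clamp  ->  2 - 1 = 1
	 * so ESCR_DCLKSEL_CLKS | 1 is written to ESCR (or ESCR2 for DU1).
	 */
	static u32 example_escr_divider(unsigned long du_clk_hz,
					unsigned int pixclk_khz)
	{
		u32 div = DIV_ROUND_CLOSEST(du_clk_hz, pixclk_khz * 1000U);

		return clamp(div, 1U, 64U) - 1;
	}
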
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
new file mode 100644
index 0000000..2a0365b
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -0,0 +1,50 @@
+/*
+ * rcar_du_crtc.h  --  R-Car Display Unit CRTCs
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_CRTC_H__
+#define __RCAR_DU_CRTC_H__
+
+#include <linux/mutex.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct rcar_du_device;
+struct rcar_du_plane;
+
+struct rcar_du_crtc {
+	struct drm_crtc crtc;
+
+	unsigned int mmio_offset;
+	unsigned int index;
+	bool started;
+
+	struct drm_pending_vblank_event *event;
+	unsigned int outputs;
+	int dpms;
+
+	struct rcar_du_plane *plane;
+};
+
+int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index);
+void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
+void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
+void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+				   struct drm_file *file);
+void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
+void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
+
+void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output);
+void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
+
+#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
new file mode 100644
index 0000000..003b34e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -0,0 +1,325 @@
+/*
+ * rcar_du_drv.c  --  R-Car Display Unit DRM driver
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Core device operations
+ */
+
+/*
+ * rcar_du_get - Acquire a reference to the DU
+ *
+ * Acquiring a reference enables the device clock and sets up the core registers. A
+ * reference must be held before accessing any hardware registers.
+ *
+ * This function must be called with the DRM mode_config lock held.
+ *
+ * Return 0 in case of success or a negative error code otherwise.
+ */
+int rcar_du_get(struct rcar_du_device *rcdu)
+{
+	int ret;
+
+	if (rcdu->use_count)
+		goto done;
+
+	/* Enable clocks before accessing the hardware. */
+	ret = clk_prepare_enable(rcdu->clock);
+	if (ret < 0)
+		return ret;
+
+	/* Enable extended features */
+	rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
+	rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+	rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+	rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
+	rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
+
+	/* Use DS1PR and DS2PR to configure plane priorities and connect
+	 * superposition 0 to the DU0 pins. The DU1 pins will be configured dynamically.
+	 */
+	rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+
+done:
+	rcdu->use_count++;
+	return 0;
+}
+
+/*
+ * rcar_du_put - Release a reference to the DU
+ *
+ * Releasing the last reference disables the device clock.
+ *
+ * This function must be called with the DRM mode_config lock held.
+ */
+void rcar_du_put(struct rcar_du_device *rcdu)
+{
+	if (--rcdu->use_count)
+		return;
+
+	clk_disable_unprepare(rcdu->clock);
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+static int rcar_du_unload(struct drm_device *dev)
+{
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static int rcar_du_load(struct drm_device *dev, unsigned long flags)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
+	struct rcar_du_device *rcdu;
+	struct resource *ioarea;
+	struct resource *mem;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(dev->dev, "no platform data\n");
+		return -ENODEV;
+	}
+
+	rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
+	if (rcdu == NULL) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	rcdu->dev = &pdev->dev;
+	rcdu->pdata = pdata;
+	rcdu->ddev = dev;
+	dev->dev_private = rcdu;
+
+	/* I/O resources and clocks */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem == NULL) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		return -EINVAL;
+	}
+
+	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
+					 resource_size(mem), pdev->name);
+	if (ioarea == NULL) {
+		dev_err(&pdev->dev, "failed to request memory region\n");
+		return -EBUSY;
+	}
+
+	rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
+					  resource_size(ioarea));
+	if (rcdu->mmio == NULL) {
+		dev_err(&pdev->dev, "failed to remap memory resource\n");
+		return -ENOMEM;
+	}
+
+	rcdu->clock = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(rcdu->clock)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return -ENOENT;
+	}
+
+	/* DRM/KMS objects */
+	ret = rcar_du_modeset_init(rcdu);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
+		goto done;
+	}
+
+	/* IRQ and vblank handling */
+	ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto done;
+	}
+
+	ret = drm_irq_install(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to install IRQ handler\n");
+		goto done;
+	}
+
+	platform_set_drvdata(pdev, rcdu);
+
+done:
+	if (ret)
+		rcar_du_unload(dev);
+
+	return ret;
+}
+
+static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+		rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
+}
+
+static irqreturn_t rcar_du_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct rcar_du_device *rcdu = dev->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+		rcar_du_crtc_irq(&rcdu->crtcs[i]);
+
+	return IRQ_HANDLED;
+}
+
+static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+
+	rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], true);
+
+	return 0;
+}
+
+static void rcar_du_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct rcar_du_device *rcdu = dev->dev_private;
+
+	rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], false);
+}
+
+static const struct file_operations rcar_du_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.fasync		= drm_fasync,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver rcar_du_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+				| DRIVER_PRIME,
+	.load			= rcar_du_load,
+	.unload			= rcar_du_unload,
+	.preclose		= rcar_du_preclose,
+	.irq_handler		= rcar_du_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= rcar_du_enable_vblank,
+	.disable_vblank		= rcar_du_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_cma_dmabuf_import,
+	.gem_prime_export	= drm_gem_cma_dmabuf_export,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_cma_dumb_destroy,
+	.fops			= &rcar_du_fops,
+	.name			= "rcar-du",
+	.desc			= "Renesas R-Car Display Unit",
+	.date			= "20130110",
+	.major			= 1,
+	.minor			= 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int rcar_du_pm_suspend(struct device *dev)
+{
+	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
+
+	drm_kms_helper_poll_disable(rcdu->ddev);
+	/* TODO Suspend the CRTC */
+
+	return 0;
+}
+
+static int rcar_du_pm_resume(struct device *dev)
+{
+	struct rcar_du_device *rcdu = dev_get_drvdata(dev);
+
+	/* TODO Resume the CRTC */
+
+	drm_kms_helper_poll_enable(rcdu->ddev);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops rcar_du_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rcar_du_pm_suspend, rcar_du_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static int rcar_du_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&rcar_du_driver, pdev);
+}
+
+static int rcar_du_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&rcar_du_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver rcar_du_platform_driver = {
+	.probe		= rcar_du_probe,
+	.remove		= rcar_du_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "rcar-du",
+		.pm	= &rcar_du_pm_ops,
+	},
+};
+
+module_platform_driver(rcar_du_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas R-Car Display Unit DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
new file mode 100644
index 0000000..193cc59
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -0,0 +1,66 @@
+/*
+ * rcar_du_drv.h  --  R-Car Display Unit DRM driver
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_DRV_H__
+#define __RCAR_DU_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/rcar-du.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_plane.h"
+
+struct clk;
+struct device;
+struct drm_device;
+
+struct rcar_du_device {
+	struct device *dev;
+	const struct rcar_du_platform_data *pdata;
+
+	void __iomem *mmio;
+	struct clk *clock;
+	unsigned int use_count;
+
+	struct drm_device *ddev;
+
+	struct rcar_du_crtc crtcs[2];
+	unsigned int used_crtcs;
+	unsigned int num_crtcs;
+
+	struct {
+		struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
+		unsigned int free;
+		struct mutex lock;
+
+		struct drm_property *alpha;
+		struct drm_property *colorkey;
+		struct drm_property *zpos;
+	} planes;
+};
+
+int rcar_du_get(struct rcar_du_device *rcdu);
+void rcar_du_put(struct rcar_du_device *rcdu);
+
+static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
+{
+	return ioread32(rcdu->mmio + reg);
+}
+
+static inline void rcar_du_write(struct rcar_du_device *rcdu, u32 reg, u32 data)
+{
+	iowrite32(data, rcdu->mmio + reg);
+}
+
+#endif /* __RCAR_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
new file mode 100644
index 0000000..9c63f39
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -0,0 +1,245 @@
+/*
+ * rcar_du_kms.c  --  R-Car Display Unit Mode Setting
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvds.h"
+#include "rcar_du_regs.h"
+#include "rcar_du_vga.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct rcar_du_format_info rcar_du_format_infos[] = {
+	{
+		.fourcc = DRM_FORMAT_RGB565,
+		.bpp = 16,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_ARGB1555,
+		.bpp = 16,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_XRGB1555,
+		.bpp = 16,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_XRGB8888,
+		.bpp = 32,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
+		.edf = PnDDCR4_EDF_RGB888,
+	}, {
+		.fourcc = DRM_FORMAT_ARGB8888,
+		.bpp = 32,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
+		.edf = PnDDCR4_EDF_ARGB8888,
+	}, {
+		.fourcc = DRM_FORMAT_UYVY,
+		.bpp = 16,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_YUYV,
+		.bpp = 16,
+		.planes = 1,
+		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_NV12,
+		.bpp = 12,
+		.planes = 2,
+		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		.fourcc = DRM_FORMAT_NV21,
+		.bpp = 12,
+		.planes = 2,
+		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
+		.edf = PnDDCR4_EDF_NONE,
+	}, {
+		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
+		.fourcc = DRM_FORMAT_NV16,
+		.bpp = 16,
+		.planes = 2,
+		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
+		.edf = PnDDCR4_EDF_NONE,
+	},
+};
+
+const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
+		if (rcar_du_format_infos[i].fourcc == fourcc)
+			return &rcar_du_format_infos[i];
+	}
+
+	return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Common connector and encoder functions
+ */
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector)
+{
+	struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+	return &rcon->encoder->encoder;
+}
+
+void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+}
+
+void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+	rcar_du_crtc_route_output(encoder->crtc, renc->output);
+}
+
+void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
+{
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+static struct drm_framebuffer *
+rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		  struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	const struct rcar_du_format_info *format;
+
+	format = rcar_du_format_info(mode_cmd->pixel_format);
+	if (format == NULL) {
+		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
+			mode_cmd->pixel_format);
+		return ERR_PTR(-EINVAL);
+	}
+
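+	/* The pitch must be a multiple of 16 bytes and smaller than 8192
+	 * bytes.
+	 */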
+	if (mode_cmd->pitches[0] & 15 || mode_cmd->pitches[0] >= 8192) {
+		dev_dbg(dev->dev, "invalid pitch value %u\n",
+			mode_cmd->pitches[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (format->planes == 2) {
+		if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
+			dev_dbg(dev->dev,
+				"luma and chroma pitches do not match\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
+	.fb_create = rcar_du_fb_create,
+};
+
+int rcar_du_modeset_init(struct rcar_du_device *rcdu)
+{
+	struct drm_device *dev = rcdu->ddev;
+	struct drm_encoder *encoder;
+	unsigned int i;
+	int ret;
+
+	drm_mode_config_init(rcdu->ddev);
+
+	rcdu->ddev->mode_config.min_width = 0;
+	rcdu->ddev->mode_config.min_height = 0;
+	rcdu->ddev->mode_config.max_width = 4095;
+	rcdu->ddev->mode_config.max_height = 2047;
+	rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs;
+
+	ret = rcar_du_plane_init(rcdu);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+		rcar_du_crtc_create(rcdu, i);
+
+	rcdu->used_crtcs = 0;
+	rcdu->num_crtcs = i;
+
+	for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
+		const struct rcar_du_encoder_data *pdata =
+			&rcdu->pdata->encoders[i];
+
+		if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) {
+			dev_warn(rcdu->dev,
+				 "encoder %u references nonexistent output %u, skipping\n",
+				 i, pdata->output);
+			continue;
+		}
+
+		switch (pdata->encoder) {
+		case RCAR_DU_ENCODER_VGA:
+			rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
+			break;
+
+		case RCAR_DU_ENCODER_LVDS:
+			rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	/* Set the possible CRTCs and possible clones. All encoders can be
+	 * driven by the CRTC associated with the output they're connected to,
+	 * as well as by CRTC 0.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+		encoder->possible_crtcs = (1 << 0) | (1 << renc->output);
+		encoder->possible_clones = 1 << 0;
+	}
+
+	ret = rcar_du_plane_register(rcdu);
+	if (ret < 0)
+		return ret;
+
+	drm_kms_helper_poll_init(rcdu->ddev);
+
+	drm_helper_disable_unused_functions(rcdu->ddev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
new file mode 100644
index 0000000..e4d8db0
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -0,0 +1,59 @@
+/*
+ * rcar_du_kms.h  --  R-Car Display Unit Mode Setting
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_KMS_H__
+#define __RCAR_DU_KMS_H__
+
+#include <linux/types.h>
+
+#include <drm/drm_crtc.h>
+
+struct rcar_du_device;
+
+struct rcar_du_format_info {
+	u32 fourcc;
+	unsigned int bpp;
+	unsigned int planes;
+	unsigned int pnmr;
+	unsigned int edf;
+};
+
+struct rcar_du_encoder {
+	struct drm_encoder encoder;
+	unsigned int output;
+};
+
+#define to_rcar_encoder(e) \
+	container_of(e, struct rcar_du_encoder, encoder)
+
+struct rcar_du_connector {
+	struct drm_connector connector;
+	struct rcar_du_encoder *encoder;
+};
+
+#define to_rcar_connector(c) \
+	container_of(c, struct rcar_du_connector, connector)
+
+const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector);
+void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
+void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode);
+void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
+
+int rcar_du_modeset_init(struct rcar_du_device *rcdu);
+
+#endif /* __RCAR_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
new file mode 100644
index 0000000..7aefe726
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
@@ -0,0 +1,216 @@
+/*
+ * rcar_du_lvds.c  --  R-Car Display Unit LVDS Encoder and Connector
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvds.h"
+
+struct rcar_du_lvds_connector {
+	struct rcar_du_connector connector;
+
+	const struct rcar_du_panel_data *panel;
+};
+
+#define to_rcar_lvds_connector(c) \
+	container_of(c, struct rcar_du_lvds_connector, connector.connector)
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
+{
+	struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (mode == NULL)
+		return 0;
+
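+	/* The LVDS panel has a single fixed mode described by platform data,
+	 * report it as the preferred mode.
+	 */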
+	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+	mode->clock = lvdscon->panel->mode.clock;
+	mode->hdisplay = lvdscon->panel->mode.hdisplay;
+	mode->hsync_start = lvdscon->panel->mode.hsync_start;
+	mode->hsync_end = lvdscon->panel->mode.hsync_end;
+	mode->htotal = lvdscon->panel->mode.htotal;
+	mode->vdisplay = lvdscon->panel->mode.vdisplay;
+	mode->vsync_start = lvdscon->panel->mode.vsync_start;
+	mode->vsync_end = lvdscon->panel->mode.vsync_end;
+	mode->vtotal = lvdscon->panel->mode.vtotal;
+	mode->flags = lvdscon->panel->mode.flags;
+
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static int rcar_du_lvds_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = rcar_du_lvds_connector_get_modes,
+	.mode_valid = rcar_du_lvds_connector_mode_valid,
+	.best_encoder = rcar_du_connector_best_encoder,
+};
+
+static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
+{
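+	/* The LVDS panel is hardwired to the encoder, report it as always
+	 * connected.
+	 */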
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = rcar_du_lvds_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = rcar_du_lvds_connector_destroy,
+};
+
+static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+				       struct rcar_du_encoder *renc,
+				       const struct rcar_du_panel_data *panel)
+{
+	struct rcar_du_lvds_connector *lvdscon;
+	struct drm_connector *connector;
+	int ret;
+
+	lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
+	if (lvdscon == NULL)
+		return -ENOMEM;
+
+	lvdscon->panel = panel;
+
+	connector = &lvdscon->connector.connector;
+	connector->display_info.width_mm = panel->width_mm;
+	connector->display_info.height_mm = panel->height_mm;
+
+	ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_sysfs_connector_add(connector);
+	if (ret < 0)
+		return ret;
+
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	drm_object_property_set_value(&connector->base,
+		rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+	ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+	if (ret < 0)
+		return ret;
+
+	connector->encoder = &renc->encoder;
+	lvdscon->connector.encoder = renc;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
+					   const struct drm_display_mode *mode,
+					   struct drm_display_mode *adjusted_mode)
+{
+	const struct drm_display_mode *panel_mode;
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	bool found = false;
+
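+	/* Locate the connector attached to this encoder in order to retrieve
+	 * the fixed panel mode from its probed modes list.
+	 */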
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		dev_dbg(dev->dev, "mode_fixup: no connector found\n");
+		return false;
+	}
+
+	if (list_empty(&connector->modes)) {
+		dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+		return false;
+	}
+
+	panel_mode = list_first_entry(&connector->modes,
+				      struct drm_display_mode, head);
+
+	/* We're not allowed to modify the resolution. */
+	if (mode->hdisplay != panel_mode->hdisplay ||
+	    mode->vdisplay != panel_mode->vdisplay)
+		return false;
+
+	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
+	drm_mode_copy(adjusted_mode, panel_mode);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = rcar_du_lvds_encoder_dpms,
+	.mode_fixup = rcar_du_lvds_encoder_mode_fixup,
+	.prepare = rcar_du_encoder_mode_prepare,
+	.commit = rcar_du_encoder_mode_commit,
+	.mode_set = rcar_du_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+int rcar_du_lvds_init(struct rcar_du_device *rcdu,
+		      const struct rcar_du_encoder_lvds_data *data,
+		      unsigned int output)
+{
+	struct rcar_du_encoder *renc;
+	int ret;
+
+	renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
+	if (renc == NULL)
+		return -ENOMEM;
+
+	renc->output = output;
+
+	ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+
+	return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
new file mode 100644
index 0000000..b47f832
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
@@ -0,0 +1,24 @@
+/*
+ * rcar_du_lvds.h  --  R-Car Display Unit LVDS Encoder and Connector
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_LVDS_H__
+#define __RCAR_DU_LVDS_H__
+
+struct rcar_du_device;
+struct rcar_du_encoder_lvds_data;
+
+int rcar_du_lvds_init(struct rcar_du_device *rcdu,
+		      const struct rcar_du_encoder_lvds_data *data,
+		      unsigned int output);
+
+#endif /* __RCAR_DU_LVDS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
new file mode 100644
index 0000000..a65f81d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -0,0 +1,507 @@
+/*
+ * rcar_du_plane.c  --  R-Car Display Unit Planes
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_plane.h"
+#include "rcar_du_regs.h"
+
+#define RCAR_DU_COLORKEY_NONE		(0 << 24)
+#define RCAR_DU_COLORKEY_SOURCE		(1 << 24)
+#define RCAR_DU_COLORKEY_MASK		(1 << 24)
+
+struct rcar_du_kms_plane {
+	struct drm_plane plane;
+	struct rcar_du_plane *hwplane;
+};
+
+static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
+{
+	return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
+}
+
+static u32 rcar_du_plane_read(struct rcar_du_device *rcdu,
+			      unsigned int index, u32 reg)
+{
+	return rcar_du_read(rcdu, index * PLANE_OFF + reg);
+}
+
+static void rcar_du_plane_write(struct rcar_du_device *rcdu,
+				unsigned int index, u32 reg, u32 data)
+{
+	rcar_du_write(rcdu, index * PLANE_OFF + reg, data);
+}
+
+int rcar_du_plane_reserve(struct rcar_du_plane *plane,
+			  const struct rcar_du_format_info *format)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+	unsigned int i;
+	int ret = -EBUSY;
+
+	mutex_lock(&rcdu->planes.lock);
+
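+	/* Look for a free hardware plane. Formats that require two planes also
+	 * need the next plane (modulo 8) to be free, so that both can be
+	 * allocated as a pair.
+	 */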
+	for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
+		if (!(rcdu->planes.free & (1 << i)))
+			continue;
+
+		if (format->planes == 1 ||
+		    rcdu->planes.free & (1 << ((i + 1) % 8)))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(rcdu->planes.planes))
+		goto done;
+
+	rcdu->planes.free &= ~(1 << i);
+	if (format->planes == 2)
+		rcdu->planes.free &= ~(1 << ((i + 1) % 8));
+
+	plane->hwindex = i;
+
+	ret = 0;
+
+done:
+	mutex_unlock(&rcdu->planes.lock);
+	return ret;
+}
+
+void rcar_du_plane_release(struct rcar_du_plane *plane)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+
+	if (plane->hwindex == -1)
+		return;
+
+	mutex_lock(&rcdu->planes.lock);
+	rcdu->planes.free |= 1 << plane->hwindex;
+	if (plane->format->planes == 2)
+		rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8);
+	mutex_unlock(&rcdu->planes.lock);
+
+	plane->hwindex = -1;
+}
+
+void rcar_du_plane_update_base(struct rcar_du_plane *plane)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+	unsigned int index = plane->hwindex;
+
+	/* According to the datasheet the Y position is expressed in raster line
+	 * units. However, 32bpp formats seem to require a doubled Y position
+	 * value. Similarly, for the second plane, NV12 and NV21 formats seem to
+	 * require a halved Y position value.
+	 */
+	rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
+	rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+			    (plane->format->bpp == 32 ? 2 : 1));
+	rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]);
+
+	if (plane->format->planes == 2) {
+		index = (index + 1) % 8;
+
+		rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
+		rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+				    (plane->format->bpp == 16 ? 2 : 1) / 2);
+		rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]);
+	}
+}
+
+void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
+				struct drm_framebuffer *fb)
+{
+	struct drm_gem_cma_object *gem;
+
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	plane->dma[0] = gem->paddr + fb->offsets[0];
+
+	if (plane->format->planes == 2) {
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		plane->dma[1] = gem->paddr + fb->offsets[1];
+	}
+}
+
+static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
+				     unsigned int index)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+	u32 colorkey;
+	u32 pnmr;
+
+	/* The PnALPHAR register controls alpha-blending in 16bpp formats
+	 * (ARGB1555 and XRGB1555).
+	 *
+	 * For ARGB, set the alpha value to 0, and enable alpha-blending when
+	 * the A bit is 0. This maps A=0 to alpha=0 and A=1 to alpha=255.
+	 *
+	 * For XRGB, set the alpha value to the plane-wide alpha value and
+	 * enable alpha-blending regardless of the X bit value.
+	 */
+	if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
+		rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0);
+	else
+		rcar_du_plane_write(rcdu, index, PnALPHAR,
+				    PnALPHAR_ABIT_X | plane->alpha);
+
+	pnmr = PnMR_BM_MD | plane->format->pnmr;
+
+	/* Disable color keying when requested. YUV formats have the
+	 * PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
+	 * automatically.
+	 */
+	if ((plane->colorkey & RCAR_DU_COLORKEY_MASK) == RCAR_DU_COLORKEY_NONE)
+		pnmr |= PnMR_SPIM_TP_OFF;
+
+	/* For packed YUV formats we need to select the U/V order. */
+	if (plane->format->fourcc == DRM_FORMAT_YUYV)
+		pnmr |= PnMR_YCDF_YUYV;
+
+	rcar_du_plane_write(rcdu, index, PnMR, pnmr);
+
+	switch (plane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
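+		/* Repack the RGB888 color key as RGB565 by dropping the low
+		 * bits of each component.
+		 */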
+		colorkey = ((plane->colorkey & 0xf80000) >> 8)
+			 | ((plane->colorkey & 0x00fc00) >> 5)
+			 | ((plane->colorkey & 0x0000f8) >> 3);
+		rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+		break;
+
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_XRGB1555:
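+		/* Repack the RGB888 color key as XRGB1555, leaving the X bit
+		 * clear.
+		 */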
+		colorkey = ((plane->colorkey & 0xf80000) >> 9)
+			 | ((plane->colorkey & 0x00f800) >> 6)
+			 | ((plane->colorkey & 0x0000f8) >> 3);
+		rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+		break;
+
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		rcar_du_plane_write(rcdu, index, PnTC3R,
+				    PnTC3R_CODE | (plane->colorkey & 0xffffff));
+		break;
+	}
+}
+
+static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
+				  unsigned int index)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+	u32 ddcr2 = PnDDCR2_CODE;
+	u32 ddcr4;
+	u32 mwr;
+
+	/* Data format
+	 *
+	 * The data format is selected by the DDDF field in PnMR and the EDF
+	 * field in DDCR4.
+	 */
+	ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4);
+	ddcr4 &= ~PnDDCR4_EDF_MASK;
+	ddcr4 |= plane->format->edf | PnDDCR4_CODE;
+
+	rcar_du_plane_setup_mode(plane, index);
+
+	if (plane->format->planes == 2) {
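+		/* The second of the two hardware planes carries the chroma
+		 * data (PnDDCR2_DIVU), the plane at hwindex carries the luma
+		 * data (PnDDCR2_DIVY).
+		 */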
+		if (plane->hwindex != index) {
+			if (plane->format->fourcc == DRM_FORMAT_NV12 ||
+			    plane->format->fourcc == DRM_FORMAT_NV21)
+				ddcr2 |= PnDDCR2_Y420;
+
+			if (plane->format->fourcc == DRM_FORMAT_NV21)
+				ddcr2 |= PnDDCR2_NV21;
+
+			ddcr2 |= PnDDCR2_DIVU;
+		} else {
+			ddcr2 |= PnDDCR2_DIVY;
+		}
+	}
+
+	rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2);
+	rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4);
+
+	/* Memory pitch (expressed in pixels) */
+	if (plane->format->planes == 2)
+		mwr = plane->pitch;
+	else
+		mwr = plane->pitch * 8 / plane->format->bpp;
+
+	rcar_du_plane_write(rcdu, index, PnMWR, mwr);
+
+	/* Destination position and size */
+	rcar_du_plane_write(rcdu, index, PnDSXR, plane->width);
+	rcar_du_plane_write(rcdu, index, PnDSYR, plane->height);
+	rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x);
+	rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y);
+
+	/* Wrap-around and blinking, disabled */
+	rcar_du_plane_write(rcdu, index, PnWASPR, 0);
+	rcar_du_plane_write(rcdu, index, PnWAMWR, 4095);
+	rcar_du_plane_write(rcdu, index, PnBTR, 0);
+	rcar_du_plane_write(rcdu, index, PnMLR, 0);
+}
+
+void rcar_du_plane_setup(struct rcar_du_plane *plane)
+{
+	__rcar_du_plane_setup(plane, plane->hwindex);
+	if (plane->format->planes == 2)
+		__rcar_du_plane_setup(plane, (plane->hwindex + 1) % 8);
+
+	rcar_du_plane_update_base(plane);
+}
+
+static int
+rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		       unsigned int crtc_w, unsigned int crtc_h,
+		       uint32_t src_x, uint32_t src_y,
+		       uint32_t src_w, uint32_t src_h)
+{
+	struct rcar_du_plane *rplane = to_rcar_plane(plane);
+	struct rcar_du_device *rcdu = plane->dev->dev_private;
+	const struct rcar_du_format_info *format;
+	unsigned int nplanes;
+	int ret;
+
+	format = rcar_du_format_info(fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__,
+			fb->pixel_format);
+		return -EINVAL;
+	}
+
+	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+		dev_dbg(rcdu->dev, "%s: scaling not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	nplanes = rplane->format ? rplane->format->planes : 0;
+
+	/* Reallocate hardware planes if the number of required planes has
+	 * changed.
+	 */
+	if (format->planes != nplanes) {
+		rcar_du_plane_release(rplane);
+		ret = rcar_du_plane_reserve(rplane, format);
+		if (ret < 0)
+			return ret;
+	}
+
+	rplane->crtc = crtc;
+	rplane->format = format;
+	rplane->pitch = fb->pitches[0];
+
+	rplane->src_x = src_x >> 16;
+	rplane->src_y = src_y >> 16;
+	rplane->dst_x = crtc_x;
+	rplane->dst_y = crtc_y;
+	rplane->width = crtc_w;
+	rplane->height = crtc_h;
+
+	rcar_du_plane_compute_base(rplane, fb);
+	rcar_du_plane_setup(rplane);
+
+	mutex_lock(&rcdu->planes.lock);
+	rplane->enabled = true;
+	rcar_du_crtc_update_planes(rplane->crtc);
+	mutex_unlock(&rcdu->planes.lock);
+
+	return 0;
+}
+
+static int rcar_du_plane_disable(struct drm_plane *plane)
+{
+	struct rcar_du_device *rcdu = plane->dev->dev_private;
+	struct rcar_du_plane *rplane = to_rcar_plane(plane);
+
+	if (!rplane->enabled)
+		return 0;
+
+	mutex_lock(&rcdu->planes.lock);
+	rplane->enabled = false;
+	rcar_du_crtc_update_planes(rplane->crtc);
+	mutex_unlock(&rcdu->planes.lock);
+
+	rcar_du_plane_release(rplane);
+
+	rplane->crtc = NULL;
+	rplane->format = NULL;
+
+	return 0;
+}
+
+/* Both the .set_property and the .update_plane operations are called with the
+ * mode_config lock held. There is thus no need to explicitly protect access to
+ * the alpha and colorkey fields and the mode register.
+ */
+static void rcar_du_plane_set_alpha(struct rcar_du_plane *plane, u32 alpha)
+{
+	if (plane->alpha == alpha)
+		return;
+
+	plane->alpha = alpha;
+	if (!plane->enabled || plane->format->fourcc != DRM_FORMAT_XRGB1555)
+		return;
+
+	rcar_du_plane_setup_mode(plane, plane->hwindex);
+}
+
+static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
+				       u32 colorkey)
+{
+	if (plane->colorkey == colorkey)
+		return;
+
+	plane->colorkey = colorkey;
+	if (!plane->enabled)
+		return;
+
+	rcar_du_plane_setup_mode(plane, plane->hwindex);
+}
+
+static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
+				   unsigned int zpos)
+{
+	struct rcar_du_device *rcdu = plane->dev;
+
+	mutex_lock(&rcdu->planes.lock);
+	if (plane->zpos == zpos)
+		goto done;
+
+	plane->zpos = zpos;
+	if (!plane->enabled)
+		goto done;
+
+	rcar_du_crtc_update_planes(plane->crtc);
+
+done:
+	mutex_unlock(&rcdu->planes.lock);
+}
+
+static int rcar_du_plane_set_property(struct drm_plane *plane,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	struct rcar_du_device *rcdu = plane->dev->dev_private;
+	struct rcar_du_plane *rplane = to_rcar_plane(plane);
+
+	if (property == rcdu->planes.alpha)
+		rcar_du_plane_set_alpha(rplane, value);
+	else if (property == rcdu->planes.colorkey)
+		rcar_du_plane_set_colorkey(rplane, value);
+	else if (property == rcdu->planes.zpos)
+		rcar_du_plane_set_zpos(rplane, value);
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct drm_plane_funcs rcar_du_plane_funcs = {
+	.update_plane = rcar_du_plane_update,
+	.disable_plane = rcar_du_plane_disable,
+	.set_property = rcar_du_plane_set_property,
+	.destroy = drm_plane_cleanup,
+};
+
+static const uint32_t formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_NV16,
+};
+
+int rcar_du_plane_init(struct rcar_du_device *rcdu)
+{
+	unsigned int i;
+
+	mutex_init(&rcdu->planes.lock);
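+	/* All eight hardware planes are initially free, one bit per plane. */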
+	rcdu->planes.free = 0xff;
+
+	rcdu->planes.alpha =
+		drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
+	if (rcdu->planes.alpha == NULL)
+		return -ENOMEM;
+
+	/* The color key is expressed as an RGB888 triplet stored in a 32-bit
+	 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
+	 * or enable source color keying (1).
+	 */
+	rcdu->planes.colorkey =
+		drm_property_create_range(rcdu->ddev, 0, "colorkey",
+					  0, 0x01ffffff);
+	if (rcdu->planes.colorkey == NULL)
+		return -ENOMEM;
+
+	rcdu->planes.zpos =
+		drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
+	if (rcdu->planes.zpos == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
+		struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+
+		plane->dev = rcdu;
+		plane->hwindex = -1;
+		plane->alpha = 255;
+		plane->colorkey = RCAR_DU_COLORKEY_NONE;
+		plane->zpos = 0;
+	}
+
+	return 0;
+}
+
+int rcar_du_plane_register(struct rcar_du_device *rcdu)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
+		struct rcar_du_kms_plane *plane;
+
+		plane = devm_kzalloc(rcdu->dev, sizeof(*plane), GFP_KERNEL);
+		if (plane == NULL)
+			return -ENOMEM;
+
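+		/* KMS planes are backed by software planes 2 and above,
+		 * leaving the first two entries for the CRTCs.
+		 */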
+		plane->hwplane = &rcdu->planes.planes[i + 2];
+		plane->hwplane->zpos = 1;
+
+		ret = drm_plane_init(rcdu->ddev, &plane->plane,
+				     (1 << rcdu->num_crtcs) - 1,
+				     &rcar_du_plane_funcs, formats,
+				     ARRAY_SIZE(formats), false);
+		if (ret < 0)
+			return ret;
+
+		drm_object_attach_property(&plane->plane.base,
+					   rcdu->planes.alpha, 255);
+		drm_object_attach_property(&plane->plane.base,
+					   rcdu->planes.colorkey,
+					   RCAR_DU_COLORKEY_NONE);
+		drm_object_attach_property(&plane->plane.base,
+					   rcdu->planes.zpos, 1);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
new file mode 100644
index 0000000..5397dba
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -0,0 +1,67 @@
+/*
+ * rcar_du_plane.h  --  R-Car Display Unit Planes
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_PLANE_H__
+#define __RCAR_DU_PLANE_H__
+
+struct drm_crtc;
+struct drm_framebuffer;
+struct rcar_du_device;
+struct rcar_du_format_info;
+
+/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
+ * using KMS planes requires at least one of the CRTCs to be enabled, no more
+ * than 7 KMS planes can be available. We thus create 7 KMS planes and
+ * 9 software planes (one for each KMS plane and one for each CRTC).
+ */
+
+#define RCAR_DU_NUM_KMS_PLANES		7
+#define RCAR_DU_NUM_HW_PLANES		8
+#define RCAR_DU_NUM_SW_PLANES		9
+
+struct rcar_du_plane {
+	struct rcar_du_device *dev;
+	struct drm_crtc *crtc;
+
+	bool enabled;
+
+	int hwindex;		/* 0-based, -1 means unused */
+	unsigned int alpha;
+	unsigned int colorkey;
+	unsigned int zpos;
+
+	const struct rcar_du_format_info *format;
+
+	unsigned long dma[2];
+	unsigned int pitch;
+
+	unsigned int width;
+	unsigned int height;
+
+	unsigned int src_x;
+	unsigned int src_y;
+	unsigned int dst_x;
+	unsigned int dst_y;
+};
+
+int rcar_du_plane_init(struct rcar_du_device *rcdu);
+int rcar_du_plane_register(struct rcar_du_device *rcdu);
+void rcar_du_plane_setup(struct rcar_du_plane *plane);
+void rcar_du_plane_update_base(struct rcar_du_plane *plane);
+void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
+				struct drm_framebuffer *fb);
+int rcar_du_plane_reserve(struct rcar_du_plane *plane,
+			  const struct rcar_du_format_info *format);
+void rcar_du_plane_release(struct rcar_du_plane *plane);
+
+#endif /* __RCAR_DU_PLANE_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
new file mode 100644
index 0000000..69f21f1
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -0,0 +1,445 @@
+/*
+ * rcar_du_regs.h  --  R-Car Display Unit Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RCAR_DU_REGS_H__
+#define __RCAR_DU_REGS_H__
+
+#define DISP2_REG_OFFSET	 0x30000
+
+/* -----------------------------------------------------------------------------
+ * Display Control Registers
+ */
+
+#define DSYSR			0x00000	/* display 1 */
+#define D2SYSR			0x30000	/* display 2 */
+#define DSYSR_ILTS		(1 << 29)
+#define DSYSR_DSEC		(1 << 20)
+#define DSYSR_IUPD		(1 << 16)
+#define DSYSR_DRES		(1 << 9)
+#define DSYSR_DEN		(1 << 8)
+#define DSYSR_TVM_MASTER	(0 << 6)
+#define DSYSR_TVM_SWITCH	(1 << 6)
+#define DSYSR_TVM_TVSYNC	(2 << 6)
+#define DSYSR_TVM_MASK		(3 << 6)
+#define DSYSR_SCM_INT_NONE	(0 << 4)
+#define DSYSR_SCM_INT_SYNC	(2 << 4)
+#define DSYSR_SCM_INT_VIDEO	(3 << 4)
+
+#define DSMR			0x00004
+#define D2SMR			0x30004
+#define DSMR_VSPM		(1 << 28)
+#define DSMR_ODPM		(1 << 27)
+#define DSMR_DIPM_DISP		(0 << 25)
+#define DSMR_DIPM_CSYNC		(1 << 25)
+#define DSMR_DIPM_DE		(3 << 25)
+#define DSMR_DIPM_MASK		(3 << 25)
+#define DSMR_CSPM		(1 << 24)
+#define DSMR_DIL		(1 << 19)
+#define DSMR_VSL		(1 << 18)
+#define DSMR_HSL		(1 << 17)
+#define DSMR_DDIS		(1 << 16)
+#define DSMR_CDEL		(1 << 15)
+#define DSMR_CDEM_CDE		(0 << 13)
+#define DSMR_CDEM_LOW		(2 << 13)
+#define DSMR_CDEM_HIGH		(3 << 13)
+#define DSMR_CDEM_MASK		(3 << 13)
+#define DSMR_CDED		(1 << 12)
+#define DSMR_ODEV		(1 << 8)
+#define DSMR_CSY_VH_OR		(0 << 6)
+#define DSMR_CSY_333		(2 << 6)
+#define DSMR_CSY_222		(3 << 6)
+#define DSMR_CSY_MASK		(3 << 6)
+
+#define DSSR			0x00008
+#define D2SSR			0x30008
+#define DSSR_VC1FB_DSA0		(0 << 30)
+#define DSSR_VC1FB_DSA1		(1 << 30)
+#define DSSR_VC1FB_DSA2		(2 << 30)
+#define DSSR_VC1FB_INIT		(3 << 30)
+#define DSSR_VC1FB_MASK		(3 << 30)
+#define DSSR_VC0FB_DSA0		(0 << 28)
+#define DSSR_VC0FB_DSA1		(1 << 28)
+#define DSSR_VC0FB_DSA2		(2 << 28)
+#define DSSR_VC0FB_INIT		(3 << 28)
+#define DSSR_VC0FB_MASK		(3 << 28)
+#define DSSR_DFB(n)		(1 << ((n)+15))
+#define DSSR_TVR		(1 << 15)
+#define DSSR_FRM		(1 << 14)
+#define DSSR_VBK		(1 << 11)
+#define DSSR_RINT		(1 << 9)
+#define DSSR_HBK		(1 << 8)
+#define DSSR_ADC(n)		(1 << ((n)-1))
+
+#define DSRCR			0x0000c
+#define D2SRCR			0x3000c
+#define DSRCR_TVCL		(1 << 15)
+#define DSRCR_FRCL		(1 << 14)
+#define DSRCR_VBCL		(1 << 11)
+#define DSRCR_RICL		(1 << 9)
+#define DSRCR_HBCL		(1 << 8)
+#define DSRCR_ADCL(n)		(1 << ((n)-1))
+#define DSRCR_MASK		0x0000cbff
+
+#define DIER			0x00010
+#define D2IER			0x30010
+#define DIER_TVE		(1 << 15)
+#define DIER_FRE		(1 << 14)
+#define DIER_VBE		(1 << 11)
+#define DIER_RIE		(1 << 9)
+#define DIER_HBE		(1 << 8)
+#define DIER_ADCE(n)		(1 << ((n)-1))
+
+#define CPCR			0x00014
+#define CPCR_CP4CE		(1 << 19)
+#define CPCR_CP3CE		(1 << 18)
+#define CPCR_CP2CE		(1 << 17)
+#define CPCR_CP1CE		(1 << 16)
+
+#define DPPR			0x00018
+#define DPPR_DPE(n)		(1 << ((n)*4-1))
+#define DPPR_DPS(n, p)		(((p)-1) << DPPR_DPS_SHIFT(n))
+#define DPPR_DPS_SHIFT(n)	(((n)-1)*4)
+#define DPPR_BPP16		(DPPR_DPE(8) | DPPR_DPS(8, 1))	/* plane1 */
+#define DPPR_BPP32_P1		(DPPR_DPE(7) | DPPR_DPS(7, 1))
+#define DPPR_BPP32_P2		(DPPR_DPE(8) | DPPR_DPS(8, 2))
+#define DPPR_BPP32		(DPPR_BPP32_P1 | DPPR_BPP32_P2)	/* plane1 & 2 */
+
+#define DEFR			0x00020
+#define D2EFR			0x30020
+#define DEFR_CODE		(0x7773 << 16)
+#define DEFR_EXSL		(1 << 12)
+#define DEFR_EXVL		(1 << 11)
+#define DEFR_EXUP		(1 << 5)
+#define DEFR_VCUP		(1 << 4)
+#define DEFR_DEFE		(1 << 0)
+
+#define DAPCR			0x00024
+#define DAPCR_CODE		(0x7773 << 16)
+#define DAPCR_AP2E		(1 << 4)
+#define DAPCR_AP1E		(1 << 0)
+
+#define DCPCR			0x00028
+#define DCPCR_CODE		(0x7773 << 16)
+#define DCPCR_CA2B		(1 << 13)
+#define DCPCR_CD2F		(1 << 12)
+#define DCPCR_DC2E		(1 << 8)
+#define DCPCR_CAB		(1 << 5)
+#define DCPCR_CDF		(1 << 4)
+#define DCPCR_DCE		(1 << 0)
+
+#define DEFR2			0x00034
+#define D2EFR2			0x30034
+#define DEFR2_CODE		(0x7775 << 16)
+#define DEFR2_DEFE2G		(1 << 0)
+
+#define DEFR3			0x00038
+#define D2EFR3			0x30038
+#define DEFR3_CODE		(0x7776 << 16)
+#define DEFR3_EVDA		(1 << 14)
+#define DEFR3_EVDM_1		(1 << 12)
+#define DEFR3_EVDM_2		(2 << 12)
+#define DEFR3_EVDM_3		(3 << 12)
+#define DEFR3_VMSM2_EMA		(1 << 6)
+#define DEFR3_VMSM1_ENA		(1 << 4)
+#define DEFR3_DEFE3		(1 << 0)
+
+#define DEFR4			0x0003c
+#define D2EFR4			0x3003c
+#define DEFR4_CODE		(0x7777 << 16)
+#define DEFR4_LRUO		(1 << 5)
+#define DEFR4_SPCE		(1 << 4)
+
+#define DVCSR			0x000d0
+#define DVCSR_VCnFB2_DSA0(n)	(0 << ((n)*2+16))
+#define DVCSR_VCnFB2_DSA1(n)	(1 << ((n)*2+16))
+#define DVCSR_VCnFB2_DSA2(n)	(2 << ((n)*2+16))
+#define DVCSR_VCnFB2_INIT(n)	(3 << ((n)*2+16))
+#define DVCSR_VCnFB2_MASK(n)	(3 << ((n)*2+16))
+#define DVCSR_VCnFB_DSA0(n)	(0 << ((n)*2))
+#define DVCSR_VCnFB_DSA1(n)	(1 << ((n)*2))
+#define DVCSR_VCnFB_DSA2(n)	(2 << ((n)*2))
+#define DVCSR_VCnFB_INIT(n)	(3 << ((n)*2))
+#define DVCSR_VCnFB_MASK(n)	(3 << ((n)*2))
+
+#define DEFR5			0x000e0
+#define DEFR5_CODE		(0x66 << 24)
+#define DEFR5_YCRGB2_DIS	(0 << 14)
+#define DEFR5_YCRGB2_PRI1	(1 << 14)
+#define DEFR5_YCRGB2_PRI2	(2 << 14)
+#define DEFR5_YCRGB2_PRI3	(3 << 14)
+#define DEFR5_YCRGB2_MASK	(3 << 14)
+#define DEFR5_YCRGB1_DIS	(0 << 12)
+#define DEFR5_YCRGB1_PRI1	(1 << 12)
+#define DEFR5_YCRGB1_PRI2	(2 << 12)
+#define DEFR5_YCRGB1_PRI3	(3 << 12)
+#define DEFR5_YCRGB1_MASK	(3 << 12)
+#define DEFR5_DEFE5		(1 << 0)
+
+#define DDLTR			0x000e4
+#define DDLTR_CODE		(0x7766 << 16)
+#define DDLTR_DLAR2		(1 << 6)
+#define DDLTR_DLAY2		(1 << 5)
+#define DDLTR_DLAY1		(1 << 1)
+
+#define DEFR6			0x000e8
+#define DEFR6_CODE		(0x7778 << 16)
+#define DEFR6_ODPM22_D2SMR	(0 << 10)
+#define DEFR6_ODPM22_DISP	(2 << 10)
+#define DEFR6_ODPM22_CDE	(3 << 10)
+#define DEFR6_ODPM22_MASK	(3 << 10)
+#define DEFR6_ODPM12_DSMR	(0 << 8)
+#define DEFR6_ODPM12_DISP	(2 << 8)
+#define DEFR6_ODPM12_CDE	(3 << 8)
+#define DEFR6_ODPM12_MASK	(3 << 8)
+#define DEFR6_TCNE2		(1 << 6)
+#define DEFR6_MLOS1		(1 << 2)
+#define DEFR6_DEFAULT		(DEFR6_CODE | DEFR6_TCNE2)
+
+/* -----------------------------------------------------------------------------
+ * Display Timing Generation Registers
+ */
+
+#define HDSR			0x00040
+#define HDER			0x00044
+#define VDSR			0x00048
+#define VDER			0x0004c
+#define HCR			0x00050
+#define HSWR			0x00054
+#define VCR			0x00058
+#define VSPR			0x0005c
+#define EQWR			0x00060
+#define SPWR			0x00064
+#define CLAMPSR			0x00070
+#define CLAMPWR			0x00074
+#define DESR			0x00078
+#define DEWR			0x0007c
+
+/* -----------------------------------------------------------------------------
+ * Display Attribute Registers
+ */
+
+#define CP1TR			0x00080
+#define CP2TR			0x00084
+#define CP3TR			0x00088
+#define CP4TR			0x0008c
+
+#define DOOR			0x00090
+#define DOOR_RGB(r, g, b)	(((r) << 18) | ((g) << 10) | ((b) << 2))
+#define CDER			0x00094
+#define CDER_RGB(r, g, b)	(((r) << 18) | ((g) << 10) | ((b) << 2))
+#define BPOR			0x00098
+#define BPOR_RGB(r, g, b)	(((r) << 18) | ((g) << 10) | ((b) << 2))
+
+#define RINTOFSR		0x0009c
+
+#define DSHPR			0x000c8
+#define DSHPR_CODE		(0x7776 << 16)
+#define DSHPR_PRIH		(0xa << 4)
+#define DSHPR_PRIL_BPP16	(0x8 << 0)
+#define DSHPR_PRIL_BPP32	(0x9 << 0)
+
+/* -----------------------------------------------------------------------------
+ * Display Plane Registers
+ */
+
+#define PLANE_OFF		0x00100
+
+#define PnMR			0x00100 /* plane 1 */
+#define PnMR_VISL_VIN0		(0 << 26)	/* use Video Input 0 */
+#define PnMR_VISL_VIN1		(1 << 26)	/* use Video Input 1 */
+#define PnMR_VISL_VIN2		(2 << 26)	/* use Video Input 2 */
+#define PnMR_VISL_VIN3		(3 << 26)	/* use Video Input 3 */
+#define PnMR_YCDF_YUYV		(1 << 20)	/* YUYV format */
+#define PnMR_TC_R		(0 << 17)	/* Transparent color is PnTC1R */
+#define PnMR_TC_CP		(1 << 17)	/* Transparent color is color palette */
+#define PnMR_WAE		(1 << 16)	/* Wrap around Enable */
+#define PnMR_SPIM_TP		(0 << 12)	/* Transparent Color */
+#define PnMR_SPIM_ALP		(1 << 12)	/* Alpha Blending */
+#define PnMR_SPIM_EOR		(2 << 12)	/* EOR */
+#define PnMR_SPIM_TP_OFF	(1 << 14)	/* No Transparent Color */
+#define PnMR_CPSL_CP1		(0 << 8)	/* Color Palette selected 1 */
+#define PnMR_CPSL_CP2		(1 << 8)	/* Color Palette selected 2 */
+#define PnMR_CPSL_CP3		(2 << 8)	/* Color Palette selected 3 */
+#define PnMR_CPSL_CP4		(3 << 8)	/* Color Palette selected 4 */
+#define PnMR_DC			(1 << 7)	/* Display Area Change */
+#define PnMR_BM_MD		(0 << 4)	/* Manual Display Change Mode */
+#define PnMR_BM_AR		(1 << 4)	/* Auto Rendering Mode */
+#define PnMR_BM_AD		(2 << 4)	/* Auto Display Change Mode */
+#define PnMR_BM_VC		(3 << 4)	/* Video Capture Mode */
+#define PnMR_DDDF_8BPP		(0 << 0)	/* 8bit */
+#define PnMR_DDDF_16BPP		(1 << 0)	/* 16bit or 32bit */
+#define PnMR_DDDF_ARGB		(2 << 0)	/* ARGB */
+#define PnMR_DDDF_YC		(3 << 0)	/* YC */
+#define PnMR_DDDF_MASK		(3 << 0)
+
+#define PnMWR			0x00104
+
+#define PnALPHAR		0x00108
+#define PnALPHAR_ABIT_1		(0 << 12)
+#define PnALPHAR_ABIT_0		(1 << 12)
+#define PnALPHAR_ABIT_X		(2 << 12)
+
+#define PnDSXR			0x00110
+#define PnDSYR			0x00114
+#define PnDPXR			0x00118
+#define PnDPYR			0x0011c
+
+#define PnDSA0R			0x00120
+#define PnDSA1R			0x00124
+#define PnDSA2R			0x00128
+#define PnDSA_MASK		0xfffffff0
+
+#define PnSPXR			0x00130
+#define PnSPYR			0x00134
+#define PnWASPR			0x00138
+#define PnWAMWR			0x0013c
+
+#define PnBTR			0x00140
+
+#define PnTC1R			0x00144
+#define PnTC2R			0x00148
+#define PnTC3R			0x0014c
+#define PnTC3R_CODE		(0x66 << 24)
+
+#define PnMLR			0x00150
+
+#define PnSWAPR			0x00180
+#define PnSWAPR_DIGN		(1 << 4)
+#define PnSWAPR_SPQW		(1 << 3)
+#define PnSWAPR_SPLW		(1 << 2)
+#define PnSWAPR_SPWD		(1 << 1)
+#define PnSWAPR_SPBY		(1 << 0)
+
+#define PnDDCR			0x00184
+#define PnDDCR_CODE		(0x7775 << 16)
+#define PnDDCR_LRGB1		(1 << 11)
+#define PnDDCR_LRGB0		(1 << 10)
+
+#define PnDDCR2			0x00188
+#define PnDDCR2_CODE		(0x7776 << 16)
+#define PnDDCR2_NV21		(1 << 5)
+#define PnDDCR2_Y420		(1 << 4)
+#define PnDDCR2_DIVU		(1 << 1)
+#define PnDDCR2_DIVY		(1 << 0)
+
+#define PnDDCR4			0x00190
+#define PnDDCR4_CODE		(0x7766 << 16)
+#define PnDDCR4_SDFS_RGB	(0 << 4)
+#define PnDDCR4_SDFS_YC		(5 << 4)
+#define PnDDCR4_SDFS_MASK	(7 << 4)
+#define PnDDCR4_EDF_NONE	(0 << 0)
+#define PnDDCR4_EDF_ARGB8888	(1 << 0)
+#define PnDDCR4_EDF_RGB888	(2 << 0)
+#define PnDDCR4_EDF_RGB666	(3 << 0)
+#define PnDDCR4_EDF_MASK	(7 << 0)
+
+#define APnMR			0x0a100
+#define APnMR_WAE		(1 << 16)	/* Wrap around Enable */
+#define APnMR_DC		(1 << 7)	/* Display Area Change */
+#define APnMR_BM_MD		(0 << 4)	/* Manual Display Change Mode */
+#define APnMR_BM_AD		(2 << 4)	/* Auto Display Change Mode */
+
+#define APnMWR			0x0a104
+#define APnDSA0R		0x0a120
+#define APnDSA1R		0x0a124
+#define APnDSA2R		0x0a128
+#define APnMLR			0x0a150
+
+/* -----------------------------------------------------------------------------
+ * Display Capture Registers
+ */
+
+#define DCMWR			0x0c104
+#define DC2MWR			0x0c204
+#define DCSAR			0x0c120
+#define DC2SAR			0x0c220
+#define DCMLR			0x0c150
+#define DC2MLR			0x0c250
+
+/* -----------------------------------------------------------------------------
+ * Color Palette Registers
+ */
+
+#define CP1_000R		0x01000
+#define CP1_255R		0x013fc
+#define CP2_000R		0x02000
+#define CP2_255R		0x023fc
+#define CP3_000R		0x03000
+#define CP3_255R		0x033fc
+#define CP4_000R		0x04000
+#define CP4_255R		0x043fc
+
+/* -----------------------------------------------------------------------------
+ * External Synchronization Control Registers
+ */
+
+#define ESCR			0x10000
+#define ESCR2			0x31000
+#define ESCR_DCLKOINV		(1 << 25)
+#define ESCR_DCLKSEL_DCLKIN	(0 << 20)
+#define ESCR_DCLKSEL_CLKS	(1 << 20)
+#define ESCR_DCLKSEL_MASK	(1 << 20)
+#define ESCR_DCLKDIS		(1 << 16)
+#define ESCR_SYNCSEL_OFF	(0 << 8)
+#define ESCR_SYNCSEL_EXVSYNC	(2 << 8)
+#define ESCR_SYNCSEL_EXHSYNC	(3 << 8)
+#define ESCR_FRQSEL_MASK	(0x3f << 0)
+
+#define OTAR			0x10004
+#define OTAR2			0x31004
+
+/* -----------------------------------------------------------------------------
+ * Dual Display Output Control Registers
+ */
+
+#define DORCR			0x11000
+#define DORCR_PG2T		(1 << 30)
+#define DORCR_DK2S		(1 << 28)
+#define DORCR_PG2D_DS1		(0 << 24)
+#define DORCR_PG2D_DS2		(1 << 24)
+#define DORCR_PG2D_FIX0		(2 << 24)
+#define DORCR_PG2D_DOOR		(3 << 24)
+#define DORCR_PG2D_MASK		(3 << 24)
+#define DORCR_DR1D		(1 << 21)
+#define DORCR_PG1D_DS1		(0 << 16)
+#define DORCR_PG1D_DS2		(1 << 16)
+#define DORCR_PG1D_FIX0		(2 << 16)
+#define DORCR_PG1D_DOOR		(3 << 16)
+#define DORCR_PG1D_MASK		(3 << 16)
+#define DORCR_RGPV		(1 << 4)
+#define DORCR_DPRS		(1 << 0)
+
+#define DPTSR			0x11004
+#define DPTSR_PnDK(n)		(1 << ((n) + 16))
+#define DPTSR_PnTS(n)		(1 << (n))
+
+#define DAPTSR			0x11008
+#define DAPTSR_APnDK(n)		(1 << ((n) + 16))
+#define DAPTSR_APnTS(n)		(1 << (n))
+
+#define DS1PR			0x11020
+#define DS2PR			0x11024
+
+/* -----------------------------------------------------------------------------
+ * YC-RGB Conversion Coefficient Registers
+ */
+
+#define YNCR			0x11080
+#define YNOR			0x11084
+#define CRNOR			0x11088
+#define CBNOR			0x1108c
+#define RCRCR			0x11090
+#define GCRCR			0x11094
+#define GCBCR			0x11098
+#define BCBCR			0x1109c
+
+#endif /* __RCAR_DU_REGS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vga.c
new file mode 100644
index 0000000..327289e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.c
@@ -0,0 +1,149 @@
+/*
+ * rcar_du_vga.c  --  R-Car Display Unit VGA DAC and Connector
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_vga.h"
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
+{
+	return 0;
+}
+
+static int rcar_du_vga_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = rcar_du_vga_connector_get_modes,
+	.mode_valid = rcar_du_vga_connector_mode_valid,
+	.best_encoder = rcar_du_connector_best_encoder,
+};
+
+static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
+{
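+	/* No load detection is implemented, so the connection status of a VGA
+	 * monitor cannot be determined.
+	 */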
+	return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = rcar_du_vga_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = rcar_du_vga_connector_destroy,
+};
+
+static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+				      struct rcar_du_encoder *renc)
+{
+	struct rcar_du_connector *rcon;
+	struct drm_connector *connector;
+	int ret;
+
+	rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+	if (rcon == NULL)
+		return -ENOMEM;
+
+	connector = &rcon->connector;
+	connector->display_info.width_mm = 0;
+	connector->display_info.height_mm = 0;
+
+	ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
+				 DRM_MODE_CONNECTOR_VGA);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_sysfs_connector_add(connector);
+	if (ret < 0)
+		return ret;
+
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	drm_object_property_set_value(&connector->base,
+		rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+	ret = drm_mode_connector_attach_encoder(connector, &renc->encoder);
+	if (ret < 0)
+		return ret;
+
+	connector->encoder = &renc->encoder;
+	rcon->encoder = renc;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
+					   const struct drm_display_mode *mode,
+					   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = rcar_du_vga_encoder_dpms,
+	.mode_fixup = rcar_du_vga_encoder_mode_fixup,
+	.prepare = rcar_du_encoder_mode_prepare,
+	.commit = rcar_du_encoder_mode_commit,
+	.mode_set = rcar_du_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+int rcar_du_vga_init(struct rcar_du_device *rcdu,
+		     const struct rcar_du_encoder_vga_data *data,
+		     unsigned int output)
+{
+	struct rcar_du_encoder *renc;
+	int ret;
+
+	renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
+	if (renc == NULL)
+		return -ENOMEM;
+
+	renc->output = output;
+
+	ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_DAC);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+
+	return rcar_du_vga_connector_init(rcdu, renc);
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vga.h
new file mode 100644
index 0000000..66b4d2d
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vga.h
@@ -0,0 +1,24 @@
+/*
+ * rcar_du_vga.h  --  R-Car Display Unit VGA DAC and Connector
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_VGA_H__
+#define __RCAR_DU_VGA_H__
+
+struct rcar_du_device;
+struct rcar_du_encoder_vga_data;
+
+int rcar_du_vga_init(struct rcar_du_device *rcdu,
+		     const struct rcar_du_encoder_vga_data *data,
+		     unsigned int output);
+
+#endif /* __RCAR_DU_VGA_H__ */
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b55c1d6..bd6b2cf 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -570,9 +570,6 @@
 	unsigned int fb_rsrc, aper_rsrc;
 	int ret = 0;
 
-	dev_priv->mtrr[0].handle = -1;
-	dev_priv->mtrr[1].handle = -1;
-	dev_priv->mtrr[2].handle = -1;
 	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
 		fb_rsrc = 0;
 		fb_base = pci_resource_start(dev->pdev, 0);
@@ -584,21 +581,14 @@
 		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
 			/* Don't make MMIO write-combining! We need 3
 			 * MTRRs. */
-			dev_priv->mtrr[0].base = fb_base;
-			dev_priv->mtrr[0].size = 0x01000000;
-			dev_priv->mtrr[0].handle =
-			    drm_mtrr_add(dev_priv->mtrr[0].base,
-				         dev_priv->mtrr[0].size, DRM_MTRR_WC);
-			dev_priv->mtrr[1].base = fb_base + 0x02000000;
-			dev_priv->mtrr[1].size = 0x02000000;
-			dev_priv->mtrr[1].handle =
-			    drm_mtrr_add(dev_priv->mtrr[1].base,
-					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
-			dev_priv->mtrr[2].base = fb_base + 0x04000000;
-			dev_priv->mtrr[2].size = 0x04000000;
-			dev_priv->mtrr[2].handle =
-			    drm_mtrr_add(dev_priv->mtrr[2].base,
-					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
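+			/* arch_phys_wc_add() returns a handle that is later
+			 * passed to arch_phys_wc_del(), so the base and size
+			 * no longer need to be stored.
+			 */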
+			dev_priv->mtrr_handles[0] =
+				arch_phys_wc_add(fb_base, 0x01000000);
+			dev_priv->mtrr_handles[1] =
+				arch_phys_wc_add(fb_base + 0x02000000,
+						 0x02000000);
+			dev_priv->mtrr_handles[2] =
+				arch_phys_wc_add(fb_base + 0x04000000,
+						0x04000000);
 		} else {
 			DRM_ERROR("strange pci_resource_len %08llx\n",
 				  (unsigned long long)
@@ -616,11 +606,9 @@
 		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
 			/* Can use one MTRR to cover both fb and
 			 * aperture. */
-			dev_priv->mtrr[0].base = fb_base;
-			dev_priv->mtrr[0].size = 0x08000000;
-			dev_priv->mtrr[0].handle =
-			    drm_mtrr_add(dev_priv->mtrr[0].base,
-					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
+			dev_priv->mtrr_handles[0] =
+				arch_phys_wc_add(fb_base,
+						 0x08000000);
 		} else {
 			DRM_ERROR("strange pci_resource_len %08llx\n",
 				  (unsigned long long)
@@ -660,11 +648,10 @@
 	drm_savage_private_t *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 3; ++i)
-		if (dev_priv->mtrr[i].handle >= 0)
-			drm_mtrr_del(dev_priv->mtrr[i].handle,
-				 dev_priv->mtrr[i].base,
-				 dev_priv->mtrr[i].size, DRM_MTRR_WC);
+	for (i = 0; i < 3; ++i) {
+		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
+		dev_priv->mtrr_handles[i] = 0;
+	}
 }
 
 int savage_driver_unload(struct drm_device *dev)
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index df2aac6..c05082a 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -160,10 +160,7 @@
 	drm_local_map_t *cmd_dma;
 	drm_local_map_t fake_dma;
 
-	struct {
-		int handle;
-		unsigned long base, size;
-	} mtrr[3];
+	int mtrr_handles[3];
 
 	/* BCI and status-related stuff */
 	volatile uint32_t *status_ptr, *bci_ptr;
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 7e7d52b..ca498d1 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,6 @@
 config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
-	depends on DRM && (SUPERH || ARCH_SHMOBILE)
+	depends on DRM && (ARM || SUPERH)
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index f6e0b53..edc1018 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -90,7 +90,7 @@
 		return -EINVAL;
 	}
 
-	clk = clk_get(sdev->dev, clkname);
+	clk = devm_clk_get(sdev->dev, clkname);
 	if (IS_ERR(clk)) {
 		dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
 		return PTR_ERR(clk);
@@ -106,21 +106,12 @@
 
 static int shmob_drm_unload(struct drm_device *dev)
 {
-	struct shmob_drm_device *sdev = dev->dev_private;
-
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
 	drm_vblank_cleanup(dev);
 	drm_irq_uninstall(dev);
 
-	if (sdev->clock)
-		clk_put(sdev->clock);
-
-	if (sdev->mmio)
-		iounmap(sdev->mmio);
-
 	dev->dev_private = NULL;
-	kfree(sdev);
 
 	return 0;
 }
@@ -139,7 +130,7 @@
 		return -EINVAL;
 	}
 
-	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
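+	/* Use a managed allocation so the private data is released
+	 * automatically when the driver is unbound.
+	 */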
+	sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
 	if (sdev == NULL) {
 		dev_err(dev->dev, "failed to allocate private data\n");
 		return -ENOMEM;
@@ -156,29 +147,28 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		dev_err(&pdev->dev, "failed to get memory resource\n");
-		ret = -EINVAL;
-		goto done;
+		return -EINVAL;
 	}
 
-	sdev->mmio = ioremap_nocache(res->start, resource_size(res));
+	sdev->mmio = devm_ioremap_nocache(&pdev->dev, res->start,
+					  resource_size(res));
 	if (sdev->mmio == NULL) {
 		dev_err(&pdev->dev, "failed to remap memory resource\n");
-		ret = -ENOMEM;
-		goto done;
+		return -ENOMEM;
 	}
 
 	ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
 	if (ret < 0)
-		goto done;
+		return ret;
 
 	ret = shmob_drm_init_interface(sdev);
 	if (ret < 0)
-		goto done;
+		return ret;
 
 	ret = shmob_drm_modeset_init(sdev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to initialize mode setting\n");
-		goto done;
+		return ret;
 	}
 
 	for (i = 0; i < 4; ++i) {
@@ -273,7 +263,8 @@
 };
 
 static struct drm_driver shmob_drm_driver = {
-	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+				| DRIVER_PRIME,
 	.load			= shmob_drm_load,
 	.unload			= shmob_drm_unload,
 	.preclose		= shmob_drm_preclose,
@@ -283,6 +274,10 @@
 	.disable_vblank		= shmob_drm_disable_vblank,
 	.gem_free_object	= drm_gem_cma_free_object,
 	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_cma_dmabuf_import,
+	.gem_prime_export	= drm_gem_cma_dmabuf_export,
 	.dumb_create		= drm_gem_cma_dumb_create,
 	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
 	.dumb_destroy		= drm_gem_cma_dumb_destroy,
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index c291ee3..fc0ef0c 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -116,7 +116,7 @@
 	}
 
 	if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
-		dev_dbg(dev->dev, "valid pitch value %u\n",
+		dev_dbg(dev->dev, "invalid pitch value %u\n",
 			mode_cmd->pitches[0]);
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index e1eb899b0..060ae03 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -166,7 +166,7 @@
 {
 	struct shmob_drm_plane *splane = to_shmob_plane(plane);
 
-	if (plane->fb == NULL || !plane->enabled)
+	if (plane->fb == NULL)
 		return;
 
 	__shmob_drm_plane_setup(splane, plane->fb);
@@ -221,11 +221,8 @@
 
 static void shmob_drm_plane_destroy(struct drm_plane *plane)
 {
-	struct shmob_drm_plane *splane = to_shmob_plane(plane);
-
 	shmob_drm_plane_disable(plane);
 	drm_plane_cleanup(plane);
-	kfree(splane);
 }
 
 static const struct drm_plane_funcs shmob_drm_plane_funcs = {
@@ -251,7 +248,7 @@
 	struct shmob_drm_plane *splane;
 	int ret;
 
-	splane = kzalloc(sizeof(*splane), GFP_KERNEL);
+	splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
 	if (splane == NULL)
 		return -ENOMEM;
 
@@ -261,8 +258,6 @@
 	ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
 			     &shmob_drm_plane_funcs, formats,
 			     ARRAY_SIZE(formats), false);
-	if (ret < 0)
-		kfree(splane);
 
 	return ret;
 }
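
The conversions above (kzalloc to devm_kzalloc, ioremap_nocache to devm_ioremap_nocache, and the dropped kfree()/iounmap()/goto unwinding) all rely on the same managed-resource idea: anything allocated through devm_* is released automatically when probing fails or the device is unbound. A minimal sketch of that pattern, with hypothetical names and not taken from the shmob driver itself:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *mmio;		/* hypothetical private data */
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;			/* nothing to unwind */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;			/* priv is freed automatically */

	priv->mmio = devm_ioremap_nocache(&pdev->dev, res->start,
					  resource_size(res));
	if (priv->mmio == NULL)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* no kfree()/iounmap() here: devm releases both after this returns */
	return 0;
}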
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 5dd3c7d..4de3fb4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -384,10 +384,6 @@
 	return 0;
 }
 
-static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
 static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
 		.destroy        = tilcdc_crtc_destroy,
 		.set_config     = drm_crtc_helper_set_config,
@@ -401,7 +397,6 @@
 		.commit         = tilcdc_crtc_commit,
 		.mode_set       = tilcdc_crtc_mode_set,
 		.mode_set_base  = tilcdc_crtc_mode_set_base,
-		.load_lut       = tilcdc_crtc_load_lut,
 };
 
 int tilcdc_crtc_max_width(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2b5461b..f2a6528 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -157,7 +157,9 @@
 	struct platform_device *pdev = dev->platformdev;
 	struct device_node *node = pdev->dev.of_node;
 	struct tilcdc_drm_private *priv;
+	struct tilcdc_module *mod;
 	struct resource *res;
+	u32 bpp = 0;
 	int ret;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -256,7 +258,15 @@
 
 	platform_set_drvdata(pdev, dev);
 
-	priv->fbdev = drm_fbdev_cma_init(dev, 16,
+
+	list_for_each_entry(mod, &module_list, list) {
+		DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp);
+		bpp = mod->preferred_bpp;
+		if (bpp > 0)
+			break;
+	}
+
+	priv->fbdev = drm_fbdev_cma_init(dev, bpp,
 			dev->mode_config.num_crtc,
 			dev->mode_config.num_connector);
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 8242b5a..0906843 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -89,6 +89,7 @@
 	const char *name;
 	struct list_head list;
 	const struct tilcdc_module_ops *funcs;
+	unsigned int preferred_bpp;
 };
 
 void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 0917665..86c6732 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -393,6 +393,8 @@
 		goto fail;
 	}
 
+	mod->preferred_bpp = panel_mod->info->bpp;
+
 	panel_mod->backlight = of_find_backlight_by_node(node);
 	if (panel_mod->backlight)
 		dev_info(&pdev->dev, "found backlight\n");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index db1d2fc..8bf4fd1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -323,6 +323,8 @@
 		goto fail;
 	}
 
+	mod->preferred_bpp = slave_info.bpp;
+
 	i2c_node = of_find_node_by_phandle(i2c_phandle);
 	if (!i2c_node) {
 		dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index a36788f..925c7cd 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -354,6 +354,8 @@
 		goto fail;
 	}
 
+	mod->preferred_bpp = dvi_info.bpp;
+
 	i2c_node = of_find_node_by_phandle(i2c_phandle);
 	if (!i2c_node) {
 		dev_err(&pdev->dev, "could not get i2c bus node\n");
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dc0c065..97e9d61 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -393,19 +393,6 @@
 	.fb_release = udl_fb_release,
 };
 
-static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-			   u16 blue, int regno)
-{
-}
-
-static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-			     u16 *blue, int regno)
-{
-	*red = 0;
-	*green = 0;
-	*blue = 0;
-}
-
 static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 				      struct drm_file *file,
 				      unsigned flags, unsigned color,
@@ -558,8 +545,6 @@
 }
 
 static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
-	.gamma_set = udl_crtc_fb_gamma_set,
-	.gamma_get = udl_crtc_fb_gamma_get,
 	.fb_probe = udlfb_create,
 };
 
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index e96d234..2ae1eb7 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,10 +363,6 @@
 	kfree(crtc);
 }
 
-static void udl_load_lut(struct drm_crtc *crtc)
-{
-}
-
 static void udl_crtc_prepare(struct drm_crtc *crtc)
 {
 }
@@ -383,7 +379,6 @@
 	.prepare = udl_crtc_prepare,
 	.commit = udl_crtc_commit,
 	.disable = udl_crtc_disable,
-	.load_lut = udl_load_lut,
 };
 
 static const struct drm_crtc_funcs udl_crtc_funcs = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 07dfd82..78e2164 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -565,8 +565,8 @@
 		dev_priv->has_gmr = false;
 	}
 
-	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
-					   dev_priv->mmio_size, DRM_MTRR_WC);
+	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
+					       dev_priv->mmio_size);
 
 	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
 					 dev_priv->mmio_size);
@@ -664,8 +664,7 @@
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
-		     dev_priv->mmio_size, DRM_MTRR_WC);
+	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
@@ -709,8 +708,7 @@
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
-		     dev_priv->mmio_size, DRM_MTRR_WC);
+	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3e3c7ab..d4607b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -174,7 +174,6 @@
 			   uint32_t handle, uint32_t width, uint32_t height)
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 	struct vmw_surface *surface = NULL;
 	struct vmw_dma_buffer *dmabuf = NULL;
@@ -197,6 +196,8 @@
 	}
 
 	if (handle) {
+		struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
 		ret = vmw_user_lookup_handle(dev_priv, tfile,
 					     handle, &surface, &dmabuf);
 		if (ret) {
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index ff5c633..b2730b1 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -364,17 +364,12 @@
 	ipu_fb_enable(ipu_crtc);
 }
 
-static void ipu_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
 static struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.dpms = ipu_crtc_dpms,
 	.mode_fixup = ipu_crtc_mode_fixup,
 	.mode_set = ipu_crtc_mode_set,
 	.prepare = ipu_crtc_prepare,
 	.commit = ipu_crtc_commit,
-	.load_lut = ipu_crtc_load_lut,
 };
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 5855d17..9d8feac 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -42,6 +42,7 @@
 #include <linux/kd.h>
 #include <linux/slab.h>
 #include <linux/vt_kern.h>
+#include <linux/sched.h>
 #include <linux/selection.h>
 #include <linux/spinlock.h>
 #include <linux/ioport.h>
@@ -1124,11 +1125,15 @@
 
 	if (arg) {
 		if (set)
-			for (i = 0; i < cmapsz; i++)
+			for (i = 0; i < cmapsz; i++) {
 				vga_writeb(arg[i], charmap + i);
+				cond_resched();
+			}
 		else
-			for (i = 0; i < cmapsz; i++)
+			for (i = 0; i < cmapsz; i++) {
 				arg[i] = vga_readb(charmap + i);
+				cond_resched();
+			}
 
 		/*
 		 * In 512-character mode, the character map is not contiguous if
@@ -1139,11 +1144,15 @@
 			charmap += 2 * cmapsz;
 			arg += cmapsz;
 			if (set)
-				for (i = 0; i < cmapsz; i++)
+				for (i = 0; i < cmapsz; i++) {
 					vga_writeb(arg[i], charmap + i);
+					cond_resched();
+				}
 			else
-				for (i = 0; i < cmapsz; i++)
+				for (i = 0; i < cmapsz; i++) {
 					arg[i] = vga_readb(charmap + i);
+					cond_resched();
+				}
 		}
 	}
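
The loops above copy an entire character map through VGA memory one byte at a time, which can take long enough on slow hardware to trigger soft-lockup warnings; calling cond_resched() on every iteration lets other tasks run in between. A rough sketch of the same pattern with hypothetical names, not the vgacon code itself:

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/types.h>

static void slow_copy_from_mmio(u8 *dst, const volatile void __iomem *src,
				unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		dst[i] = readb(src + i);	/* slow MMIO access */
		cond_resched();			/* may sleep; caller must not hold locks */
	}
}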
 
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 56009bc..2894e03 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -23,7 +23,7 @@
  * Every display_timing can be specified with either just the typical value or
  * a range consisting of min/typ/max. This function helps handling this
  **/
-static int parse_timing_property(struct device_node *np, const char *name,
+static int parse_timing_property(const struct device_node *np, const char *name,
 			  struct timing_entry *result)
 {
 	struct property *prop;
@@ -56,7 +56,8 @@
  * of_get_display_timing - parse display_timing entry from device_node
  * @np: device_node with the properties
  **/
-static struct display_timing *of_get_display_timing(struct device_node *np)
+static struct display_timing *of_get_display_timing(const struct device_node
+						    *np)
 {
 	struct display_timing *dt;
 	u32 val = 0;
@@ -97,6 +98,8 @@
 		dt->flags |= DISPLAY_FLAGS_INTERLACED;
 	if (of_property_read_bool(np, "doublescan"))
 		dt->flags |= DISPLAY_FLAGS_DOUBLESCAN;
+	if (of_property_read_bool(np, "doubleclk"))
+		dt->flags |= DISPLAY_FLAGS_DOUBLECLK;
 
 	if (ret) {
 		pr_err("%s: error reading timing properties\n",
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e328a61..296279b 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -24,9 +24,6 @@
 #ifdef CONFIG_X86
 #include <video/vga.h>
 #endif
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #include "edid.h"
 
 static struct cb_id uvesafb_cn_id = {
@@ -1540,67 +1537,30 @@
 
 static void uvesafb_init_mtrr(struct fb_info *info)
 {
-#ifdef CONFIG_MTRR
+	struct uvesafb_par *par = info->par;
+
 	if (mtrr && !(info->fix.smem_start & (PAGE_SIZE - 1))) {
 		int temp_size = info->fix.smem_len;
-		unsigned int type = 0;
 
-		switch (mtrr) {
-		case 1:
-			type = MTRR_TYPE_UNCACHABLE;
-			break;
-		case 2:
-			type = MTRR_TYPE_WRBACK;
-			break;
-		case 3:
-			type = MTRR_TYPE_WRCOMB;
-			break;
-		case 4:
-			type = MTRR_TYPE_WRTHROUGH;
-			break;
-		default:
-			type = 0;
-			break;
-		}
+		int rc;
 
-		if (type) {
-			int rc;
+		/* Find the largest power-of-two */
+		temp_size = roundup_pow_of_two(temp_size);
 
-			/* Find the largest power-of-two */
-			temp_size = roundup_pow_of_two(temp_size);
+		/* Try and find a power of two to add */
+		do {
+			rc = arch_phys_wc_add(info->fix.smem_start, temp_size);
+			temp_size >>= 1;
+		} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
 
-			/* Try and find a power of two to add */
-			do {
-				rc = mtrr_add(info->fix.smem_start,
-					      temp_size, type, 1);
-				temp_size >>= 1;
-			} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
-		}
+		if (rc >= 0)
+			par->mtrr_handle = rc;
 	}
-#endif /* CONFIG_MTRR */
 }
 
 static void uvesafb_ioremap(struct fb_info *info)
 {
-#ifdef CONFIG_X86
-	switch (mtrr) {
-	case 1: /* uncachable */
-		info->screen_base = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 2: /* write-back */
-		info->screen_base = ioremap_cache(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 3: /* write-combining */
-		info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
-		break;
-	case 4: /* write-through */
-	default:
-		info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
-		break;
-	}
-#else
-	info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
-#endif /* CONFIG_X86 */
+	info->screen_base = ioremap_wc(info->fix.smem_start, info->fix.smem_len);
 }
 
 static ssize_t uvesafb_show_vbe_ver(struct device *dev,
@@ -1851,6 +1811,7 @@
 		unregister_framebuffer(info);
 		release_region(0x3c0, 32);
 		iounmap(info->screen_base);
+		arch_phys_wc_del(par->mtrr_handle);
 		release_mem_region(info->fix.smem_start, info->fix.smem_len);
 		fb_destroy_modedb(info->monspecs.modedb);
 		fb_dealloc_cmap(&info->cmap);
@@ -1930,6 +1891,9 @@
 		}
 	}
 
+	if (mtrr != 3 && mtrr != 0)
+		pr_warn("uvesafb: mtrr should be set to 0 or 3; %d is unsupported\n", mtrr);
+
 	return 0;
 }
 #endif /* !MODULE */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 63d17ee..82670ac 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -55,16 +55,13 @@
 #include <linux/mm.h>
 #include <linux/cdev.h>
 #include <linux/mutex.h>
+#include <linux/io.h>
 #include <linux/slab.h>
 #if defined(__alpha__) || defined(__powerpc__)
 #include <asm/pgtable.h>	/* For pte_wrprotect */
 #endif
-#include <asm/io.h>
 #include <asm/mman.h>
 #include <asm/uaccess.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
 #include <linux/types.h>
 #include <linux/agp_backend.h>
@@ -933,6 +930,7 @@
 				struct dma_buf *dma_buf);
 	/* low-level interface used by drm_gem_prime_{import,export} */
 	int (*gem_prime_pin)(struct drm_gem_object *obj);
+	void (*gem_prime_unpin)(struct drm_gem_object *obj);
 	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
 	struct drm_gem_object *(*gem_prime_import_sg_table)(
 				struct drm_device *dev, size_t size,
@@ -1250,37 +1248,8 @@
 {
 	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
 }
-
-#define DRM_MTRR_WC		MTRR_TYPE_WRCOMB
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return mtrr_add(offset, size, flags, 1);
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return mtrr_del(handle, offset, size);
-}
-
 #else
 #define drm_core_has_MTRR(dev) (0)
-
-#define DRM_MTRR_WC		0
-
-static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
-			       unsigned int flags)
-{
-	return 0;
-}
-
-static inline int drm_mtrr_del(int handle, unsigned long offset,
-			       unsigned long size, unsigned int flags)
-{
-	return 0;
-}
 #endif
 
 static inline void drm_device_set_unplugged(struct drm_device *dev)
@@ -1630,7 +1599,6 @@
 extern int drm_sysfs_device_add(struct drm_minor *minor);
 extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 extern void drm_sysfs_device_remove(struct drm_minor *minor);
-extern char *drm_get_connector_status_name(enum drm_connector_status status);
 extern int drm_sysfs_connector_add(struct drm_connector *connector);
 extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 
@@ -1648,6 +1616,8 @@
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #include <drm/drm_global.h>
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index adb3f9b..0fd007a 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -409,6 +409,10 @@
 	/* framebuffer the connector is currently bound to */
 	struct drm_framebuffer *fb;
 
+	/* Temporary tracking of the old fb while a modeset is ongoing. Used
+	 * by drm_mode_set_config_internal to implement correct refcounting. */
+	struct drm_framebuffer *old_fb;
+
 	bool enabled;
 
 	/* Requested mode from modesetting. */
@@ -654,11 +658,7 @@
  * @format_count: number of formats supported
  * @crtc: currently bound CRTC
  * @fb: currently bound fb
- * @gamma_size: size of gamma table
- * @gamma_store: gamma correction table
- * @enabled: enabled flag
  * @funcs: helper functions
- * @helper_private: storage for drver layer
  * @properties: property tracking for this plane
  */
 struct drm_plane {
@@ -674,14 +674,7 @@
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb;
 
-	/* CRTC gamma size for reporting to userspace */
-	uint32_t gamma_size;
-	uint16_t *gamma_store;
-
-	bool enabled;
-
 	const struct drm_plane_funcs *funcs;
-	void *helper_private;
 
 	struct drm_object_properties properties;
 };
@@ -815,6 +808,7 @@
 	/* output poll support */
 	bool poll_enabled;
 	bool poll_running;
+	bool delayed_event;
 	struct delayed_work output_poll_work;
 
 	/* pointers to standard properties */
@@ -894,15 +888,17 @@
 			  const uint32_t *formats, uint32_t format_count,
 			  bool priv);
 extern void drm_plane_cleanup(struct drm_plane *plane);
+extern void drm_plane_force_disable(struct drm_plane *plane);
 
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
-extern char *drm_get_connector_name(struct drm_connector *connector);
-extern char *drm_get_dpms_name(int val);
-extern char *drm_get_dvi_i_subconnector_name(int val);
-extern char *drm_get_dvi_i_select_name(int val);
-extern char *drm_get_tv_subconnector_name(int val);
-extern char *drm_get_tv_select_name(int val);
+extern const char *drm_get_connector_name(const struct drm_connector *connector);
+extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_dpms_name(int val);
+extern const char *drm_get_dvi_i_subconnector_name(int val);
+extern const char *drm_get_dvi_i_select_name(int val);
+extern const char *drm_get_tv_subconnector_name(int val);
+extern const char *drm_get_tv_select_name(int val);
 extern void drm_fb_release(struct drm_file *file_priv);
 extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
@@ -994,7 +990,7 @@
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_dithering_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
-extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
 					     struct drm_encoder *encoder);
@@ -1094,5 +1090,6 @@
 extern int drm_format_plane_cpp(uint32_t format, int plane);
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern const char *drm_get_format_name(uint32_t format);
 
 #endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ce..6e17251 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,9 @@
 struct drm_gem_cma_object {
 	struct drm_gem_object base;
 	dma_addr_t paddr;
+	struct sg_table *sgt;
+
+	/* For objects with DMA memory allocated by GEM CMA */
 	void *vaddr;
 };
 
@@ -45,4 +48,10 @@
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif
 
+struct dma_buf *drm_gem_cma_dmabuf_export(struct drm_device *drm_dev,
+					  struct drm_gem_object *obj,
+					  int flags);
+struct drm_gem_object *drm_gem_cma_dmabuf_import(struct drm_device *drm_dev,
+						 struct dma_buf *dma_buf);
+
 #endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 675ddf4..815fafc 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -65,22 +65,6 @@
 #define DRM_AGP_KERN            struct no_agp_kern
 #endif
 
-#if !(__OS_HAS_MTRR)
-static __inline__ int mtrr_add(unsigned long base, unsigned long size,
-			       unsigned int type, char increment)
-{
-	return -ENODEV;
-}
-
-static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
-{
-	return -ENODEV;
-}
-
-#define MTRR_TYPE_WRCOMB     1
-
-#endif
-
 /** Other copying of data to kernel space */
 #define DRM_COPY_FROM_USER(arg1, arg2, arg3)		\
 	copy_from_user(arg1, arg2, arg3)
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
new file mode 100644
index 0000000..d128629
--- /dev/null
+++ b/include/drm/drm_rect.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_RECT_H
+#define DRM_RECT_H
+
+/**
+ * DOC: rect utils
+ *
+ * Utility functions to help manage rectangular areas for
+ * clipping, scaling, etc. calculations.
+ */
+
+/**
+ * struct drm_rect - two dimensional rectangle
+ * @x1: horizontal starting coordinate (inclusive)
+ * @x2: horizontal ending coordinate (exclusive)
+ * @y1: vertical starting coordinate (inclusive)
+ * @y2: vertical ending coordinate (exclusive)
+ */
+struct drm_rect {
+	int x1, y1, x2, y2;
+};
+
+/**
+ * drm_rect_adjust_size - adjust the size of the rectangle
+ * @r: rectangle to be adjusted
+ * @dw: horizontal adjustment
+ * @dh: vertical adjustment
+ *
+ * Change the size of rectangle @r by @dw in the horizontal direction,
+ * and by @dh in the vertical direction, while keeping the center
+ * of @r stationary.
+ *
+ * Positive @dw and @dh increase the size, negative values decrease it.
+ */
+static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
+{
+	r->x1 -= dw >> 1;
+	r->y1 -= dh >> 1;
+	r->x2 += (dw + 1) >> 1;
+	r->y2 += (dh + 1) >> 1;
+}
+
+/**
+ * drm_rect_translate - translate the rectangle
+ * @r: rectangle to be translated
+ * @dx: horizontal translation
+ * @dy: vertical translation
+ *
+ * Move rectangle @r by @dx in the horizontal direction,
+ * and by @dy in the vertical direction.
+ */
+static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
+{
+	r->x1 += dx;
+	r->y1 += dy;
+	r->x2 += dx;
+	r->y2 += dy;
+}
+
+/**
+ * drm_rect_downscale - downscale a rectangle
+ * @r: rectangle to be downscaled
+ * @horz: horizontal downscale factor
+ * @vert: vertical downscale factor
+ *
+ * Divide the coordinates of rectangle @r by @horz and @vert.
+ */
+static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
+{
+	r->x1 /= horz;
+	r->y1 /= vert;
+	r->x2 /= horz;
+	r->y2 /= vert;
+}
+
+/**
+ * drm_rect_width - determine the rectangle width
+ * @r: rectangle whose width is returned
+ *
+ * RETURNS:
+ * The width of the rectangle.
+ */
+static inline int drm_rect_width(const struct drm_rect *r)
+{
+	return r->x2 - r->x1;
+}
+
+/**
+ * drm_rect_height - determine the rectangle height
+ * @r: rectangle whose height is returned
+ *
+ * RETURNS:
+ * The height of the rectangle.
+ */
+static inline int drm_rect_height(const struct drm_rect *r)
+{
+	return r->y2 - r->y1;
+}
+
+/**
+ * drm_rect_visible - determine if the rectangle is visible
+ * @r: rectangle whose visibility is returned
+ *
+ * RETURNS:
+ * %true if the rectangle is visible, %false otherwise.
+ */
+static inline bool drm_rect_visible(const struct drm_rect *r)
+{
+	return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
+}
+
+/**
+ * drm_rect_equals - determine if two rectangles are equal
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles are equal, %false otherwise.
+ */
+static inline bool drm_rect_equals(const struct drm_rect *r1,
+				   const struct drm_rect *r2)
+{
+	return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
+		r1->y1 == r2->y1 && r1->y2 == r2->y2;
+}
+
+bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+			  const struct drm_rect *clip,
+			  int hscale, int vscale);
+int drm_rect_calc_hscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_hscale, int max_hscale);
+int drm_rect_calc_vscale(const struct drm_rect *src,
+			 const struct drm_rect *dst,
+			 int min_vscale, int max_vscale);
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_hscale, int max_hscale);
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+				 struct drm_rect *dst,
+				 int min_vscale, int max_vscale);
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+
+#endif
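
The helpers above operate on half-open rectangles (x1/y1 inclusive, x2/y2 exclusive), so width and height are simply x2 - x1 and y2 - y1. A small usage sketch with made-up coordinates, clipping a destination rectangle against a CRTC area:

#include <drm/drm_rect.h>

static void clip_example(void)
{
	struct drm_rect dst = { .x1 = -50, .y1 = 20, .x2 = 250, .y2 = 220 };
	const struct drm_rect crtc_area = { .x1 = 0, .y1 = 0, .x2 = 200, .y2 = 200 };

	drm_rect_translate(&dst, 10, 0);	/* shift right by 10 pixels */

	/* trim dst to the CRTC; returns false if nothing remains visible */
	if (!drm_rect_intersect(&dst, &crtc_area))
		return;

	/* here dst is { 0, 20, 200, 200 }: 200 wide, 180 tall */
	drm_rect_debug_print(&dst, false);
}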
diff --git a/include/linux/io.h b/include/linux/io.h
index 069e407..f4f42fa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -76,4 +76,29 @@
 #define arch_has_dev_port()     (1)
 #endif
 
+/*
+ * Some systems (x86 without PAT) have a somewhat reliable way to mark a
+ * physical address range such that uncached mappings will actually
+ * end up write-combining.  This facility should be used in conjunction
+ * with pgprot_writecombine, ioremap_wc, or set_memory_wc, since it has
+ * no effect if the per-page mechanisms are functional.
+ * (On x86 without PAT, these functions manipulate MTRRs.)
+ *
+ * arch_phys_wc_del(0) or arch_phys_wc_del(any error code) is guaranteed
+ * to have no effect.
+ */
+#ifndef arch_phys_wc_add
+static inline int __must_check arch_phys_wc_add(unsigned long base,
+						unsigned long size)
+{
+	return 0;  /* It worked (i.e. did nothing). */
+}
+
+static inline void arch_phys_wc_del(int handle)
+{
+}
+
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
 #endif /* _LINUX_IO_H */
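
The vmwgfx and uvesafb changes earlier in this series show the intended calling pattern: request write-combining once at init, keep the returned cookie, and hand it back at teardown. A condensed sketch with hypothetical driver fields, not any specific driver:

#include <linux/io.h>

struct wc_example {
	unsigned long fb_base;	/* physical framebuffer address */
	unsigned long fb_size;
	int wc_handle;		/* cookie from arch_phys_wc_add() */
	void __iomem *fb_virt;
};

static int wc_example_init(struct wc_example *ex)
{
	/* Harmless no-op on PAT systems or when MTRRs are unavailable;
	 * a zero or negative return is still safe to pass to arch_phys_wc_del(). */
	ex->wc_handle = arch_phys_wc_add(ex->fb_base, ex->fb_size);

	ex->fb_virt = ioremap_wc(ex->fb_base, ex->fb_size);
	if (ex->fb_virt == NULL) {
		arch_phys_wc_del(ex->wc_handle);
		return -ENOMEM;
	}
	return 0;
}

static void wc_example_fini(struct wc_example *ex)
{
	iounmap(ex->fb_virt);
	arch_phys_wc_del(ex->wc_handle);
}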
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
new file mode 100644
index 0000000..80587fd
--- /dev/null
+++ b/include/linux/platform_data/rcar-du.h
@@ -0,0 +1,54 @@
+/*
+ * rcar_du.h  --  R-Car Display Unit DRM driver
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_H__
+#define __RCAR_DU_H__
+
+#include <drm/drm_mode.h>
+
+enum rcar_du_encoder_type {
+	RCAR_DU_ENCODER_UNUSED = 0,
+	RCAR_DU_ENCODER_VGA,
+	RCAR_DU_ENCODER_LVDS,
+};
+
+struct rcar_du_panel_data {
+	unsigned int width_mm;		/* Panel width in mm */
+	unsigned int height_mm;		/* Panel height in mm */
+	struct drm_mode_modeinfo mode;
+};
+
+struct rcar_du_encoder_lvds_data {
+	struct rcar_du_panel_data panel;
+};
+
+struct rcar_du_encoder_vga_data {
+	/* TODO: Add DDC information for EDID retrieval */
+};
+
+struct rcar_du_encoder_data {
+	enum rcar_du_encoder_type encoder;
+	unsigned int output;
+
+	union {
+		struct rcar_du_encoder_lvds_data lvds;
+		struct rcar_du_encoder_vga_data vga;
+	} u;
+};
+
+struct rcar_du_platform_data {
+	struct rcar_du_encoder_data *encoders;
+	unsigned int num_encoders;
+};
+
+#endif /* __RCAR_DU_H__ */
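
The structures above describe the board-level wiring a platform hands to the driver through platform data. A hypothetical board file might fill them in as below; the panel dimensions, mode timings, and output indices are made-up example values:

#include <linux/kernel.h>
#include <linux/platform_data/rcar-du.h>

static struct rcar_du_encoder_data board_du_encoders[] = {
	{
		.encoder = RCAR_DU_ENCODER_LVDS,
		.output = 0,
		.u.lvds.panel = {
			.width_mm = 210,
			.height_mm = 158,
			.mode = {
				.clock = 65000,		/* kHz */
				.hdisplay = 1024,
				.hsync_start = 1048,
				.hsync_end = 1184,
				.htotal = 1344,
				.vdisplay = 768,
				.vsync_start = 771,
				.vsync_end = 777,
				.vtotal = 806,
			},
		},
	}, {
		.encoder = RCAR_DU_ENCODER_VGA,
		.output = 1,
	},
};

static const struct rcar_du_platform_data board_du_pdata = {
	.encoders = board_du_encoders,
	.num_encoders = ARRAY_SIZE(board_du_encoders),
};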
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 07d5941..923ed7f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -305,7 +305,7 @@
 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
 #define I915_PARAM_HAS_SEMAPHORES	 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
-#define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
+#define I915_PARAM_HAS_VEBOX		 22
 #define I915_PARAM_HAS_SECURE_BATCHES	 23
 #define I915_PARAM_HAS_PINNED_BATCHES	 24
 #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
@@ -660,6 +660,7 @@
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)
 #define I915_EXEC_BLT                    (3<<0)
+#define I915_EXEC_VEBOX                  (4<<0)
 
 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
  * Gen6+ only supports relative addressing to dynamic state (default) and
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
index 5d0259b..28d9d0d 100644
--- a/include/video/display_timing.h
+++ b/include/video/display_timing.h
@@ -27,6 +27,7 @@
 	DISPLAY_FLAGS_PIXDATA_NEGEDGE	= BIT(7),
 	DISPLAY_FLAGS_INTERLACED	= BIT(8),
 	DISPLAY_FLAGS_DOUBLESCAN	= BIT(9),
+	DISPLAY_FLAGS_DOUBLECLK		= BIT(10),
 };
 
 /*
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 1a91850..30f5362 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -134,6 +134,7 @@
 
 	int mode_idx;
 	struct vbe_crtc_ib crtc;
+	int mtrr_handle;
 };
 
 #endif /* _UVESAFB_H */