Merge branch 'msm-next' of git://people.freedesktop.org/~robclark/linux into drm-next

prime support, inactive rework, render nodes

* 'msm-next' of git://people.freedesktop.org/~robclark/linux:
  drm/msm/mdp4: page_flip cleanups/fixes
  drm/msm: EBUSY status handling in msm_gem_fault()
  drm/msm: rework inactive-work
  drm/msm: add plane support
  drm/msm: resync generated headers
  drm/msm: support render nodes
  drm/msm: prime support
diff --git a/MAINTAINERS b/MAINTAINERS
index 3438384..63dbfc38 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2834,7 +2834,9 @@
 L:	linux-tegra@vger.kernel.org
 T:	git git://anongit.freedesktop.org/tegra/linux.git
 S:	Supported
+F:	drivers/gpu/drm/tegra/
 F:	drivers/gpu/host1x/
+F:	include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
@@ -8917,61 +8919,14 @@
 S:	Maintained
 F:	drivers/net/usb/rtl8150.c
 
-USB SERIAL BELKIN F5U103 DRIVER
-M:	William Greathouse <wgreathouse@smva.com>
+USB SERIAL SUBSYSTEM
+M:	Johan Hovold <jhovold@gmail.com>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
-F:	drivers/usb/serial/belkin_sa.*
-
-USB SERIAL CYPRESS M8 DRIVER
-M:	Lonnie Mendez <dignome@gmail.com>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-W:	http://geocities.com/i0xox0i
-W:	http://firstlight.net/cvs
-F:	drivers/usb/serial/cypress_m8.*
-
-USB SERIAL CYBERJACK DRIVER
-M:	Matthias Bruestle and Harald Welte <support@reiner-sct.com>
-W:	http://www.reiner-sct.de/support/treiber_cyberjack.php
-S:	Maintained
-F:	drivers/usb/serial/cyberjack.c
-
-USB SERIAL DIGI ACCELEPORT DRIVER
-M:	Peter Berger <pberger@brimson.com>
-M:	Al Borchers <alborchers@steinerpoint.com>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-F:	drivers/usb/serial/digi_acceleport.c
-
-USB SERIAL DRIVER
-M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	linux-usb@vger.kernel.org
-S:	Supported
 F:	Documentation/usb/usb-serial.txt
-F:	drivers/usb/serial/generic.c
-F:	drivers/usb/serial/usb-serial.c
+F:	drivers/usb/serial/
 F:	include/linux/usb/serial.h
 
-USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
-M:	Gary Brubaker <xavyer@ix.netcom.com>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-F:	drivers/usb/serial/empeg.c
-
-USB SERIAL KEYSPAN DRIVER
-M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-F:	drivers/usb/serial/*keyspan*
-
-USB SERIAL WHITEHEAT DRIVER
-M:	Support Department <support@connecttech.com>
-L:	linux-usb@vger.kernel.org
-W:	http://www.connecttech.com
-S:	Supported
-F:	drivers/usb/serial/whiteheat*
-
 USB SMSC75XX ETHERNET DRIVER
 M:	Steve Glendinning <steve.glendinning@shawell.net>
 L:	netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 868c0eb..67077ad 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d63f3de..0c14d8a 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -17,7 +17,7 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -27,7 +27,7 @@
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
-	pgd = pgd_offset_fast(mm, address);
+	pgd = pgd_offset_fast(current->active_mm, address);
 	pgd_k = pgd_offset_k(address);
 
 	if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(mm, address);
+		ret = handle_vmalloc_fault(address);
 		if (unlikely(ret))
 			goto bad_area_nosemaphore;
 		else
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 45f1ffc..24cdf64 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -971,11 +971,11 @@
 [C(LL)] = {
 	[C(OP_READ)] = {
 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
-		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
 	},
 	[C(OP_WRITE)] = {
 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
-		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
 	},
 },
 [C(ITLB)] = {
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index c69da37..5b28e81 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -473,7 +473,7 @@
 {
 	int cpu;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
 		fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
 		fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
 	}
@@ -574,8 +574,9 @@
 		/* FIXME */
 		int i;
 #if defined(CONFIG_MIPS_MT_SMP)
-		gic_call_int_base = GIC_NUM_INTRS - NR_CPUS;
-		gic_resched_int_base = gic_call_int_base - NR_CPUS;
+		gic_call_int_base = GIC_NUM_INTRS -
+			(NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
+		gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
 		fill_ipi_map();
 #endif
 		gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
@@ -599,7 +600,7 @@
 		printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
 		write_c0_status(0x1100dc00);
 		printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
-		for (i = 0; i < NR_CPUS; i++) {
+		for (i = 0; i < nr_cpu_ids; i++) {
 			arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
 					 GIC_RESCHED_INT(i), &irq_resched);
 			arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index e49241a..2027857 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -126,7 +126,7 @@
 		return -ENOENT;
 	}
 
-	rt->membase = devm_request_and_ioremap(&pdev->dev, res);
+	rt->membase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(rt->membase))
 		return PTR_ERR(rt->membase);
 
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 829df49..41ebbfe 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -40,9 +40,11 @@
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	char *end, buf[sizeof("nnnnn\0")];
+	size_t size;
 	int tmp;
 
-	if (copy_from_user(buf, buffer, count))
+	size = min(count, sizeof(buf));
+	if (copy_from_user(buf, buffer, size))
 		return -EFAULT;
 
 	tmp = simple_strtol(buf, &end, 0);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0da5200..b3e18f8 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -128,7 +128,8 @@
 do {									\
 	typedef typeof(var) pao_T__;					\
 	const int pao_ID__ = (__builtin_constant_p(val) &&		\
-			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
+			      ((val) == 1 || (val) == -1)) ?		\
+				(int)(val) : 0;				\
 	if (0) {							\
 		pao_T__ pao_tmp__;					\
 		pao_tmp__ = (val);					\
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9d84491..8a87a32 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1276,16 +1276,16 @@
 static int __kprobes
 perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-	int ret;
 	u64 start_clock;
 	u64 finish_clock;
+	int ret;
 
 	if (!atomic_read(&active_events))
 		return NMI_DONE;
 
-	start_clock = local_clock();
+	start_clock = sched_clock();
 	ret = x86_pmu.handle_irq(regs);
-	finish_clock = local_clock();
+	finish_clock = sched_clock();
 
 	perf_sample_event_took(finish_clock - start_clock);
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a0e2a8a..b2046e4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -609,7 +609,7 @@
 
 struct dentry *kvm_init_debugfs(void)
 {
-	d_kvm_debug = debugfs_create_dir("kvm", NULL);
+	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
 	if (!d_kvm_debug)
 		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ba77ebc..6fcb49c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -113,10 +113,10 @@
 		u64 before, delta, whole_msecs;
 		int remainder_ns, decimal_msecs, thishandled;
 
-		before = local_clock();
+		before = sched_clock();
 		thishandled = a->handler(type, regs);
 		handled += thishandled;
-		delta = local_clock() - before;
+		delta = sched_clock() - before;
 		trace_nmi_handler(a->handler, (int)delta, thishandled);
 
 		if (delta < nmi_longest_ns)
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index de1dfa1..21dbe6b 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1122,7 +1122,7 @@
  * a3: exctable, original value in excsave1
  */
 
-fast_syscall_spill_registers_fixup:
+ENTRY(fast_syscall_spill_registers_fixup)
 
 	rsr	a2, windowbase	# get current windowbase (a2 is saved)
 	xsr	a0, depc	# restore depc and a0
@@ -1134,22 +1134,26 @@
 	 */
 
 	xsr	a3, excsave1	# get spill-mask
-	slli	a2, a3, 1	# shift left by one
+	slli	a3, a3, 1	# shift left by one
 
-	slli	a3, a2, 32-WSBITS
-	src	a2, a2, a3	# a1 = xxwww1yyxxxwww1yy......
+	slli	a2, a3, 32-WSBITS
+	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
 	wsr	a2, windowstart	# set corrected windowstart
 
-	rsr	a3, excsave1
-	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
-	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)
+	srli	a3, a3, 1
+	rsr	a2, excsave1
+	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
+	xsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
+	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
+	xsr	a2, excsave1
 
 	/* Return to the original (user task) WINDOWBASE.
 	 * We leave the following frame behind:
 	 * a0, a1, a2	same
-	 * a3:		trashed (saved in excsave_1)
+	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
 	 * depc:	depc (we have to return to that address)
-	 * excsave_1:	a3
+	 * excsave_1:	exctable
 	 */
 
 	wsr	a3, windowbase
@@ -1159,9 +1163,9 @@
 	 *  a0: return address
 	 *  a1: used, stack pointer
 	 *  a2: kernel stack pointer
-	 *  a3: available, saved in EXCSAVE_1
+	 *  a3: available
 	 *  depc: exception address
-	 *  excsave: a3
+	 *  excsave: exctable
 	 * Note: This frame might be the same as above.
 	 */
 
@@ -1181,9 +1185,12 @@
 	rsr	a0, exccause
 	addx4	a0, a0, a3              	# find entry in table
 	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
 	jx	a0
 
-fast_syscall_spill_registers_fixup_return:
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
 
 	/* When we return here, all registers have been restored (a2: DEPC) */
 
@@ -1191,13 +1198,13 @@
 
 	/* Restore fixup handler. */
 
-	xsr	a3, excsave1
-	movi	a2, fast_syscall_spill_registers_fixup
-	s32i	a2, a3, EXC_TABLE_FIXUP
-	s32i	a0, a3, EXC_TABLE_DOUBLE_SAVE
-	rsr	a2, windowbase
-	s32i	a2, a3, EXC_TABLE_PARAM
-	l32i	a2, a3, EXC_TABLE_KSTK
+	rsr	a2, excsave1
+	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
+	movi	a3, fast_syscall_spill_registers_fixup
+	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a3, windowbase
+	s32i	a3, a2, EXC_TABLE_PARAM
+	l32i	a2, a2, EXC_TABLE_KSTK
 
 	/* Load WB at the time the exception occurred. */
 
@@ -1206,8 +1213,12 @@
 	wsr	a3, windowbase
 	rsync
 
+	rsr	a3, excsave1
+	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
+
 	rfde
 
+ENDPROC(fast_syscall_spill_registers_fixup_return)
 
 /*
  * spill all registers.
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 718eca1..98b67d5 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -341,7 +341,7 @@
 
 	sp = regs->areg[1];
 
-	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
+	if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
 		sp = current->sas_ss_sp + current->sas_ss_size;
 	}
 
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 56f88b7..e9e1aad 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -737,7 +737,8 @@
 		return 1;
 	}
 
-	if ((new = alloc_bootmem(sizeof new)) == NULL) {
+	new = alloc_bootmem(sizeof(*new));
+	if (new == NULL) {
 		printk("Alloc_bootmem failed\n");
 		return 1;
 	}
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 51410c2..4d978a3 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -27,6 +27,14 @@
  */
 
 #define SRC_CR			0x00U
+#define SRC_CR_T0_ENSEL		BIT(15)
+#define SRC_CR_T1_ENSEL		BIT(17)
+#define SRC_CR_T2_ENSEL		BIT(19)
+#define SRC_CR_T3_ENSEL		BIT(21)
+#define SRC_CR_T4_ENSEL		BIT(23)
+#define SRC_CR_T5_ENSEL		BIT(25)
+#define SRC_CR_T6_ENSEL		BIT(27)
+#define SRC_CR_T7_ENSEL		BIT(29)
 #define SRC_XTALCR		0x0CU
 #define SRC_XTALCR_XTALTIMEN	BIT(20)
 #define SRC_XTALCR_SXTALDIS	BIT(19)
@@ -543,6 +551,19 @@
 		       __func__, np->name);
 		return;
 	}
+
+	/* Set all timers to use the 2.4 MHz TIMCLK */
+	val = readl(src_base + SRC_CR);
+	val |= SRC_CR_T0_ENSEL;
+	val |= SRC_CR_T1_ENSEL;
+	val |= SRC_CR_T2_ENSEL;
+	val |= SRC_CR_T3_ENSEL;
+	val |= SRC_CR_T4_ENSEL;
+	val |= SRC_CR_T5_ENSEL;
+	val |= SRC_CR_T6_ENSEL;
+	val |= SRC_CR_T7_ENSEL;
+	writel(val, src_base + SRC_CR);
+
 	val = readl(src_base + SRC_XTALCR);
 	pr_info("SXTALO is %s\n",
 		(val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index fc777bd..81a202d 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -39,8 +39,8 @@
 };
 
 static const u32 a370_tclk_freqs[] __initconst = {
-	16600000,
-	20000000,
+	166000000,
+	200000000,
 };
 
 static u32 __init a370_get_tclk_freq(void __iomem *sar)
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 5bb848c..81dd31a 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -49,7 +49,7 @@
 #define SOCFPGA_L4_SP_CLK		"l4_sp_clk"
 #define SOCFPGA_NAND_CLK		"nand_clk"
 #define SOCFPGA_NAND_X_CLK		"nand_x_clk"
-#define SOCFPGA_MMC_CLK			"mmc_clk"
+#define SOCFPGA_MMC_CLK			"sdmmc_clk"
 #define SOCFPGA_DB_CLK			"gpio_db_clk"
 
 #define div_mask(width)	((1 << (width)) - 1)
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 67ccf4a..f5e4c21 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -107,7 +107,7 @@
 
 	vco = icst_hz_to_vco(icst->params, rate);
 	icst->rate = icst_hz(icst->params, vco);
-	vco_set(icst->vcoreg, icst->lockreg, vco);
+	vco_set(icst->lockreg, icst->vcoreg, vco);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 955555d..f864275 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
         select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@
 	  selected, the module will be called i810.  AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915.  AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain.  Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default.  You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/armada/Kconfig"
+
 source "drivers/gpu/drm/rcar-du/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,5 @@
 source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f089adf..cc08b84 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +50,12 @@
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644
index 0000000..40d3715
--- /dev/null
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+	tristate "DRM support for Marvell Armada SoCs"
+	depends on DRM && HAVE_CLK && ARM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_KMS_HELPER
+	help
+	  Support the "LCD" controllers found on the Marvell Armada 510
+	  devices.  There are two controllers on the device, each controller
+	  supports graphics and video overlays.
+
+	  This driver provides no built-in acceleration; acceleration is
+	  performed by other IP found on the SoC.  This driver provides
+	  kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+	bool "Support TDA1998X HDMI output"
+	depends on DRM_ARMADA != n
+	depends on I2C && DRM_I2C_NXP_TDA998X = y
+	default y
+	help
+	  Support the TDA1998x HDMI output device found on the Solid-Run
+	  CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644
index 0000000..d6f43e0
--- /dev/null
+++ b/drivers/gpu/drm/armada/Makefile
@@ -0,0 +1,7 @@
+armada-y	:= armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+		   armada_gem.o armada_output.o armada_overlay.o \
+		   armada_slave.o
+armada-y	+= armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644
index 0000000..59948ef
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+	priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+	if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+		priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+	return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+	/* Lower the watermark so as to eliminate jitter at higher bandwidths */
+	armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+	return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that.  The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We currently are pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and erroring LCD1.  This needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+	const struct drm_display_mode *mode, uint32_t *sclk)
+{
+	struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+	struct clk *clk = priv->extclk[0];
+	int ret;
+
+	if (dcrtc->num == 1)
+		return -EINVAL;
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	if (dcrtc->clk != clk) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			return ret;
+		dcrtc->clk = clk;
+	}
+
+	if (sclk) {
+		uint32_t rate, ref, div;
+
+		rate = mode->clock * 1000;
+		ref = clk_round_rate(clk, rate);
+		div = DIV_ROUND_UP(ref, rate);
+		if (div < 1)
+			div = 1;
+
+		clk_set_rate(clk, ref);
+		*sclk = div | SCLK_510_EXTCLK1;
+	}
+
+	return 0;
+}
+
+const struct armada_variant armada510_ops = {
+	.has_spu_adv_reg = true,
+	.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+	.init = armada510_init,
+	.crtc_init = armada510_crtc_init,
+	.crtc_compute_clock = armada510_crtc_compute_clock,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644
index 0000000..d8e3982
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+	struct drm_pending_vblank_event *event;
+	struct armada_regs regs[4];
+	struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+	CSC_AUTO = 0,
+	CSC_YUV_CCIR601 = 1,
+	CSC_YUV_CCIR709 = 2,
+	CSC_RGB_COMPUTER = 1,
+	CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing.  Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640  1080 1084 1094 1125
+ * Which get translated to:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640   540  542  547  562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC.  Total clocks per
+ * line: 2640.  The odd frame, the first active line is at line 21, and
+ * the even frame, the first active line is 584.
+ *
+ * LN:    560     561     562     563             567     568    569
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ *  22 blanking lines.  VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN:    1123   1124    1125      1               5       6      7
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ *  23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ *  Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ *  Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ *   vtotal = mode->crtc_vtotal + 1;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ *   vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ *   vtotal = mode->crtc_vtotal;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ *   vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
+
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+	while (regs->offset != ~0) {
+		void __iomem *reg = dcrtc->base + regs->offset;
+		uint32_t val;
+
+		val = regs->mask;
+		if (val != 0)
+			val &= readl_relaxed(reg);
+		writel_relaxed(val | regs->val, reg);
+		++regs;
+	}
+}
+
+#define dpms_blanked(dpms)	((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+	uint32_t dumb_ctrl;
+
+	dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+	if (!dpms_blanked(dcrtc->dpms))
+		dumb_ctrl |= CFG_DUMB_ENA;
+
+	/*
+	 * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+	 * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
+	 * force LCD_D[23:0] to output blank color, overriding the GPIO or
+	 * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
+	 */
+	if (dpms_blanked(dcrtc->dpms) &&
+	    (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+		dumb_ctrl &= ~DUMB_MASK;
+		dumb_ctrl |= DUMB_BLANK;
+	}
+
+	/*
+	 * The documentation doesn't indicate what the normal state of
+	 * the sync signals are.  Sebastian Hesselbarth kindly probed
+	 * these signals on his board to determine their state.
+	 *
+	 * The non-inverted state of the sync signals is active high.
+	 * Setting these bits makes the appropriate signal active low.
+	 */
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+		dumb_ctrl |= CFG_INV_CSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+		dumb_ctrl |= CFG_INV_HSYNC;
+	if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+		dumb_ctrl |= CFG_INV_VSYNC;
+
+	if (dcrtc->dumb_ctrl != dumb_ctrl) {
+		dcrtc->dumb_ctrl = dumb_ctrl;
+		writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+	int x, int y, struct armada_regs *regs, bool interlaced)
+{
+	struct armada_gem_object *obj = drm_fb_obj(fb);
+	unsigned pitch = fb->pitches[0];
+	unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+	uint32_t addr_odd, addr_even;
+	unsigned i = 0;
+
+	DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+		pitch, x, y, fb->bits_per_pixel);
+
+	addr_odd = addr_even = obj->dev_addr + offset;
+
+	if (interlaced) {
+		addr_even += pitch;
+		pitch *= 2;
+	}
+
+	/* write offset, base, and pitch */
+	armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+	armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+	armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+	return i;
+}
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+	struct armada_frame_work *work)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	unsigned long flags;
+	int ret;
+
+	ret = drm_vblank_get(dev, dcrtc->num);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter\n");
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (!dcrtc->frame_work)
+		dcrtc->frame_work = work;
+	else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (ret)
+		drm_vblank_put(dev, dcrtc->num);
+
+	return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+	struct armada_frame_work *work = dcrtc->frame_work;
+
+	dcrtc->frame_work = NULL;
+
+	armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+	if (work->event)
+		drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+	drm_vblank_put(dev, dcrtc->num);
+
+	/* Finally, queue the process-half of the cleanup. */
+	__armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+	kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+	struct drm_framebuffer *fb, bool force)
+{
+	struct armada_frame_work *work;
+
+	if (!fb)
+		return;
+
+	if (force) {
+		/* Display is disabled, so just drop the old fb */
+		drm_framebuffer_unreference(fb);
+		return;
+	}
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (work) {
+		int i = 0;
+		work->event = NULL;
+		work->old_fb = fb;
+		armada_reg_queue_end(work->regs, i);
+
+		if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+			return;
+
+		kfree(work);
+	}
+
+	/*
+	 * Oops - just drop the reference immediately and hope for
+	 * the best.  The worst that will happen is the buffer gets
+	 * reused before it has finished being displayed.
+	 */
+	drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+	struct drm_device *dev = dcrtc->crtc.dev;
+
+	/*
+	 * Tell the DRM core that vblank IRQs aren't going to happen for
+	 * a while.  This cleans up any pending vblank events for us.
+	 */
+	drm_vblank_off(dev, dcrtc->num);
+
+	/* Handle any pending flip event. */
+	spin_lock_irq(&dev->event_lock);
+	if (dcrtc->frame_work)
+		armada_drm_crtc_complete_frame_work(dcrtc);
+	spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+	int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+	int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (dpms_blanked(dpms))
+			armada_drm_vblank_off(dcrtc);
+	}
+}
+
+/*
+ * Prepare for a mode set.  Turn off overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct drm_plane *plane;
+
+	/*
+	 * If we have an overlay plane associated with this CRTC, disable
+	 * it before the modeset to avoid its coordinates being outside
+	 * the new mode parameters.  DRM doesn't provide help with this.
+	 */
+	plane = dcrtc->plane;
+	if (plane) {
+		struct drm_framebuffer *fb = plane->fb;
+
+		plane->funcs->disable_plane(plane);
+		plane->fb = NULL;
+		plane->crtc = NULL;
+		drm_framebuffer_unreference(fb);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+		dcrtc->dpms = DRM_MODE_DPMS_ON;
+		armada_drm_crtc_update(dcrtc);
+	}
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	int ret;
+
+	/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+	if (!priv->variant->has_spu_adv_reg &&
+	    adj->flags & DRM_MODE_FLAG_INTERLACE)
+		return false;
+
+	/* Check whether the display mode is possible */
+	ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+	struct armada_vbl_event *e, *n;
+	void __iomem *base = dcrtc->base;
+
+	if (stat & DMA_FF_UNDERFLOW)
+		DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+	if (stat & GRA_FF_UNDERFLOW)
+		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+	if (stat & VSYNC_IRQ)
+		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+	spin_lock(&dcrtc->irq_lock);
+
+	list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+		list_del_init(&e->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		e->fn(dcrtc, e->data);
+	}
+
+	if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+		int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+		uint32_t val;
+
+		writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+		writel_relaxed(dcrtc->v[i].spu_v_h_total,
+			       base + LCD_SPUT_V_H_TOTAL);
+
+		val = readl_relaxed(base + LCD_SPU_ADV_REG);
+		val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+		val |= dcrtc->v[i].spu_adv_reg;
+		writel_relaxed(val, base + LCD_SPU_ADV_REG);
+	}
+
+	if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+		writel_relaxed(dcrtc->cursor_hw_pos,
+			       base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+		writel_relaxed(dcrtc->cursor_hw_sz,
+			       base + LCD_SPU_HWC_HPXL_VLN);
+		armada_updatel(CFG_HWC_ENA,
+			       CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+			       base + LCD_SPU_DMA_CTRL0);
+		dcrtc->cursor_update = false;
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	}
+
+	spin_unlock(&dcrtc->irq_lock);
+
+	if (stat & GRA_FRAME_IRQ) {
+		struct drm_device *dev = dcrtc->crtc.dev;
+
+		spin_lock(&dev->event_lock);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock(&dev->event_lock);
+
+		wake_up(&dcrtc->frame_wait);
+	}
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if (dcrtc->irq_ena & mask) {
+		dcrtc->irq_ena &= ~mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	}
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+	if ((dcrtc->irq_ena & mask) != mask) {
+		dcrtc->irq_ena |= mask;
+		writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+		if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+			writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	}
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+	struct drm_display_mode *adj = &dcrtc->crtc.mode;
+	uint32_t val = 0;
+
+	if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+		val |= CFG_CSC_YUV_CCIR709;
+	if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	/*
+	 * In auto mode, set the colorimetry, based upon the HDMI spec.
+	 * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+	 * ITU601.  It may be more appropriate to set this depending on
+	 * the source - but what if the graphic frame is YUV and the
+	 * video frame is RGB?
+	 */
+	if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+	     !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+	    (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+		if (dcrtc->csc_yuv_mode == CSC_AUTO)
+			val |= CFG_CSC_YUV_CCIR709;
+	}
+
+	/*
+	 * We assume we're connected to a TV-like device, so the YUV->RGB
+	 * conversion should produce a limited range.  We should set this
+	 * depending on the connectors attached to this CRTC, and what
+	 * kind of device they report being connected.
+	 */
+	if (dcrtc->csc_rgb_mode == CSC_AUTO)
+		val |= CFG_CSC_RGB_STUDIO;
+
+	return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+	struct drm_display_mode *mode, struct drm_display_mode *adj,
+	int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[17];
+	uint32_t lm, rm, tm, bm, val, sclk;
+	unsigned long flags;
+	unsigned i;
+	bool interlaced;
+
+	drm_framebuffer_reference(crtc->fb);
+
+	interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+	i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
+	rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+	lm = adj->crtc_htotal - adj->crtc_hsync_end;
+	bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+	tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+	DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+		adj->crtc_hdisplay,
+		adj->crtc_hsync_start,
+		adj->crtc_hsync_end,
+		adj->crtc_htotal, lm, rm);
+	DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+		adj->crtc_vdisplay,
+		adj->crtc_vsync_start,
+		adj->crtc_vsync_end,
+		adj->crtc_vtotal, tm, bm);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+	crtc->mode = *adj;
+
+	val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+	if (val != dcrtc->dumb_ctrl) {
+		dcrtc->dumb_ctrl = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+	}
+
+	/* Now compute the divider for real */
+	priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+	/* Ensure graphic fifo is enabled */
+	armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+	armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+	if (interlaced ^ dcrtc->interlaced) {
+		if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+			drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+		else
+			drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+		dcrtc->interlaced = interlaced;
+	}
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+	/* Even interlaced/progressive frame */
+	dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+				    adj->crtc_htotal;
+	dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+	val = adj->crtc_hsync_start;
+	dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+		priv->variant->spu_adv_reg;
+
+	if (interlaced) {
+		/* Odd interlaced frame */
+		dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+						(1 << 16);
+		dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+		val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+		dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+			priv->variant->spu_adv_reg;
+	} else {
+		dcrtc->v[0] = dcrtc->v[1];
+	}
+
+	val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+	armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+	armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+	armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+			   LCD_SPUT_V_H_TOTAL);
+
+	if (priv->variant->has_spu_adv_reg) {
+		armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+				     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+				     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+	}
+
+	val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+	val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+	val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+	if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+		val |= CFG_PALETTE_ENA;
+
+	if (interlaced)
+		val |= CFG_GRA_FTOGGLE;
+
+	armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+			     CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+					 CFG_SWAPYU | CFG_YUV2RGB) |
+			     CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+			     LCD_SPU_DMA_CTRL0);
+
+	val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+	armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+	val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+	armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+	armada_reg_queue_end(regs, i);
+
+	armada_drm_crtc_update_regs(dcrtc, regs);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+	armada_drm_crtc_update(dcrtc);
+
+	drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+	struct drm_framebuffer *old_fb)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_regs regs[4];
+	unsigned i;
+
+	i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(regs, i);
+
+	/* Wait for pending flips to complete */
+	wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+	/* Take a reference to the new fb as we're using it */
+	drm_framebuffer_reference(crtc->fb);
+
+	/* Update the base in the CRTC */
+	armada_drm_crtc_update_regs(dcrtc, regs);
+
+	/* Drop our previously held reference */
+	armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+	return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+	armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+	/* Power down most RAMs and FIFOs */
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+	.dpms		= armada_drm_crtc_dpms,
+	.prepare	= armada_drm_crtc_prepare,
+	.commit		= armada_drm_crtc_commit,
+	.mode_fixup	= armada_drm_crtc_mode_fixup,
+	.mode_set	= armada_drm_crtc_mode_set,
+	.mode_set_base	= armada_drm_crtc_mode_set_base,
+	.load_lut	= armada_drm_crtc_load_lut,
+	.disable	= armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+	unsigned stride, unsigned width, unsigned height)
+{
+	uint32_t addr;
+	unsigned y;
+
+	addr = SRAM_HWC32_RAM1;
+	for (y = 0; y < height; y++) {
+		uint32_t *p = &pix[y * stride];
+		unsigned x;
+
+		for (x = 0; x < width; x++, p++) {
+			uint32_t val = *p;
+
+			val = (val & 0xff00ff00) |
+			      (val & 0x000000ff) << 16 |
+			      (val & 0x00ff0000) >> 16;
+
+			writel_relaxed(val,
+				       base + LCD_SPU_SRAM_WRDAT);
+			writel_relaxed(addr | SRAM_WRITE,
+				       base + LCD_SPU_SRAM_CTRL);
+			addr += 1;
+			if ((addr & 0x00ff) == 0)
+				addr += 0xf00;
+			if ((addr & 0x30ff) == 0)
+				addr = SRAM_HWC32_RAM2;
+		}
+	}
+}
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+	unsigned addr;
+
+	for (addr = 0; addr < 256; addr++) {
+		/* write the default value */
+		writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+		writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+			       base + LCD_SPU_SRAM_CTRL);
+	}
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+	uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+	uint32_t yoff, yscr, h = dcrtc->cursor_h;
+	uint32_t para1;
+
+	/*
+	 * Calculate the visible width and height of the cursor,
+	 * screen position, and the position in the cursor bitmap.
+	 */
+	if (dcrtc->cursor_x < 0) {
+		xoff = -dcrtc->cursor_x;
+		xscr = 0;
+		w -= min(xoff, w);
+	} else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+		w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+	} else {
+		xoff = 0;
+		xscr = dcrtc->cursor_x;
+	}
+
+	if (dcrtc->cursor_y < 0) {
+		yoff = -dcrtc->cursor_y;
+		yscr = 0;
+		h -= min(yoff, h);
+	} else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+		h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+	} else {
+		yoff = 0;
+		yscr = dcrtc->cursor_y;
+	}
+
+	/* On interlaced modes, the vertical cursor size must be halved */
+	s = dcrtc->cursor_w;
+	if (dcrtc->interlaced) {
+		s *= 2;
+		yscr /= 2;
+		h /= 2;
+	}
+
+	if (!dcrtc->cursor_obj || !h || !w) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		return 0;
+	}
+
+	para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+	armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	/*
+	 * Initialize the transparency if the SRAM was powered down.
+	 * We must also reload the cursor data as well.
+	 */
+	if (!(para1 & CFG_CSB_256x32)) {
+		armada_drm_crtc_cursor_tran(dcrtc->base);
+		reload = true;
+	}
+
+	if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+		spin_lock_irq(&dcrtc->irq_lock);
+		armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+		dcrtc->cursor_update = false;
+		armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+		spin_unlock_irq(&dcrtc->irq_lock);
+		reload = true;
+	}
+	if (reload) {
+		struct armada_gem_object *obj = dcrtc->cursor_obj;
+		uint32_t *pix;
+		/* Set the top-left corner of the cursor image */
+		pix = obj->addr;
+		pix += yoff * s + xoff;
+		armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+	}
+
+	/* Reload the cursor position, size and enable in the IRQ handler */
+	spin_lock_irq(&dcrtc->irq_lock);
+	dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+	dcrtc->cursor_hw_sz = h << 16 | w;
+	dcrtc->cursor_update = true;
+	armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	return 0;
+}
+
+static void cursor_update(void *data)
+{
+	armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+	struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_gem_object *obj = NULL;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -ENXIO;
+
+	if (handle && w > 0 && h > 0) {
+		/* maximum size is 64x32 or 32x64 */
+		if (w > 64 || h > 64 || (w > 32 && h > 32))
+			return -ENOMEM;
+
+		obj = armada_gem_object_lookup(dev, file, handle);
+		if (!obj)
+			return -ENOENT;
+
+		/* Must be a kernel-mapped object */
+		if (!obj->addr) {
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -EINVAL;
+		}
+
+		if (obj->obj.size < w * h * 4) {
+			DRM_ERROR("buffer is too small\n");
+			drm_gem_object_unreference_unlocked(&obj->obj);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (dcrtc->cursor_obj) {
+		dcrtc->cursor_obj->update = NULL;
+		dcrtc->cursor_obj->update_data = NULL;
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+	}
+	dcrtc->cursor_obj = obj;
+	dcrtc->cursor_w = w;
+	dcrtc->cursor_h = h;
+	ret = armada_drm_crtc_cursor_update(dcrtc, true);
+	if (obj) {
+		obj->update_data = dcrtc;
+		obj->update = cursor_update;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+	int ret;
+
+	/* If no cursor support, replicate drm's return value */
+	if (!priv->variant->has_spu_adv_reg)
+		return -EFAULT;
+
+	mutex_lock(&dev->struct_mutex);
+	dcrtc->cursor_x = x;
+	dcrtc->cursor_y = y;
+	ret = armada_drm_crtc_cursor_update(dcrtc, false);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_private *priv = crtc->dev->dev_private;
+
+	if (dcrtc->cursor_obj)
+		drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+	priv->dcrtc[dcrtc->num] = NULL;
+	drm_crtc_cleanup(&dcrtc->crtc);
+
+	if (!IS_ERR(dcrtc->clk))
+		clk_disable_unprepare(dcrtc->clk);
+
+	kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+	struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	struct armada_frame_work *work;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+	unsigned i;
+	int ret;
+
+	/* We don't support changing the pixel format */
+	if (fb->pixel_format != crtc->fb->pixel_format)
+		return -EINVAL;
+
+	work = kmalloc(sizeof(*work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->event = event;
+	work->old_fb = dcrtc->crtc.fb;
+
+	i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+				    dcrtc->interlaced);
+	armada_reg_queue_end(work->regs, i);
+
+	/*
+	 * Hold the old framebuffer for the work - DRM appears to drop our
+	 * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+	 */
+	drm_framebuffer_reference(work->old_fb);
+
+	ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+	if (ret) {
+		/*
+		 * Undo our reference above; DRM does not drop the reference
+		 * to this object on error, so that's okay.
+		 */
+		drm_framebuffer_unreference(work->old_fb);
+		kfree(work);
+		return ret;
+	}
+
+	/*
+	 * Don't take a reference on the new framebuffer;
+	 * drm_mode_page_flip_ioctl() has already grabbed a reference and
+	 * will _not_ drop that reference on successful return from this
+	 * function.  Simply mark this new framebuffer as the current one.
+	 */
+	dcrtc->crtc.fb = fb;
+
+	/*
+	 * Finally, if the display is blanked, we won't receive an
+	 * interrupt, so complete it now.
+	 */
+	if (dpms_blanked(dcrtc->dpms)) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (dcrtc->frame_work)
+			armada_drm_crtc_complete_frame_work(dcrtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	bool update_csc = false;
+
+	if (property == priv->csc_yuv_prop) {
+		dcrtc->csc_yuv_mode = val;
+		update_csc = true;
+	} else if (property == priv->csc_rgb_prop) {
+		dcrtc->csc_rgb_mode = val;
+		update_csc = true;
+	}
+
+	if (update_csc) {
+		uint32_t val;
+
+		val = dcrtc->spu_iopad_ctrl |
+		      armada_drm_crtc_calculate_csc(dcrtc);
+		writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	}
+
+	return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+	.cursor_set	= armada_drm_crtc_cursor_set,
+	.cursor_move	= armada_drm_crtc_cursor_move,
+	.destroy	= armada_drm_crtc_destroy,
+	.set_config	= drm_crtc_helper_set_config,
+	.page_flip	= armada_drm_crtc_page_flip,
+	.set_property	= armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+	{ CSC_AUTO,        "Auto" },
+	{ CSC_YUV_CCIR601, "CCIR601" },
+	{ CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+	{ CSC_AUTO,         "Auto" },
+	{ CSC_RGB_COMPUTER, "Computer system" },
+	{ CSC_RGB_STUDIO,   "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->csc_yuv_prop)
+		return 0;
+
+	priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+				"CSC_YUV", armada_drm_csc_yuv_enum_list,
+				ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+	priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+				"CSC_RGB", armada_drm_csc_rgb_enum_list,
+				ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+	if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+	struct resource *res)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc;
+	void __iomem *base;
+	int ret;
+
+	ret = armada_drm_crtc_create_properties(dev);
+	if (ret)
+		return ret;
+
+	base = devm_request_and_ioremap(dev->dev, res);
+	if (!base) {
+		DRM_ERROR("failed to ioremap register\n");
+		return -ENOMEM;
+	}
+
+	dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+	if (!dcrtc) {
+		DRM_ERROR("failed to allocate Armada crtc\n");
+		return -ENOMEM;
+	}
+
+	dcrtc->base = base;
+	dcrtc->num = num;
+	dcrtc->clk = ERR_PTR(-EINVAL);
+	dcrtc->csc_yuv_mode = CSC_AUTO;
+	dcrtc->csc_rgb_mode = CSC_AUTO;
+	dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+	dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+	spin_lock_init(&dcrtc->irq_lock);
+	dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+	INIT_LIST_HEAD(&dcrtc->vbl_list);
+	init_waitqueue_head(&dcrtc->frame_wait);
+
+	/* Initialize some registers which we don't otherwise set */
+	writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+	writel_relaxed(dcrtc->spu_iopad_ctrl,
+		       dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+	writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+		       CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+	writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+	if (priv->variant->crtc_init) {
+		ret = priv->variant->crtc_init(dcrtc);
+		if (ret) {
+			kfree(dcrtc);
+			return ret;
+		}
+	}
+
+	/* Ensure AXI pipeline is enabled */
+	armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+	priv->dcrtc[dcrtc->num] = dcrtc;
+
+	drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+	drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+				   dcrtc->csc_yuv_mode);
+	drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+				   dcrtc->csc_rgb_mode);
+
+	return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644
index 0000000..9c10a07
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o)	\
+	do {					\
+		struct armada_regs *__reg = _r;	\
+		__reg[_i].offset = _o;		\
+		__reg[_i].mask = ~(_m);		\
+		__reg[_i].val = _v;		\
+		_i++;				\
+	} while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o)	\
+	armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i)		\
+	armada_reg_queue_mod(_r, _i, 0, 0, ~0)
+
+struct armada_frame_work;
+
+struct armada_crtc {
+	struct drm_crtc		crtc;
+	unsigned		num;
+	void __iomem		*base;
+	struct clk		*clk;
+	struct {
+		uint32_t	spu_v_h_total;
+		uint32_t	spu_v_porch;
+		uint32_t	spu_adv_reg;
+	} v[2];
+	bool			interlaced;
+	bool			cursor_update;
+	uint8_t			csc_yuv_mode;
+	uint8_t			csc_rgb_mode;
+
+	struct drm_plane	*plane;
+
+	struct armada_gem_object	*cursor_obj;
+	int			cursor_x;
+	int			cursor_y;
+	uint32_t		cursor_hw_pos;
+	uint32_t		cursor_hw_sz;
+	uint32_t		cursor_w;
+	uint32_t		cursor_h;
+
+	int			dpms;
+	uint32_t		cfg_dumb_ctrl;
+	uint32_t		dumb_ctrl;
+	uint32_t		spu_iopad_ctrl;
+
+	wait_queue_head_t	frame_wait;
+	struct armada_frame_work *frame_work;
+
+	spinlock_t		irq_lock;
+	uint32_t		irq_ena;
+	struct list_head	vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
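
For orientation, a minimal usage sketch of the armada_reg_queue_* macros declared above, assuming a struct armada_crtc pointer obtained elsewhere and assuming armada_drm_crtc_update_regs() applies each entry as reg = (reg & mask) | val until it reaches the ~0 terminator; the register and bit names are taken from armada_hw.h later in this series:

/* Illustrative sketch: queue two register updates and apply them. */
static void example_queue_regs(struct armada_crtc *dcrtc)
{
	struct armada_regs regs[4];
	unsigned i = 0;

	/* Overwrite the blank colour register entirely */
	armada_reg_queue_set(regs, i, 0x00000000, LCD_SPU_BLANKCOLOR);
	/* Clear only the CFG_DUMB_ENA bit, leaving the other bits alone */
	armada_reg_queue_mod(regs, i, 0, CFG_DUMB_ENA, LCD_SPU_DUMB_CTRL);
	/* Terminate the list (offset ~0) */
	armada_reg_queue_end(regs, i);

	armada_drm_crtc_update_regs(dcrtc, regs);
}
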
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644
index 0000000..471e456
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct armada_private *priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mm_dump_table(m, &priv->linear);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct armada_private *priv = dev->dev_private;
+	int n, i;
+
+	if (priv) {
+		for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+			struct armada_crtc *dcrtc = priv->dcrtc[n];
+			if (!dcrtc)
+				continue;
+
+			for (i = 0x84; i <= 0x1c4; i += 4) {
+				uint32_t v = readl_relaxed(dcrtc->base + i);
+				seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+	.owner = THIS_MODULE,
+	.open = armada_debugfs_reg_r_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t armada_debugfs_write(struct file *file, const char __user *ptr,
+	size_t len, loff_t *off)
+{
+	struct drm_device *dev = file->private_data;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	char buf[32], *p;
+	uint32_t reg, val;
+	int ret;
+
+	if (*off != 0)
+		return 0;
+
+	if (len > sizeof(buf) - 1)
+		len = sizeof(buf) - 1;
+
+	ret = strncpy_from_user(buf, ptr, len);
+	if (ret < 0)
+		return ret;
+	buf[len] = '\0';
+
+	reg = simple_strtoul(buf, &p, 16);
+	if (!isspace(*p))
+		return -EINVAL;
+	val = simple_strtoul(p + 1, NULL, 16);
+
+	if (reg >= 0x84 && reg <= 0x1c4)
+		writel(val, dcrtc->base + reg);
+
+	return len;
+}
+
+static const struct file_operations fops_reg_w = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.write = armada_debugfs_write,
+	.llseek = noop_llseek,
+};
+
+static struct drm_info_list armada_debugfs_list[] = {
+	{ "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+	const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+	const char *name, umode_t mode, const struct file_operations *fops)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+	return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+	int ret;
+
+	ret = drm_debugfs_create_files(armada_debugfs_list,
+				       ARMADA_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				   "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+	if (ret)
+		goto err_1;
+
+	ret = armada_debugfs_create(minor->debugfs_root, minor,
+				"reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+	if (ret)
+		goto err_2;
+	return ret;
+
+ err_2:
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+	return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+	drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+				 minor);
+}
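
The reg_wr file above expects a single line of the form "<offset> <value>" in hex and only accepts offsets inside the 0x84-0x1c4 window. A hypothetical userspace poke, assuming the usual DRM debugfs location under /sys/kernel/debug/dri/0/, might look like:

/* Hypothetical example: write LCD_SPU_BLANKCOLOR (offset 0x124). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/reg_wr", "w");

	if (!f)
		return 1;
	/* "<offset> <value>", both hex, parsed by armada_debugfs_write() */
	fprintf(f, "124 00ff0000\n");
	fclose(f);
	return 0;
}
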
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644
index 0000000..eef09ec
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+	uint32_t ov, v;
+
+	ov = v = readl_relaxed(ptr);
+	v = (v & ~mask) | val;
+	if (ov != v)
+		writel_relaxed(v, ptr);
+}
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+	uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+	/* 88AP510 spec recommends pitch be a multiple of 128 */
+	return ALIGN(pitch, 128);
+}
+
+struct armada_vbl_event {
+	struct list_head	node;
+	void			*data;
+	void			(*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+	struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+	struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do {	\
+	struct armada_vbl_event *__e = _e;		\
+	INIT_LIST_HEAD(&__e->node);			\
+	__e->data = _d;					\
+	__e->fn = _f;					\
+} while (0)
+
+
+struct armada_private;
+
+struct armada_variant {
+	bool	has_spu_adv_reg;
+	uint32_t spu_adv_reg;
+	int (*init)(struct armada_private *, struct device *);
+	int (*crtc_init)(struct armada_crtc *);
+	int (*crtc_compute_clock)(struct armada_crtc *,
+				  const struct drm_display_mode *,
+				  uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+	const struct armada_variant *variant;
+	struct work_struct	fb_unref_work;
+	DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+	struct drm_fb_helper	*fbdev;
+	struct armada_crtc	*dcrtc[2];
+	struct drm_mm		linear;
+	struct clk		*extclk[2];
+	struct drm_property	*csc_yuv_prop;
+	struct drm_property	*csc_rgb_prop;
+	struct drm_property	*colorkey_prop;
+	struct drm_property	*colorkey_min_prop;
+	struct drm_property	*colorkey_max_prop;
+	struct drm_property	*colorkey_val_prop;
+	struct drm_property	*colorkey_alpha_prop;
+	struct drm_property	*colorkey_mode_prop;
+	struct drm_property	*brightness_prop;
+	struct drm_property	*contrast_prop;
+	struct drm_property	*saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry		*de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+	struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
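
A short sketch of the two inline helpers above, assuming a mapped register window: armada_updatel() performs a masked read-modify-write and skips the write-back when nothing changes, and armada_pitch() rounds the line length up to the 128-byte multiple the 88AP510 prefers.

/* Illustrative sketch of armada_updatel() and armada_pitch(). */
static void example_helpers(void __iomem *base)
{
	uint32_t pitch;

	/* Set CFG_GRA_ENA and clear CFG_GRA_TSTMODE in one masked update */
	armada_updatel(CFG_GRA_ENA, CFG_GRA_ENA | CFG_GRA_TSTMODE,
		       base + LCD_SPU_DMA_CTRL0);

	/* 1024 pixels at 32bpp: 4096 bytes, already a multiple of 128 */
	pitch = armada_pitch(1024, 32);
	(void)pitch;
}
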
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644
index 0000000..4f2b283
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+	/* With 0x24, there is no translation between vp_out and int_vp
+	FB	LCD out	Pins	VIP	Int Vp
+	R:23:16	R:7:0	VPC7:0	7:0	7:0[R]
+	G:15:8	G:15:8	VPB7:0	23:16	23:16[G]
+	B:7:0	B:23:16	VPA7:0	15:8	15:8[B]
+	*/
+	.swap_a = 2,
+	.swap_b = 3,
+	.swap_c = 4,
+	.swap_d = 5,
+	.swap_e = 0,
+	.swap_f = 1,
+	.audio_cfg = BIT(2),
+	.audio_frame[1] = 1,
+	.audio_format = AFMT_SPDIF,
+	.audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+	.i2c_adapter_id = 0,
+	.crtcs = 1 << 0, /* Only LCD0 at the moment */
+	.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+	.interlace_allowed = true,
+	.info = {
+		.type = "tda998x",
+		.addr = 0x70,
+		.platform_data = &params,
+	},
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+	struct armada_private *priv =
+		container_of(work, struct armada_private, fb_unref_work);
+	struct drm_framebuffer *fb;
+
+	while (kfifo_get(&priv->fb_unref, &fb))
+		drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	/*
+	 * Yes, we really must jump through these hoops just to store a
+	 * _pointer_ to something into the kfifo.  This is utterly insane
+	 * and idiotic, because kfifo requires the _data_ pointed to by
+	 * the pointer to be const, not the pointer itself.  Not only that,
+	 * but you have to pass a pointer _to_ the pointer you want stored.
+	 */
+	const struct drm_framebuffer *silly_api_alert = fb;
+	WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+	schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+	struct drm_framebuffer *fb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	__armada_drm_queue_unref_work(dev, fb);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	const struct platform_device_id *id;
+	struct armada_private *priv;
+	struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+	struct resource *mem = NULL;
+	int ret, n, i;
+
+	memset(res, 0, sizeof(res));
+
+	for (n = i = 0; ; n++) {
+		struct resource *r = platform_get_resource(dev->platformdev,
+							   IORESOURCE_MEM, n);
+		if (!r)
+			break;
+
+		/* Resources above 64K are graphics memory */
+		if (resource_size(r) > SZ_64K)
+			mem = r;
+		else if (i < ARRAY_SIZE(priv->dcrtc))
+			res[i++] = r;
+		else
+			return -EINVAL;
+	}
+
+	if (!res[0] || !mem)
+		return -ENXIO;
+
+	if (!devm_request_mem_region(dev->dev, mem->start,
+			resource_size(mem), "armada-drm"))
+		return -EBUSY;
+
+	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate private\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	/* Get the implementation-specific driver data. */
+	id = platform_get_device_id(dev->platformdev);
+	if (!id)
+		return -ENXIO;
+
+	priv->variant = (struct armada_variant *)id->driver_data;
+
+	ret = priv->variant->init(priv, dev->dev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+	INIT_KFIFO(priv->fb_unref);
+
+	/* Mode setting support */
+	drm_mode_config_init(dev);
+	dev->mode_config.min_width = 320;
+	dev->mode_config.min_height = 200;
+
+	/*
+	 * With vscale enabled, the maximum width is 1920 due to the
+	 * 1920 x 3 line RAM
+	 */
+	dev->mode_config.max_width = 1920;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+	/* Create all LCD controllers */
+	for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+		if (!res[n])
+			break;
+
+		ret = armada_drm_crtc_create(dev, n, res[n]);
+		if (ret)
+			goto err_kms;
+	}
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+	ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+	if (ret)
+		goto err_kms;
+#endif
+
+	ret = drm_vblank_init(dev, n);
+	if (ret)
+		goto err_kms;
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto err_kms;
+
+	dev->vblank_disable_allowed = 1;
+
+	ret = armada_fbdev_init(dev);
+	if (ret)
+		goto err_irq;
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+ err_irq:
+	drm_irq_uninstall(dev);
+ err_kms:
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+
+	return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	armada_fbdev_fini(dev);
+	drm_irq_uninstall(dev);
+	drm_mode_config_cleanup(dev);
+	drm_mm_takedown(&priv->linear);
+	flush_work(&priv->fb_unref_work);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	if (list_empty(&evt->node)) {
+		list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+		drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+	}
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	if (!list_empty(&evt->node)) {
+		list_del_init(&evt->node);
+		drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+	}
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+	struct armada_vbl_event *evt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
+	armada_drm_vbl_event_remove(dcrtc, evt);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+	return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct armada_private *priv = dev->dev_private;
+	armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+	uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+	irqreturn_t handled = IRQ_NONE;
+
+	/*
+	 * This is ridiculous - rather than writing bits to clear, we
+	 * have to set the actual status register value.  This is racy.
+	 */
+	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+	/* Mask out those interrupts we haven't enabled */
+	v = stat & dcrtc->irq_ena;
+
+	if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+		armada_drm_crtc_irq(dcrtc, stat);
+		handled = IRQ_HANDLED;
+	}
+
+	return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	spin_lock_irq(&dev->vbl_lock);
+	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+	spin_unlock_irq(&dev->vbl_lock);
+
+	return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+	writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+		DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+		DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+	.owner			= THIS_MODULE,
+	.llseek			= no_llseek,
+	.read			= drm_read,
+	.poll			= drm_poll,
+	.unlocked_ioctl		= drm_ioctl,
+	.mmap			= drm_gem_mmap,
+	.open			= drm_open,
+	.release		= drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+	.load			= armada_drm_load,
+	.open			= NULL,
+	.preclose		= NULL,
+	.postclose		= NULL,
+	.lastclose		= NULL,
+	.unload			= armada_drm_unload,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= armada_drm_enable_vblank,
+	.disable_vblank		= armada_drm_disable_vblank,
+	.irq_handler		= armada_drm_irq_handler,
+	.irq_postinstall	= armada_drm_irq_postinstall,
+	.irq_uninstall		= armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init		= armada_drm_debugfs_init,
+	.debugfs_cleanup	= armada_drm_debugfs_cleanup,
+#endif
+	.gem_free_object	= armada_gem_free_object,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_export	= armada_gem_prime_export,
+	.gem_prime_import	= armada_gem_prime_import,
+	.dumb_create		= armada_gem_dumb_create,
+	.dumb_map_offset	= armada_gem_dumb_map_offset,
+	.dumb_destroy		= armada_gem_dumb_destroy,
+	.gem_vm_ops		= &armada_gem_vm_ops,
+	.major			= 1,
+	.minor			= 0,
+	.name			= "armada-drm",
+	.desc			= "Armada SoC DRM",
+	.date			= "20120730",
+	.driver_features	= DRIVER_GEM | DRIVER_MODESET |
+				  DRIVER_HAVE_IRQ | DRIVER_PRIME,
+	.ioctls			= armada_ioctls,
+	.fops			= &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&armada_drm_driver, pdev);
+	return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+	{
+		.name		= "armada-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	}, {
+		.name		= "armada-510-drm",
+		.driver_data	= (unsigned long)&armada510_ops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+	.probe	= armada_drm_probe,
+	.remove	= armada_drm_remove,
+	.driver	= {
+		.name	= "armada-drm",
+		.owner	= THIS_MODULE,
+	},
+	.id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+	armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+	return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+	platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
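
armada_drm_load() above classifies its platform resources purely by size: regions larger than 64K become the graphics memory pool backing priv->linear, while smaller regions are taken as LCD controller register windows in CRTC order. A hypothetical board-file registration consistent with that layout (all addresses and sizes invented for the example) could be:

/* Hypothetical board code matching the "armada-510-drm" id above. */
static struct resource example_armada_drm_resources[] = {
	DEFINE_RES_MEM(0xf1820000, SZ_64K),	/* LCD0 register window */
	DEFINE_RES_MEM(0x3f000000, SZ_16M),	/* graphics memory (>64K) */
};

static struct platform_device example_armada_drm_device = {
	.name		= "armada-510-drm",
	.id		= -1,
	.resource	= example_armada_drm_resources,
	.num_resources	= ARRAY_SIZE(example_armada_drm_resources),
};
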
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644
index 0000000..1c90969
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+	drm_framebuffer_cleanup(&dfb->fb);
+	drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+	kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *dfile, unsigned int *handle)
+{
+	struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+	return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+	.destroy	= armada_fb_destroy,
+	.create_handle	= armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+	struct armada_framebuffer *dfb;
+	uint8_t format, config;
+	int ret;
+
+	switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod)		\
+	case DRM_FORMAT_##drm:		\
+		format = CFG_##fmt;	\
+		config = mod;		\
+		break
+	FMT(RGB565,	565,		CFG_SWAPRB);
+	FMT(BGR565,	565,		0);
+	FMT(ARGB1555,	1555,		CFG_SWAPRB);
+	FMT(ABGR1555,	1555,		0);
+	FMT(RGB888,	888PACK,	CFG_SWAPRB);
+	FMT(BGR888,	888PACK,	0);
+	FMT(XRGB8888,	X888,		CFG_SWAPRB);
+	FMT(XBGR8888,	X888,		0);
+	FMT(ARGB8888,	8888,		CFG_SWAPRB);
+	FMT(ABGR8888,	8888,		0);
+	FMT(YUYV,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+	FMT(UYVY,	422PACK,	CFG_YUV2RGB);
+	FMT(VYUY,	422PACK,	CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YVYU,	422PACK,	CFG_YUV2RGB | CFG_SWAPYU);
+	FMT(YUV422,	422,		CFG_YUV2RGB);
+	FMT(YVU422,	422,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(YUV420,	420,		CFG_YUV2RGB);
+	FMT(YVU420,	420,		CFG_YUV2RGB | CFG_SWAPUV);
+	FMT(C8,		PSEUDO8,	0);
+#undef FMT
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+	if (!dfb) {
+		DRM_ERROR("failed to allocate Armada fb object\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dfb->fmt = format;
+	dfb->mod = config;
+	dfb->obj = obj;
+
+	drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+	ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+	if (ret) {
+		kfree(dfb);
+		return ERR_PTR(ret);
+	}
+
+	/*
+	 * Take a reference on our object as we're successful - the
+	 * caller already holds a reference, which keeps us safe for
+	 * the above call, but the caller will drop their reference
+	 * to it.  Hence we need to take our own reference.
+	 */
+	drm_gem_object_reference(&obj->obj);
+
+	return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+	struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+	struct armada_gem_object *obj;
+	struct armada_framebuffer *dfb;
+	int ret;
+
+	DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+		mode->width, mode->height, mode->pixel_format,
+		mode->flags, mode->pitches[0], mode->pitches[1],
+		mode->pitches[2]);
+
+	/* We can only handle a single plane at the moment */
+	if (drm_format_num_planes(mode->pixel_format) > 1 &&
+	    (mode->handles[0] != mode->handles[1] ||
+	     mode->handles[0] != mode->handles[2])) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+	if (!obj) {
+		ret = -ENOENT;
+		goto err;
+	}
+
+	if (obj->obj.import_attach && !obj->sgt) {
+		ret = armada_gem_map_import(obj);
+		if (ret)
+			goto err_unref;
+	}
+
+	/* Framebuffer objects must have a valid device address for scanout */
+	if (obj->dev_addr == DMA_ERROR_CODE) {
+		ret = -EINVAL;
+		goto err_unref;
+	}
+
+	dfb = armada_framebuffer_create(dev, mode, obj);
+	if (IS_ERR(dfb)) {
+		ret = PTR_ERR(dfb);
+		goto err;
+	}
+
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	return &dfb->fb;
+
+ err_unref:
+	drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+	DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+	return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh)
+		drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+	.fb_create		= armada_fb_create,
+	.output_poll_changed	= armada_output_poll_changed,
+};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644
index 0000000..ce3f12e
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+	struct drm_framebuffer	fb;
+	struct armada_gem_object *obj;
+	uint8_t			fmt;
+	uint8_t			mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+	container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+	struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644
index 0000000..dd5ea77
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+	.fb_debug_enter	= drm_fb_helper_debug_enter,
+	.fb_debug_leave	= drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fbh->dev;
+	struct drm_mode_fb_cmd2 mode;
+	struct armada_framebuffer *dfb;
+	struct armada_gem_object *obj;
+	struct fb_info *info;
+	int size, ret;
+	void *ptr;
+
+	memset(&mode, 0, sizeof(mode));
+	mode.width = sizes->surface_width;
+	mode.height = sizes->surface_height;
+	mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+	mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+					sizes->surface_depth);
+
+	size = mode.pitches[0] * mode.height;
+	obj = armada_gem_alloc_private_object(dev, size);
+	if (!obj) {
+		DRM_ERROR("failed to allocate fb memory\n");
+		return -ENOMEM;
+	}
+
+	ret = armada_gem_linear_back(dev, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return ret;
+	}
+
+	ptr = armada_gem_map_object(dev, obj);
+	if (!ptr) {
+		drm_gem_object_unreference_unlocked(&obj->obj);
+		return -ENOMEM;
+	}
+
+	dfb = armada_framebuffer_create(dev, &mode, obj);
+
+	/*
+	 * A reference is now held by the framebuffer object if
+	 * successful, otherwise this drops the ref for the error path.
+	 */
+	drm_gem_object_unreference_unlocked(&obj->obj);
+
+	if (IS_ERR(dfb))
+		return PTR_ERR(dfb);
+
+	info = framebuffer_alloc(0, dev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto err_fballoc;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto err_fbcmap;
+	}
+
+	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+	info->par = fbh;
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &armada_fb_ops;
+	info->fix.smem_start = obj->phys_addr;
+	info->fix.smem_len = obj->obj.size;
+	info->screen_size = obj->obj.size;
+	info->screen_base = ptr;
+	fbh->fb = &dfb->fb;
+	fbh->fbdev = info;
+	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+	DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
+		dfb->fb.width, dfb->fb.height,
+		dfb->fb.bits_per_pixel, obj->phys_addr);
+
+	return 0;
+
+ err_fbcmap:
+	framebuffer_release(info);
+ err_fballoc:
+	dfb->fb.funcs->destroy(&dfb->fb);
+	return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!fbh->fb) {
+		ret = armada_fb_create(fbh, sizes);
+		if (ret == 0)
+			ret = 1;
+	}
+	return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+	.gamma_set	= armada_drm_crtc_gamma_set,
+	.gamma_get	= armada_drm_crtc_gamma_get,
+	.fb_probe	= armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh;
+	int ret;
+
+	fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+	if (!fbh)
+		return -ENOMEM;
+
+	priv->fbdev = fbh;
+
+	fbh->funcs = &armada_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, fbh, 1, 1);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm fb helper\n");
+		goto err_fb_helper;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(fbh);
+	if (ret) {
+		DRM_ERROR("failed to add fb connectors\n");
+		goto err_fb_setup;
+	}
+
+	ret = drm_fb_helper_initial_config(fbh, 32);
+	if (ret) {
+		DRM_ERROR("failed to set initial config\n");
+		goto err_fb_setup;
+	}
+
+	return 0;
+ err_fb_setup:
+	drm_fb_helper_fini(fbh);
+ err_fb_helper:
+	priv->fbdev = NULL;
+	return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_fb_helper *fbh = priv->fbdev;
+
+	if (fbh) {
+		struct fb_info *info = fbh->fbdev;
+
+		if (info) {
+			unregister_framebuffer(info);
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		if (fbh->fb)
+			fbh->fb->funcs->destroy(fbh->fb);
+
+		drm_fb_helper_fini(fbh);
+
+		priv->fbdev = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644
index 0000000..9f2356b
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	unsigned long addr = (unsigned long)vmf->virtual_address;
+	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+	int ret;
+
+	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vma, addr, pfn);
+
+	switch (ret) {
+	case 0:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+	.fault	= armada_gem_vm_fault,
+	.open	= drm_gem_vm_open,
+	.close	= drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+	return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+	drm_gem_free_mmap_offset(&dobj->obj);
+
+	if (dobj->page) {
+		/* page backed memory */
+		unsigned int order = get_order(dobj->obj.size);
+		__free_pages(dobj->page, order);
+	} else if (dobj->linear) {
+		/* linear backed memory */
+		drm_mm_remove_node(dobj->linear);
+		kfree(dobj->linear);
+		if (dobj->addr)
+			iounmap(dobj->addr);
+	}
+
+	if (dobj->obj.import_attach) {
+		/* We only ever display imported data */
+		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+					 DMA_TO_DEVICE);
+		drm_prime_gem_destroy(&dobj->obj, NULL);
+	}
+
+	drm_gem_object_release(&dobj->obj);
+
+	kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+	struct armada_private *priv = dev->dev_private;
+	size_t size = obj->obj.size;
+
+	if (obj->page || obj->linear)
+		return 0;
+
+	/*
+	 * If it is a small allocation (typically cursor, which will
+	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+	 * Framebuffers will never be this small (our minimum size for
+	 * framebuffers is larger than this anyway.)  Such objects are
+	 * only accessed by the CPU so we don't need any special handling
+	 * here.
+	 */
+	if (size <= 8192) {
+		unsigned int order = get_order(size);
+		struct page *p = alloc_pages(GFP_KERNEL, order);
+
+		if (p) {
+			obj->addr = page_address(p);
+			obj->phys_addr = page_to_phys(p);
+			obj->page = p;
+
+			memset(obj->addr, 0, PAGE_ALIGN(size));
+		}
+	}
+
+	/*
+	 * We could grab something from CMA if it's enabled, but that
+	 * would introduce a problem:
+	 *
+	 * CMA's interface uses dma_alloc_coherent(), which provides us
+	 * with a CPU virtual address and a device address.
+	 *
+	 * The CPU virtual address may be either an address in the kernel
+	 * direct mapped region (for example, as it would be on x86) or
+	 * it may be remapped into another part of kernel memory space
+	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
+	 * returned virtual address is invalid depending on the architecture
+	 * implementation.
+	 *
+	 * The device address may also not be a physical address; it may
+	 * be that there is some kind of remapping between the device and
+	 * system RAM, which makes the use of the device address also
+	 * unsafe to re-use as a physical address.
+	 *
+	 * This makes DRM usage of dma_alloc_coherent() in a generic way
+	 * at best very questionable and unsafe.
+	 */
+
+	/* Otherwise, grab it from our linear allocation */
+	if (!obj->page) {
+		struct drm_mm_node *node;
+		unsigned align = min_t(unsigned, size, SZ_2M);
+		void __iomem *ptr;
+		int ret;
+
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		if (!node)
+			return -ENOSPC;
+
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_mm_insert_node(&priv->linear, node, size, align,
+					 DRM_MM_SEARCH_DEFAULT);
+		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			kfree(node);
+			return ret;
+		}
+
+		obj->linear = node;
+
+		/* Ensure that the memory we're returning is cleared. */
+		ptr = ioremap_wc(obj->linear->start, size);
+		if (!ptr) {
+			mutex_lock(&dev->struct_mutex);
+			drm_mm_remove_node(obj->linear);
+			mutex_unlock(&dev->struct_mutex);
+			kfree(obj->linear);
+			obj->linear = NULL;
+			return -ENOMEM;
+		}
+
+		memset_io(ptr, 0, size);
+		iounmap(ptr);
+
+		obj->phys_addr = obj->linear->start;
+		obj->dev_addr = obj->linear->start;
+	}
+
+	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
+			 obj, obj->phys_addr, obj->dev_addr);
+
+	return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+	/* only linear objects need to be ioremap'd */
+	if (!dobj->addr && dobj->linear)
+		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+	return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+	struct armada_gem_object *obj;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	drm_gem_private_object_init(dev, &obj->obj, size);
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+	size_t size)
+{
+	struct armada_gem_object *obj;
+	struct address_space *mapping;
+
+	size = roundup_gem_size(size);
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->obj, size)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	obj->dev_addr = DMA_ERROR_CODE;
+
+	mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+	return obj;
+}
+
+/* Dumb alloc support */
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+	struct drm_mode_create_dumb *args)
+{
+	struct armada_gem_object *dobj;
+	u32 handle;
+	size_t size;
+	int ret;
+
+	args->pitch = armada_pitch(args->width, args->bpp);
+	args->size = size = args->pitch * args->height;
+
+	dobj = armada_gem_alloc_private_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = armada_gem_linear_back(dev, dobj);
+	if (ret)
+		goto err;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle, uint64_t *offset)
+{
+	struct armada_gem_object *obj;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = armada_gem_object_lookup(dev, file, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* Don't allow imported objects to be mapped */
+	if (obj->obj.import_attach) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	ret = drm_gem_create_mmap_offset(&obj->obj);
+	if (ret == 0) {
+		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+	}
+
+	drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+	uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_create *args = data;
+	struct armada_gem_object *dobj;
+	size_t size;
+	u32 handle;
+	int ret;
+
+	if (args->size == 0)
+		return -ENOMEM;
+
+	size = args->size;
+
+	dobj = armada_gem_alloc_object(dev, size);
+	if (dobj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+	if (ret)
+		goto err;
+
+	args->handle = handle;
+
+	/* drop reference from allocate - handle holds it now */
+	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_mmap *args = data;
+	struct armada_gem_object *dobj;
+	unsigned long addr;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	if (!dobj->obj.filp) {
+		drm_gem_object_unreference(&dobj->obj);
+		return -EINVAL;
+	}
+
+	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+		       MAP_SHARED, args->offset);
+	drm_gem_object_unreference(&dobj->obj);
+	if (IS_ERR_VALUE(addr))
+		return addr;
+
+	args->addr = addr;
+
+	return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+	struct drm_file *file)
+{
+	struct drm_armada_gem_pwrite *args = data;
+	struct armada_gem_object *dobj;
+	char __user *ptr;
+	int ret;
+
+	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+		args->handle, args->offset, args->size, args->ptr);
+
+	if (args->size == 0)
+		return 0;
+
+	ptr = (char __user *)(uintptr_t)args->ptr;
+
+	if (!access_ok(VERIFY_READ, ptr, args->size))
+		return -EFAULT;
+
+	ret = fault_in_multipages_readable(ptr, args->size);
+	if (ret)
+		return ret;
+
+	dobj = armada_gem_object_lookup(dev, file, args->handle);
+	if (dobj == NULL)
+		return -ENOENT;
+
+	/* Must be a kernel-mapped object */
+	if (!dobj->addr) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (args->offset > dobj->obj.size ||
+	    args->size > dobj->obj.size - args->offset) {
+		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
+		ret = -EINVAL;
+		goto unref;
+	}
+
+	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+		ret = -EFAULT;
+	} else if (dobj->update) {
+		dobj->update(dobj->update_data);
+		ret = 0;
+	}
+
+ unref:
+	drm_gem_object_unreference_unlocked(&dobj->obj);
+	return ret;
+}
+
+/* Prime support */
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+	enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	struct scatterlist *sg;
+	struct sg_table *sgt;
+	int i, num;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	if (dobj->obj.filp) {
+		struct address_space *mapping;
+		gfp_t gfp;
+		int count;
+
+		count = dobj->obj.size / PAGE_SIZE;
+		if (sg_alloc_table(sgt, count, GFP_KERNEL))
+			goto free_sgt;
+
+		mapping = file_inode(dobj->obj.filp)->i_mapping;
+		gfp = mapping_gfp_mask(mapping);
+
+		for_each_sg(sgt->sgl, sg, count, i) {
+			struct page *page;
+
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page)) {
+				num = i;
+				goto release;
+			}
+
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		}
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+			num = sgt->nents;
+			goto release;
+		}
+	} else if (dobj->page) {
+		/* Single contiguous page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+
+		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+			goto free_table;
+	} else if (dobj->linear) {
+		/* Single contiguous physical region - no struct page */
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free_sgt;
+		sg_dma_address(sgt->sgl) = dobj->dev_addr;
+		sg_dma_len(sgt->sgl) = dobj->obj.size;
+	} else {
+		goto free_sgt;
+	}
+	return sgt;
+
+ release:
+	for_each_sg(sgt->sgl, sg, num, i)
+		page_cache_release(sg_page(sg));
+ free_table:
+	sg_free_table(sgt);
+ free_sgt:
+	kfree(sgt);
+	return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+	int i;
+
+	if (!dobj->linear)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+	if (dobj->obj.filp) {
+		struct scatterlist *sg;
+		for_each_sg(sgt->sgl, sg, sgt->nents, i)
+			page_cache_release(sg_page(sg));
+	}
+
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+	return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+	.map_dma_buf	= armada_gem_prime_map_dma_buf,
+	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
+	.release	= drm_gem_dmabuf_release,
+	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
+	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
+	.kmap		= armada_gem_dmabuf_no_kmap,
+	.kunmap		= armada_gem_dmabuf_no_kunmap,
+	.mmap		= armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+	int flags)
+{
+	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+			      O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+	struct dma_buf_attachment *attach;
+	struct armada_gem_object *dobj;
+
+	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+		struct drm_gem_object *obj = buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing our own dmabuf(s) increases the
+			 * refcount on the gem object itself.
+			 */
+			drm_gem_object_reference(obj);
+			dma_buf_put(buf);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	dobj = armada_gem_alloc_private_object(dev, buf->size);
+	if (!dobj) {
+		dma_buf_detach(buf, attach);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	dobj->obj.import_attach = attach;
+
+	/*
+	 * Don't call dma_buf_map_attachment() here - it maps the
+	 * scatterlist immediately for DMA, and this is not always
+	 * an appropriate thing to do.
+	 */
+	return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+	int ret;
+
+	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+					  DMA_TO_DEVICE);
+	if (!dobj->sgt) {
+		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+		return -EINVAL;
+	}
+	if (IS_ERR(dobj->sgt)) {
+		ret = PTR_ERR(dobj->sgt);
+		dobj->sgt = NULL;
+		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+		return ret;
+	}
+	if (dobj->sgt->nents > 1) {
+		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
+		return -EINVAL;
+	}
+	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
+		return -EINVAL;
+	}
+	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+	return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644
index 0000000..00b6cd4
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
+struct armada_gem_object {
+	struct drm_gem_object	obj;
+	void			*addr;
+	phys_addr_t		phys_addr;
+	resource_size_t		dev_addr;
+	struct drm_mm_node	*linear;	/* for linear backed */
+	struct page		*page;		/* for page backed */
+	struct sg_table		*sgt;		/* for imported */
+	void			(*update)(void *);
+	void			*update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+	size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+	struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+	uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+	uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+	struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+	struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
+static inline struct armada_gem_object *armada_gem_object_lookup(
+	struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+	struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+	return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
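
As a condensed illustration of the allocation API above, the same sequence the fbdev code follows: create a private (non-shmem) object, back it with space from the linear pool, then map it for CPU access. Error handling is trimmed to the minimum; this is a sketch, not driver code.

/* Illustrative sketch: linear-backed allocation for CPU use. */
static struct armada_gem_object *example_alloc_linear(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *dobj;
	void *ptr;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (!dobj)
		return NULL;

	/* Carve space out of priv->linear (graphics memory) */
	if (armada_gem_linear_back(dev, dobj))
		goto err;

	/* ioremap_wc() mapping for linear objects */
	ptr = armada_gem_map_object(dev, dobj);
	if (!ptr)
		goto err;

	return dobj;

 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return NULL;
}
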
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644
index 0000000..27319a8
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *  LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ *  LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ *  LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+	LCD_SPU_ADV_REG			= 0x0084,	/* Armada 510 */
+	LCD_SPU_DMA_START_ADDR_Y0	= 0x00c0,
+	LCD_SPU_DMA_START_ADDR_U0	= 0x00c4,
+	LCD_SPU_DMA_START_ADDR_V0	= 0x00c8,
+	LCD_CFG_DMA_START_ADDR_0	= 0x00cc,
+	LCD_SPU_DMA_START_ADDR_Y1	= 0x00d0,
+	LCD_SPU_DMA_START_ADDR_U1	= 0x00d4,
+	LCD_SPU_DMA_START_ADDR_V1	= 0x00d8,
+	LCD_CFG_DMA_START_ADDR_1	= 0x00dc,
+	LCD_SPU_DMA_PITCH_YC		= 0x00e0,
+	LCD_SPU_DMA_PITCH_UV		= 0x00e4,
+	LCD_SPU_DMA_OVSA_HPXL_VLN	= 0x00e8,
+	LCD_SPU_DMA_HPXL_VLN		= 0x00ec,
+	LCD_SPU_DZM_HPXL_VLN		= 0x00f0,
+	LCD_CFG_GRA_START_ADDR0		= 0x00f4,
+	LCD_CFG_GRA_START_ADDR1		= 0x00f8,
+	LCD_CFG_GRA_PITCH		= 0x00fc,
+	LCD_SPU_GRA_OVSA_HPXL_VLN	= 0x0100,
+	LCD_SPU_GRA_HPXL_VLN		= 0x0104,
+	LCD_SPU_GZM_HPXL_VLN		= 0x0108,
+	LCD_SPU_HWC_OVSA_HPXL_VLN	= 0x010c,
+	LCD_SPU_HWC_HPXL_VLN		= 0x0110,
+	LCD_SPUT_V_H_TOTAL		= 0x0114,
+	LCD_SPU_V_H_ACTIVE		= 0x0118,
+	LCD_SPU_H_PORCH			= 0x011c,
+	LCD_SPU_V_PORCH			= 0x0120,
+	LCD_SPU_BLANKCOLOR		= 0x0124,
+	LCD_SPU_ALPHA_COLOR1		= 0x0128,
+	LCD_SPU_ALPHA_COLOR2		= 0x012c,
+	LCD_SPU_COLORKEY_Y		= 0x0130,
+	LCD_SPU_COLORKEY_U		= 0x0134,
+	LCD_SPU_COLORKEY_V		= 0x0138,
+	LCD_CFG_RDREG4F			= 0x013c,	/* Armada 510 */
+	LCD_SPU_SPI_RXDATA		= 0x0140,
+	LCD_SPU_ISA_RXDATA		= 0x0144,
+	LCD_SPU_HWC_RDDAT		= 0x0158,
+	LCD_SPU_GAMMA_RDDAT		= 0x015c,
+	LCD_SPU_PALETTE_RDDAT		= 0x0160,
+	LCD_SPU_IOPAD_IN		= 0x0178,
+	LCD_CFG_RDREG5F			= 0x017c,
+	LCD_SPU_SPI_CTRL		= 0x0180,
+	LCD_SPU_SPI_TXDATA		= 0x0184,
+	LCD_SPU_SMPN_CTRL		= 0x0188,
+	LCD_SPU_DMA_CTRL0		= 0x0190,
+	LCD_SPU_DMA_CTRL1		= 0x0194,
+	LCD_SPU_SRAM_CTRL		= 0x0198,
+	LCD_SPU_SRAM_WRDAT		= 0x019c,
+	LCD_SPU_SRAM_PARA0		= 0x01a0,	/* Armada 510 */
+	LCD_SPU_SRAM_PARA1		= 0x01a4,
+	LCD_CFG_SCLK_DIV		= 0x01a8,
+	LCD_SPU_CONTRAST		= 0x01ac,
+	LCD_SPU_SATURATION		= 0x01b0,
+	LCD_SPU_CBSH_HUE		= 0x01b4,
+	LCD_SPU_DUMB_CTRL		= 0x01b8,
+	LCD_SPU_IOPAD_CONTROL		= 0x01bc,
+	LCD_SPU_IRQ_ENA			= 0x01c0,
+	LCD_SPU_IRQ_ISR			= 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+	ADV_VSYNC_L_OFF	= 0xfff << 20,
+	ADV_GRACOLORKEY	= 1 << 19,
+	ADV_VIDCOLORKEY	= 1 << 18,
+	ADV_HWC32BLEND	= 1 << 15,
+	ADV_HWC32ARGB	= 1 << 14,
+	ADV_HWC32ENABLE	= 1 << 13,
+	ADV_VSYNCOFFEN	= 1 << 12,
+	ADV_VSYNC_H_OFF	= 0xfff << 0,
+};
+
+enum {
+	CFG_565		= 0,
+	CFG_1555	= 1,
+	CFG_888PACK	= 2,
+	CFG_X888	= 3,
+	CFG_8888	= 4,
+	CFG_422PACK	= 5,
+	CFG_422		= 6,
+	CFG_420		= 7,
+	CFG_PSEUDO4	= 9,
+	CFG_PSEUDO8	= 10,
+	CFG_SWAPRB	= 1 << 4,
+	CFG_SWAPUV	= 1 << 3,
+	CFG_SWAPYU	= 1 << 2,
+	CFG_YUV2RGB	= 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+	CFG_NOBLENDING	= 1 << 31,
+	CFG_GAMMA_ENA	= 1 << 30,
+	CFG_CBSH_ENA	= 1 << 29,
+	CFG_PALETTE_ENA	= 1 << 28,
+	CFG_ARBFAST_ENA	= 1 << 27,
+	CFG_HWC_1BITMOD	= 1 << 26,
+	CFG_HWC_1BITENA	= 1 << 25,
+	CFG_HWC_ENA	= 1 << 24,
+	CFG_DMAFORMAT	= 0xf << 20,
+#define	CFG_DMA_FMT(x)	((x) << 20)
+	CFG_GRAFORMAT	= 0xf << 16,
+#define	CFG_GRA_FMT(x)	((x) << 16)
+#define CFG_GRA_MOD(x)	((x) << 8)
+	CFG_GRA_FTOGGLE	= 1 << 15,
+	CFG_GRA_HSMOOTH	= 1 << 14,
+	CFG_GRA_TSTMODE	= 1 << 13,
+	CFG_GRA_ENA	= 1 << 8,
+#define CFG_DMA_MOD(x)	((x) << 0)
+	CFG_DMA_FTOGGLE	= 1 << 7,
+	CFG_DMA_HSMOOTH	= 1 << 6,
+	CFG_DMA_TSTMODE	= 1 << 5,
+	CFG_DMA_ENA	= 1 << 0,
+};
+
+enum {
+	CKMODE_DISABLE	= 0,
+	CKMODE_Y	= 1,
+	CKMODE_U	= 2,
+	CKMODE_RGB	= 3,
+	CKMODE_V	= 4,
+	CKMODE_R	= 5,
+	CKMODE_G	= 6,
+	CKMODE_B	= 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+	CFG_FRAME_TRIG		= 1 << 31,
+	CFG_VSYNC_INV		= 1 << 27,
+	CFG_CKMODE_MASK		= 0x7 << 24,
+#define CFG_CKMODE(x)		((x) << 24)
+	CFG_CARRY		= 1 << 23,
+	CFG_GATED_CLK		= 1 << 21,
+	CFG_PWRDN_ENA		= 1 << 20,
+	CFG_DSCALE_MASK		= 0x3 << 18,
+	CFG_DSCALE_NONE		= 0x0 << 18,
+	CFG_DSCALE_HALF		= 0x1 << 18,
+	CFG_DSCALE_QUAR		= 0x2 << 18,
+	CFG_ALPHAM_MASK		= 0x3 << 16,
+	CFG_ALPHAM_VIDEO	= 0x0 << 16,
+	CFG_ALPHAM_GRA		= 0x1 << 16,
+	CFG_ALPHAM_CFG		= 0x2 << 16,
+	CFG_ALPHA_MASK		= 0xff << 8,
+	CFG_PIXCMD_MASK		= 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+	SRAM_READ	= 0 << 14,
+	SRAM_WRITE	= 2 << 14,
+	SRAM_INIT	= 3 << 14,
+	SRAM_HWC32_RAM1	= 0xc << 8,
+	SRAM_HWC32_RAM2	= 0xd << 8,
+	SRAM_HWC32_RAMR	= SRAM_HWC32_RAM1,
+	SRAM_HWC32_RAMG	= SRAM_HWC32_RAM2,
+	SRAM_HWC32_RAMB	= 0xe << 8,
+	SRAM_HWC32_TRAN	= 0xf << 8,
+	SRAM_HWC	= 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+	CFG_CSB_256x32	= 1 << 15,	/* cursor */
+	CFG_CSB_256x24	= 1 << 14,	/* palette */
+	CFG_CSB_256x8	= 1 << 13,	/* gamma */
+	CFG_PDWN1920x32	= 1 << 8,	/* Armada 510: power down vscale ram */
+	CFG_PDWN256x32	= 1 << 7,	/* power down cursor */
+	CFG_PDWN256x24	= 1 << 6,	/* power down palette */
+	CFG_PDWN256x8	= 1 << 5,	/* power down gamma */
+	CFG_PDWNHWC	= 1 << 4,	/* Armada 510: power down all hwc ram */
+	CFG_PDWN32x32	= 1 << 3,	/* power down slave->smart ram */
+	CFG_PDWN16x66	= 1 << 2,	/* power down UV fifo */
+	CFG_PDWN32x66	= 1 << 1,	/* power down Y fifo */
+	CFG_PDWN64x66	= 1 << 0,	/* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+	/* Armada 510 */
+	SCLK_510_AXI		= 0x0 << 30,
+	SCLK_510_EXTCLK0	= 0x1 << 30,
+	SCLK_510_PLL		= 0x2 << 30,
+	SCLK_510_EXTCLK1	= 0x3 << 30,
+	SCLK_510_DIV_CHANGE	= 1 << 29,
+	SCLK_510_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_510_INT_DIV_MASK	= 0xffff << 0,
+
+	/* Armada 16x */
+	SCLK_16X_AHB		= 0x0 << 28,
+	SCLK_16X_PCLK		= 0x1 << 28,
+	SCLK_16X_AXI		= 0x4 << 28,
+	SCLK_16X_PLL		= 0x8 << 28,
+	SCLK_16X_FRAC_DIV_MASK	= 0xfff << 16,
+	SCLK_16X_INT_DIV_MASK	= 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+	DUMB16_RGB565_0	= 0x0 << 28,
+	DUMB16_RGB565_1	= 0x1 << 28,
+	DUMB18_RGB666_0	= 0x2 << 28,
+	DUMB18_RGB666_1	= 0x3 << 28,
+	DUMB12_RGB444_0	= 0x4 << 28,
+	DUMB12_RGB444_1	= 0x5 << 28,
+	DUMB24_RGB888_0	= 0x6 << 28,
+	DUMB_BLANK	= 0x7 << 28,
+	DUMB_MASK	= 0xf << 28,
+	CFG_BIAS_OUT	= 1 << 8,
+	CFG_REV_RGB	= 1 << 7,
+	CFG_INV_CBLANK	= 1 << 6,
+	CFG_INV_CSYNC	= 1 << 5,	/* Normally active high */
+	CFG_INV_HENA	= 1 << 4,
+	CFG_INV_VSYNC	= 1 << 3,	/* Normally active high */
+	CFG_INV_HSYNC	= 1 << 2,	/* Normally active high */
+	CFG_INV_PCLK	= 1 << 1,
+	CFG_DUMB_ENA	= 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+	CFG_VSCALE_LN_EN	= 3 << 18,
+	CFG_GRA_VM_ENA		= 1 << 15,
+	CFG_DMA_VM_ENA		= 1 << 13,
+	CFG_CMD_VM_ENA		= 1 << 11,
+	CFG_CSC_MASK		= 3 << 8,
+	CFG_CSC_YUV_CCIR709	= 1 << 9,
+	CFG_CSC_YUV_CCIR601	= 0 << 9,
+	CFG_CSC_RGB_STUDIO	= 1 << 8,
+	CFG_CSC_RGB_COMPUTER	= 0 << 8,
+	CFG_IOPAD_MASK		= 0xf << 0,
+	CFG_IOPAD_DUMB24	= 0x0 << 0,
+	CFG_IOPAD_DUMB18SPI	= 0x1 << 0,
+	CFG_IOPAD_DUMB18GPIO	= 0x2 << 0,
+	CFG_IOPAD_DUMB16SPI	= 0x3 << 0,
+	CFG_IOPAD_DUMB16GPIO	= 0x4 << 0,
+	CFG_IOPAD_DUMB12GPIO	= 0x5 << 0,
+	CFG_IOPAD_SMART18	= 0x6 << 0,
+	CFG_IOPAD_SMART16	= 0x7 << 0,
+	CFG_IOPAD_SMART8	= 0x8 << 0,
+};
+
+#define IOPAD_DUMB24                0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+	DMA_FRAME_IRQ0_ENA	= 1 << 31,
+	DMA_FRAME_IRQ1_ENA	= 1 << 30,
+	DMA_FRAME_IRQ_ENA	= DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+	DMA_FF_UNDERFLOW_ENA	= 1 << 29,
+	GRA_FRAME_IRQ0_ENA	= 1 << 27,
+	GRA_FRAME_IRQ1_ENA	= 1 << 26,
+	GRA_FRAME_IRQ_ENA	= GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+	GRA_FF_UNDERFLOW_ENA	= 1 << 25,
+	VSYNC_IRQ_ENA		= 1 << 23,
+	DUMB_FRAMEDONE_ENA	= 1 << 22,
+	TWC_FRAMEDONE_ENA	= 1 << 21,
+	HWC_FRAMEDONE_ENA	= 1 << 20,
+	SLV_IRQ_ENA		= 1 << 19,
+	SPI_IRQ_ENA		= 1 << 18,
+	PWRDN_IRQ_ENA		= 1 << 17,
+	ERR_IRQ_ENA		= 1 << 16,
+	CLEAN_SPU_IRQ_ISR	= 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+	DMA_FRAME_IRQ0		= 1 << 31,
+	DMA_FRAME_IRQ1		= 1 << 30,
+	DMA_FRAME_IRQ		= DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+	DMA_FF_UNDERFLOW	= 1 << 29,
+	GRA_FRAME_IRQ0		= 1 << 27,
+	GRA_FRAME_IRQ1		= 1 << 26,
+	GRA_FRAME_IRQ		= GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+	GRA_FF_UNDERFLOW	= 1 << 25,
+	VSYNC_IRQ		= 1 << 23,
+	DUMB_FRAMEDONE		= 1 << 22,
+	TWC_FRAMEDONE		= 1 << 21,
+	HWC_FRAMEDONE		= 1 << 20,
+	SLV_IRQ			= 1 << 19,
+	SPI_IRQ			= 1 << 18,
+	PWRDN_IRQ		= 1 << 17,
+	ERR_IRQ			= 1 << 16,
+	DMA_FRAME_IRQ0_LEVEL	= 1 << 15,
+	DMA_FRAME_IRQ1_LEVEL	= 1 << 14,
+	DMA_FRAME_CNT_ISR	= 3 << 12,
+	GRA_FRAME_IRQ0_LEVEL	= 1 << 11,
+	GRA_FRAME_IRQ1_LEVEL	= 1 << 10,
+	GRA_FRAME_CNT_ISR	= 3 << 8,
+	VSYNC_IRQ_LEVEL		= 1 << 7,
+	DUMB_FRAMEDONE_LEVEL	= 1 << 6,
+	TWC_FRAMEDONE_LEVEL	= 1 << 5,
+	HWC_FRAMEDONE_LEVEL	= 1 << 4,
+	SLV_FF_EMPTY		= 1 << 3,
+	DMA_FF_ALLEMPTY		= 1 << 2,
+	GRA_FF_ALLEMPTY		= 1 << 1,
+	PWRDN_IRQ_LEVEL		= 1 << 0,
+};
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644
index 0000000..bd8c456
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_ioctlP.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644
index 0000000..d685a54
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+	struct drm_connector conn;
+	const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
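+/*
+ * Return the encoder currently attached to the connector, falling back
+ * to the connector's first possible encoder if none is attached yet.
+ */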
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = conn->encoder;
+
+	return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+	struct drm_connector *conn, bool force)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+	enum drm_connector_status status = connector_status_disconnected;
+
+	if (dconn->type->detect) {
+		status = dconn->type->detect(conn, force);
+	} else {
+		struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+		if (enc)
+			status = encoder_helper_funcs(enc)->detect(enc, conn);
+	}
+
+	return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	drm_sysfs_connector_remove(conn);
+	drm_connector_cleanup(conn);
+	kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+	if (!dconn->type->set_property)
+		return -EINVAL;
+
+	return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.detect		= armada_drm_connector_detect,
+	.destroy	= armada_drm_connector_destroy,
+	.set_property	= armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+	encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int valid = MODE_BAD;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		valid = slave->slave_funcs->mode_valid(encoder, mode);
+	}
+	return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value)
+{
+	struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+	int rc = -EINVAL;
+
+	if (encoder) {
+		struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+		rc = slave->slave_funcs->set_property(encoder, conn, property,
+						      value);
+	}
+	return rc;
+}
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data)
+{
+	struct armada_connector *dconn;
+	int ret;
+
+	dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+	if (!dconn)
+		return -ENOMEM;
+
+	dconn->type = type;
+
+	ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+				 type->connector_type);
+	if (ret) {
+		DRM_ERROR("unable to init connector\n");
+		goto err_destroy_dconn;
+	}
+
+	ret = type->create(&dconn->conn, data);
+	if (ret)
+		goto err_conn;
+
+	ret = drm_sysfs_connector_add(&dconn->conn);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+ err_sysfs:
+	if (dconn->conn.encoder)
+		dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+	drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+	kfree(dconn);
+	return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644
index 0000000..4126d43
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_output.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+	((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
+struct armada_output_type {
+	int connector_type;
+	enum drm_connector_status (*detect)(struct drm_connector *, bool);
+	int (*create)(struct drm_connector *, const void *);
+	int (*set_property)(struct drm_connector *, struct drm_property *,
+			    uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+	const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+	struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+	struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+	const struct armada_output_type *type, const void *data);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644
index 0000000..c5b06fd
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+	uint32_t colorkey_yr;
+	uint32_t colorkey_ug;
+	uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+	int16_t  brightness;
+	uint16_t contrast;
+	uint16_t saturation;
+	uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+	struct drm_plane base;
+	spinlock_t lock;
+	struct drm_framebuffer *old_fb;
+	uint32_t src_hw;
+	uint32_t dst_hw;
+	uint32_t dst_yx;
+	uint32_t ctrl0;
+	struct {
+		struct armada_vbl_event update;
+		struct armada_regs regs[13];
+		wait_queue_head_t wait;
+	} vbl;
+	struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
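+/*
+ * Program the plane's colour-key values, colour-key mode and
+ * brightness/contrast/saturation settings into the CRTC's SPU registers.
+ */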
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+	struct armada_crtc *dcrtc)
+{
+	writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+	writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+	writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+	writel_relaxed(prop->brightness << 16 | prop->contrast,
+		       dcrtc->base + LCD_SPU_CONTRAST);
+	/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+	writel_relaxed(prop->saturation << 16,
+		       dcrtc->base + LCD_SPU_SATURATION);
+	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+		     CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+		     dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
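+/*
+ * Vblank callback: apply the queued register updates and hand the
+ * previously displayed framebuffer to the unreference worker.
+ */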
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+	struct armada_plane *dplane = data;
+	struct drm_framebuffer *fb;
+
+	armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+	spin_lock(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock(&dplane->lock);
+
+	if (fb)
+		armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
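+/*
+ * Return how much of the range [start, start + size) remains visible
+ * once clipped to [0, max].
+ */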
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+	int end = start + size;
+	if (end < 0)
+		return 0;
+	if (start < 0)
+		start = 0;
+	return (unsigned)end > max ? max - start : end - start;
+}
+
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+	struct drm_framebuffer *fb,
+	int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+	uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	uint32_t val, ctrl0;
+	unsigned idx = 0;
+	int ret;
+
+	crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+	crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+	ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+		CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+		CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+	/* Does the position/size result in nothing to display? */
+	if (crtc_w == 0 || crtc_h == 0) {
+		ctrl0 &= ~CFG_DMA_ENA;
+	}
+
+	/*
+	 * FIXME: if the starting point is off screen, we need to
+	 * adjust src_x, src_y, src_w, src_h appropriately, and
+	 * according to the scale.
+	 */
+
+	if (!dcrtc->plane) {
+		dcrtc->plane = plane;
+		armada_ovl_update_attr(&dplane->prop, dcrtc);
+	}
+
+	/* FIXME: overlay on an interlaced display */
+	/* Just updating the position/size? */
+	if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+		val = (src_h & 0xffff0000) | src_w >> 16;
+		dplane->src_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+		val = crtc_h << 16 | crtc_w;
+		dplane->dst_hw = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+		val = crtc_y << 16 | crtc_x;
+		dplane->dst_yx = val;
+		writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+		return 0;
+	} else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+		/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+		armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+			       dcrtc->base + LCD_SPU_SRAM_PARA1);
+	}
+
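+	/*
+	 * Wait for any register update queued for a previous vblank to be
+	 * consumed before reusing dplane->vbl.regs below.
+	 */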
+	ret = wait_event_timeout(dplane->vbl.wait,
+				 list_empty(&dplane->vbl.update.node),
+				 HZ/25);
+	if (ret < 0)
+		return ret;
+
+	if (plane->fb != fb) {
+		struct armada_gem_object *obj = drm_fb_obj(fb);
+		uint32_t sy, su, sv;
+
+		/*
+		 * Take a reference on the new framebuffer - we want to
+		 * hold on to it while the hardware is displaying it.
+		 */
+		drm_framebuffer_reference(fb);
+
+		if (plane->fb) {
+			struct drm_framebuffer *older_fb;
+
+			spin_lock_irq(&dplane->lock);
+			older_fb = dplane->old_fb;
+			dplane->old_fb = plane->fb;
+			spin_unlock_irq(&dplane->lock);
+			if (older_fb)
+				armada_drm_queue_unref_work(dcrtc->crtc.dev,
+							    older_fb);
+		}
+
+		src_y >>= 16;
+		src_x >>= 16;
+		sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+			src_x * fb->bits_per_pixel / 8;
+		su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+			src_x;
+		sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+			src_x;
+
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V0);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+				     LCD_SPU_DMA_START_ADDR_Y1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, su,
+				     LCD_SPU_DMA_START_ADDR_U1);
+		armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+				     LCD_SPU_DMA_START_ADDR_V1);
+
+		val = fb->pitches[0] << 16 | fb->pitches[0];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_YC);
+		val = fb->pitches[1] << 16 | fb->pitches[2];
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_PITCH_UV);
+	}
+
+	val = (src_h & 0xffff0000) | src_w >> 16;
+	if (dplane->src_hw != val) {
+		dplane->src_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_HPXL_VLN);
+	}
+	val = crtc_h << 16 | crtc_w;
+	if (dplane->dst_hw != val) {
+		dplane->dst_hw = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DZM_HPXL_VLN);
+	}
+	val = crtc_y << 16 | crtc_x;
+	if (dplane->dst_yx != val) {
+		dplane->dst_yx = val;
+		armada_reg_queue_set(dplane->vbl.regs, idx, val,
+				     LCD_SPU_DMA_OVSA_HPXL_VLN);
+	}
+	if (dplane->ctrl0 != ctrl0) {
+		dplane->ctrl0 = ctrl0;
+		armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+			CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+			CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+			CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+			CFG_YUV2RGB) | CFG_DMA_ENA,
+			LCD_SPU_DMA_CTRL0);
+	}
+	if (idx) {
+		armada_reg_queue_end(dplane->vbl.regs, idx);
+		armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+	}
+	return 0;
+}
+
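+/*
+ * Disable the overlay: remove any pending vblank update, clear the DMA
+ * enable bit, power the Y/U/V FIFOs back down and drop the framebuffer
+ * references we hold.
+ */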
+static int armada_plane_disable(struct drm_plane *plane)
+{
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	struct drm_framebuffer *fb;
+	struct armada_crtc *dcrtc;
+
+	if (!dplane->base.crtc)
+		return 0;
+
+	dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+	dcrtc->plane = NULL;
+
+	spin_lock_irq(&dcrtc->irq_lock);
+	armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+	armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+	dplane->ctrl0 = 0;
+	spin_unlock_irq(&dcrtc->irq_lock);
+
+	/* Power down the Y/U/V FIFOs */
+	armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+		       dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	spin_lock_irq(&dplane->lock);
+	fb = dplane->old_fb;
+	dplane->old_fb = NULL;
+	spin_unlock_irq(&dplane->lock);
+	if (fb)
+		drm_framebuffer_unreference(fb);
+
+	return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+	kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+	struct drm_property *property, uint64_t val)
+{
+	struct armada_private *priv = plane->dev->dev_private;
+	struct armada_plane *dplane = drm_to_armada_plane(plane);
+	bool update_attr = false;
+
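+	/*
+	 * Each colorkey Y/U/V register packs, per component: the maximum in
+	 * bits 31:24, the minimum in bits 23:16, the key value in bits 15:8
+	 * and the alpha in bits 7:0.
+	 */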
+	if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+		dplane->prop.colorkey_yr = CCC(K2R(val));
+		dplane->prop.colorkey_ug = CCC(K2G(val));
+		dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+		update_attr = true;
+	} else if (property == priv->colorkey_min_prop) {
+		dplane->prop.colorkey_yr &= ~0x00ff0000;
+		dplane->prop.colorkey_yr |= K2R(val) << 16;
+		dplane->prop.colorkey_ug &= ~0x00ff0000;
+		dplane->prop.colorkey_ug |= K2G(val) << 16;
+		dplane->prop.colorkey_vb &= ~0x00ff0000;
+		dplane->prop.colorkey_vb |= K2B(val) << 16;
+		update_attr = true;
+	} else if (property == priv->colorkey_max_prop) {
+		dplane->prop.colorkey_yr &= ~0xff000000;
+		dplane->prop.colorkey_yr |= K2R(val) << 24;
+		dplane->prop.colorkey_ug &= ~0xff000000;
+		dplane->prop.colorkey_ug |= K2G(val) << 24;
+		dplane->prop.colorkey_vb &= ~0xff000000;
+		dplane->prop.colorkey_vb |= K2B(val) << 24;
+		update_attr = true;
+	} else if (property == priv->colorkey_val_prop) {
+		dplane->prop.colorkey_yr &= ~0x0000ff00;
+		dplane->prop.colorkey_yr |= K2R(val) << 8;
+		dplane->prop.colorkey_ug &= ~0x0000ff00;
+		dplane->prop.colorkey_ug |= K2G(val) << 8;
+		dplane->prop.colorkey_vb &= ~0x0000ff00;
+		dplane->prop.colorkey_vb |= K2B(val) << 8;
+		update_attr = true;
+	} else if (property == priv->colorkey_alpha_prop) {
+		dplane->prop.colorkey_yr &= ~0x000000ff;
+		dplane->prop.colorkey_yr |= K2R(val);
+		dplane->prop.colorkey_ug &= ~0x000000ff;
+		dplane->prop.colorkey_ug |= K2G(val);
+		dplane->prop.colorkey_vb &= ~0x000000ff;
+		dplane->prop.colorkey_vb |= K2B(val);
+		update_attr = true;
+	} else if (property == priv->colorkey_mode_prop) {
+		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		update_attr = true;
+	} else if (property == priv->brightness_prop) {
+		dplane->prop.brightness = val - 256;
+		update_attr = true;
+	} else if (property == priv->contrast_prop) {
+		dplane->prop.contrast = val;
+		update_attr = true;
+	} else if (property == priv->saturation_prop) {
+		dplane->prop.saturation = val;
+		update_attr = true;
+	}
+
+	if (update_attr && dplane->base.crtc)
+		armada_ovl_update_attr(&dplane->prop,
+				       drm_to_armada_crtc(dplane->base.crtc));
+
+	return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+	.update_plane	= armada_plane_update,
+	.disable_plane	= armada_plane_disable,
+	.destroy	= armada_plane_destroy,
+	.set_property	= armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YVU420,
+	DRM_FORMAT_YUV422,
+	DRM_FORMAT_YVU422,
+	DRM_FORMAT_VYUY,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_ABGR1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+	{ CKMODE_DISABLE, "disabled" },
+	{ CKMODE_Y,       "Y component" },
+	{ CKMODE_U,       "U component" },
+	{ CKMODE_V,       "V component" },
+	{ CKMODE_RGB,     "RGB" },
+	{ CKMODE_R,       "R component" },
+	{ CKMODE_G,       "G component" },
+	{ CKMODE_B,       "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+	struct armada_private *priv = dev->dev_private;
+
+	if (priv->colorkey_prop)
+		return 0;
+
+	priv->colorkey_prop = drm_property_create_range(dev, 0,
+				"colorkey", 0, 0xffffff);
+	priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+				"colorkey_min", 0, 0xffffff);
+	priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+				"colorkey_max", 0, 0xffffff);
+	priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+				"colorkey_val", 0, 0xffffff);
+	priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+				"colorkey_alpha", 0, 0xffffff);
+	priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+				"colorkey_mode",
+				armada_drm_colorkey_enum_list,
+				ARRAY_SIZE(armada_drm_colorkey_enum_list));
+	priv->brightness_prop = drm_property_create_range(dev, 0,
+				"brightness", 0, 256 + 255);
+	priv->contrast_prop = drm_property_create_range(dev, 0,
+				"contrast", 0, 0x7fff);
+	priv->saturation_prop = drm_property_create_range(dev, 0,
+				"saturation", 0, 0x7fff);
+
+	if (!priv->colorkey_prop)
+		return -ENOMEM;
+
+	return 0;
+}
+
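+/*
+ * Create the overlay plane, register it with the DRM core for the given
+ * CRTCs, and attach the colour-key, brightness, contrast and saturation
+ * properties with their default values.
+ */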
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+	struct armada_private *priv = dev->dev_private;
+	struct drm_mode_object *mobj;
+	struct armada_plane *dplane;
+	int ret;
+
+	ret = armada_overlay_create_properties(dev);
+	if (ret)
+		return ret;
+
+	dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+	if (!dplane)
+		return -ENOMEM;
+
+	spin_lock_init(&dplane->lock);
+	init_waitqueue_head(&dplane->vbl.wait);
+	armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+				  dplane);
+
+	drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+		       armada_formats, ARRAY_SIZE(armada_formats), false);
+
+	dplane->prop.colorkey_yr = 0xfefefe00;
+	dplane->prop.colorkey_ug = 0x01010100;
+	dplane->prop.colorkey_vb = 0x01010100;
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.brightness = 0;
+	dplane->prop.contrast = 0x4000;
+	dplane->prop.saturation = 0x4000;
+
+	mobj = &dplane->base.base;
+	drm_object_attach_property(mobj, priv->colorkey_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_min_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_max_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_val_prop,
+				   0x0101fe);
+	drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+				   0x000000);
+	drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+				   CKMODE_RGB);
+	drm_object_attach_property(mobj, priv->brightness_prop, 256);
+	drm_object_attach_property(mobj, priv->contrast_prop,
+				   dplane->prop.contrast);
+	drm_object_attach_property(mobj, priv->saturation_prop,
+				   dplane->prop.saturation);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644
index 0000000..00d0fac
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+	struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+	int count = 0;
+
+	if (enc) {
+		struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+		count = slave->slave_funcs->get_modes(enc, conn);
+	}
+
+	return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+	struct drm_encoder_slave *slave = to_encoder_slave(enc);
+	struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+	if (slave->slave_funcs)
+		slave->slave_funcs->destroy(enc);
+	if (client)
+		i2c_put_adapter(client->adapter);
+
+	drm_encoder_cleanup(&slave->base);
+	kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+	.destroy	= armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+	.get_modes	= armada_drm_slave_get_modes,
+	.mode_valid	= armada_drm_slave_encoder_mode_valid,
+	.best_encoder	= armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+	.dpms = drm_i2c_encoder_dpms,
+	.save = drm_i2c_encoder_save,
+	.restore = drm_i2c_encoder_restore,
+	.mode_fixup = drm_i2c_encoder_mode_fixup,
+	.prepare = drm_i2c_encoder_prepare,
+	.commit = drm_i2c_encoder_commit,
+	.mode_set = drm_i2c_encoder_mode_set,
+	.detect = drm_i2c_encoder_detect,
+};
+
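+/*
+ * Create the i2c slave encoder described by the supplied configuration:
+ * look up the i2c adapter, initialise the DRM encoder and its i2c slave
+ * driver, then attach the result to the connector.
+ */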
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+	const struct armada_drm_slave_config *config = data;
+	struct drm_encoder_slave *slave;
+	struct i2c_adapter *adap;
+	int ret;
+
+	conn->interlace_allowed = config->interlace_allowed;
+	conn->doublescan_allowed = config->doublescan_allowed;
+	conn->polled = config->polled;
+
+	drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	if (!slave)
+		return -ENOMEM;
+
+	slave->base.possible_crtcs = config->crtcs;
+
+	adap = i2c_get_adapter(config->i2c_adapter_id);
+	if (!adap) {
+		kfree(slave);
+		return -EPROBE_DEFER;
+	}
+
+	ret = drm_encoder_init(conn->dev, &slave->base,
+			       &armada_drm_slave_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS);
+	if (ret) {
+		DRM_ERROR("unable to init encoder\n");
+		i2c_put_adapter(adap);
+		kfree(slave);
+		return ret;
+	}
+
+	ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+	i2c_put_adapter(adap);
+	if (ret) {
+		DRM_ERROR("unable to init encoder slave\n");
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+	ret = slave->slave_funcs->create_resources(&slave->base, conn);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+	if (ret) {
+		armada_drm_slave_destroy(&slave->base);
+		return ret;
+	}
+
+	conn->encoder = &slave->base;
+
+	return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+	.connector_type	= DRM_MODE_CONNECTOR_HDMIA,
+	.create		= armada_drm_conn_slave_create,
+	.set_property	= armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *config)
+{
+	return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644
index 0000000..bf2374c
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+	int i2c_adapter_id;
+	uint32_t crtcs;
+	uint8_t polled;
+	bool interlace_allowed;
+	bool doublescan_allowed;
+	struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+	const struct armada_drm_slave_config *);
+
+#endif
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51e..8a784c4 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@
 	select FB_SYS_FILLRECT
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 32e270d..5137f15 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -211,7 +211,6 @@
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = ast_gem_init_object,
 	.gem_free_object = ast_gem_free_object,
 	.dumb_create = ast_dumb_create,
 	.dumb_map_offset = ast_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8492b68e..9833a1b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -323,7 +323,6 @@
 			   struct drm_device *dev,
 			   struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 				struct drm_device *dev,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7f6152d..af0b868 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,12 +449,6 @@
 	return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22..9864559 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 138364d..953fc8a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -97,7 +97,6 @@
 	.major = DRIVER_MAJOR,
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
-	.gem_init_object = cirrus_gem_init_object,
 	.gem_free_object = cirrus_gem_free_object,
 	.dumb_create = cirrus_dumb_create,
 	.dumb_map_offset = cirrus_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 9b0bb91..b6aded7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -191,7 +191,6 @@
 		      struct pci_dev *pdev,
 		      uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 			    struct drm_device *dev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index f130a53..78e76f2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,12 +255,6 @@
 	return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b2..adabc3d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -494,13 +494,12 @@
 
 int cirrus_vga_get_modes(struct drm_connector *connector)
 {
-	/* Just add a static list of modes */
-	drm_add_modes_noedid(connector, 640, 480);
-	drm_add_modes_noedid(connector, 800, 600);
-	drm_add_modes_noedid(connector, 1024, 768);
-	drm_add_modes_noedid(connector, 1280, 1024);
+	int count;
 
-	return 4;
+	/* Just add a static list of modes */
+	count = drm_add_modes_noedid(connector, 1280, 1024);
+	drm_set_preferred_mode(connector, 1024, 768);
+	return count;
 }
 
 static int cirrus_vga_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 224ff96..a4b017b 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -334,7 +334,6 @@
 
 	mutex_lock(&dev->ctxlist_mutex);
 	list_add(&ctx_entry->head, &dev->ctxlist);
-	++dev->ctx_count;
 	mutex_unlock(&dev->ctxlist_mutex);
 
 	return 0;
@@ -432,7 +431,6 @@
 			if (pos->handle == ctx->handle) {
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bff2fa9..d6cf77c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -202,6 +202,7 @@
 	{ DRM_MODE_CONNECTOR_TV, "TV" },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
 	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
 	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+	{ DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1301,7 +1303,7 @@
 }
 
 /**
- * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
  * @out: drm_display_mode to return to the user
  * @in: drm_mode_modeinfo to use
  *
@@ -1317,6 +1319,9 @@
 	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
 		return -ERANGE;
 
+	if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+		return -EINVAL;
+
 	out->clock = in->clock;
 	out->hdisplay = in->hdisplay;
 	out->hsync_start = in->hsync_start;
@@ -1552,7 +1557,7 @@
 	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -1579,6 +1584,19 @@
 	return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+					 const struct drm_file *file_priv)
+{
+	/*
+	 * If user-space hasn't configured the driver to expose the stereo 3D
+	 * modes, don't expose them.
+	 */
+	if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+		return false;
+
+	return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1623,7 +1641,7 @@
 	obj = drm_mode_object_find(dev, out_resp->connector_id,
 				   DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	connector = obj_to_connector(obj);
@@ -1644,7 +1662,8 @@
 
 	/* delayed so we get modes regardless of pre-fill_modes state */
 	list_for_each_entry(mode, &connector->modes, head)
-		mode_count++;
+		if (drm_mode_expose_to_userspace(mode, file_priv))
+			mode_count++;
 
 	out_resp->connector_id = connector->base.id;
 	out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
 		list_for_each_entry(mode, &connector->modes, head) {
+			if (!drm_mode_expose_to_userspace(mode, file_priv))
+				continue;
+
 			drm_crtc_convert_to_umode(&u_mode, mode);
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
@@ -1735,7 +1757,7 @@
 	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
 				   DRM_MODE_OBJECT_ENCODER);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	encoder = obj_to_encoder(obj);
@@ -2040,6 +2062,45 @@
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+				   int x, int y,
+				   const struct drm_display_mode *mode,
+				   const struct drm_framebuffer *fb)
+
+{
+	int hdisplay, vdisplay;
+
+	hdisplay = mode->hdisplay;
+	vdisplay = mode->vdisplay;
+
+	if (drm_mode_is_stereo(mode)) {
+		struct drm_display_mode adjusted = *mode;
+
+		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+		hdisplay = adjusted.crtc_hdisplay;
+		vdisplay = adjusted.crtc_vdisplay;
+	}
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    x > fb->width - hdisplay ||
+	    y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, x, y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		return -ENOSPC;
+	}
+
+	return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2080,14 +2141,13 @@
 				   DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
-		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -2104,7 +2164,7 @@
 			if (!fb) {
 				DRM_DEBUG_KMS("Unknown FB ID%d\n",
 						crtc_req->fb_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 		}
@@ -2123,23 +2183,11 @@
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		hdisplay = mode->hdisplay;
-		vdisplay = mode->vdisplay;
-
-		if (crtc->invert_dimensions)
-			swap(hdisplay, vdisplay);
-
-		if (hdisplay > fb->width ||
-		    vdisplay > fb->height ||
-		    crtc_req->x > fb->width - hdisplay ||
-		    crtc_req->y > fb->height - vdisplay) {
-			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-				      fb->width, fb->height,
-				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-				      crtc->invert_dimensions ? " (inverted)" : "");
-			ret = -ENOSPC;
+		ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+					      mode, fb);
+		if (ret)
 			goto out;
-		}
+
 	}
 
 	if (crtc_req->count_connectors == 0 && mode) {
@@ -2184,7 +2232,7 @@
 			if (!obj) {
 				DRM_DEBUG_KMS("Connector id %d unknown\n",
 						out_id);
-				ret = -EINVAL;
+				ret = -ENOENT;
 				goto out;
 			}
 			connector = obj_to_connector(obj);
@@ -2232,7 +2280,7 @@
 	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 
@@ -2441,6 +2489,8 @@
 	case DRM_FORMAT_YVU444:
 		return 0;
 	default:
+		DRM_DEBUG_KMS("invalid pixel format %s\n",
+			      drm_get_format_name(r->pixel_format));
 		return -EINVAL;
 	}
 }
@@ -2606,7 +2656,7 @@
 	mutex_unlock(&dev->mode_config.fb_lock);
 	mutex_unlock(&file_priv->fbs_lock);
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 /**
@@ -2634,7 +2684,7 @@
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	r->height = fb->height;
 	r->width = fb->width;
@@ -2679,7 +2729,7 @@
 
 	fb = drm_framebuffer_lookup(dev, r->fb_id);
 	if (!fb)
-		return -EINVAL;
+		return -ENOENT;
 
 	num_clips = r->num_clips;
 	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -3011,7 +3061,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	property = obj_to_property(obj);
@@ -3140,7 +3190,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto done;
 	}
 	blob = obj_to_blob(obj);
@@ -3301,7 +3351,7 @@
 
 	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	if (!obj->properties) {
@@ -3354,8 +3404,10 @@
 	drm_modeset_lock_all(dev);
 
 	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
-	if (!arg_obj)
+	if (!arg_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	if (!arg_obj->properties)
 		goto out;
 
@@ -3368,8 +3420,10 @@
 
 	prop_obj = drm_mode_object_find(dev, arg->prop_id,
 					DRM_MODE_OBJECT_PROPERTY);
-	if (!prop_obj)
+	if (!prop_obj) {
+		ret = -ENOENT;
 		goto out;
+	}
 	property = obj_to_property(prop_obj);
 
 	if (!drm_property_change_is_valid(property, arg->value))
@@ -3454,7 +3508,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3513,7 +3567,7 @@
 	drm_modeset_lock_all(dev);
 	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 	crtc = obj_to_crtc(obj);
@@ -3556,7 +3610,6 @@
 	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
-	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3568,7 +3621,7 @@
 
 	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj)
-		return -EINVAL;
+		return -ENOENT;
 	crtc = obj_to_crtc(obj);
 
 	mutex_lock(&crtc->mutex);
@@ -3585,26 +3638,15 @@
 		goto out;
 
 	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
-	if (!fb)
-		goto out;
-
-	hdisplay = crtc->mode.hdisplay;
-	vdisplay = crtc->mode.vdisplay;
-
-	if (crtc->invert_dimensions)
-		swap(hdisplay, vdisplay);
-
-	if (hdisplay > fb->width ||
-	    vdisplay > fb->height ||
-	    crtc->x > fb->width - hdisplay ||
-	    crtc->y > fb->height - vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-			      crtc->invert_dimensions ? " (inverted)" : "");
-		ret = -ENOSPC;
+	if (!fb) {
+		ret = -ENOENT;
 		goto out;
 	}
 
+	ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+	if (ret)
+		goto out;
+
 	if (crtc->fb->pixel_format != fb->pixel_format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
@@ -3788,7 +3830,8 @@
 		*bpp = 32;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format\n");
+		DRM_DEBUG_KMS("unsupported pixel format %s\n",
+			      drm_get_format_name(format));
 		*depth = 0;
 		*bpp = 0;
 		break;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c722c3b..01361ab 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  * 						connector list
@@ -76,7 +80,8 @@
 {
 	struct drm_display_mode *mode;
 
-	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+		      DRM_MODE_FLAG_3D_MASK))
 		return;
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@
 		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
 				!(flags & DRM_MODE_FLAG_DBLSCAN))
 			mode->status = MODE_NO_DBLESCAN;
+		if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+				!(flags & DRM_MODE_FLAG_3D_MASK))
+			mode->status = MODE_NO_STEREO;
 	}
 
 	return;
@@ -105,9 +113,9 @@
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@
 		mode_flags |= DRM_MODE_FLAG_INTERLACE;
 	if (connector->doublescan_allowed)
 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	if (connector->stereo_allowed)
+		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 	drm_mode_validate_flag(connector, mode_flags);
 
 	list_for_each_entry(mode, &connector->modes, head) {
@@ -395,22 +405,25 @@
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_display_mode *adjusted_mode, saved_mode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
+	bool saved_enabled;
 	struct drm_encoder *encoder;
 	bool ret = true;
 
+	saved_enabled = crtc->enabled;
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
 	if (!crtc->enabled)
 		return true;
 
 	adjusted_mode = drm_mode_duplicate(dev, mode);
-	if (!adjusted_mode)
+	if (!adjusted_mode) {
+		crtc->enabled = saved_enabled;
 		return false;
+	}
 
-	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -529,7 +542,7 @@
 done:
 	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
-		crtc->hwmode = saved_hwmode;
+		crtc->enabled = saved_enabled;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -557,6 +570,14 @@
 				continue;
 
 			connector->encoder = NULL;
+
+			/*
+			 * drm_helper_disable_unused_functions() ought to be
+			 * doing this, but since we've decoupled the encoder
+			 * from the connector above, the required connection
+			 * from the connector above, the required connection
+			 * between them is no longer available.
+			connector->dpms = DRM_MODE_DPMS_OFF;
 		}
 	}
 
@@ -583,9 +604,8 @@
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
 	struct drm_device *dev;
-	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+	struct drm_crtc *new_crtc;
 	struct drm_encoder *save_encoders, *new_encoder, *encoder;
-	struct drm_framebuffer *old_fb = NULL;
 	bool mode_changed = false; /* if true do a full mode set */
 	bool fb_changed = false; /* if true and !mode_changed just do a flip */
 	struct drm_connector *save_connectors, *connector;
@@ -621,38 +641,28 @@
 
 	dev = set->crtc->dev;
 
-	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
-	 * connector data. */
-	save_crtcs = kzalloc(dev->mode_config.num_crtc *
-			     sizeof(struct drm_crtc), GFP_KERNEL);
-	if (!save_crtcs)
-		return -ENOMEM;
-
+	/*
+	 * Allocate space for the backup of all (non-pointer) encoder and
+	 * connector data.
+	 */
 	save_encoders = kzalloc(dev->mode_config.num_encoder *
 				sizeof(struct drm_encoder), GFP_KERNEL);
-	if (!save_encoders) {
-		kfree(save_crtcs);
+	if (!save_encoders)
 		return -ENOMEM;
-	}
 
 	save_connectors = kzalloc(dev->mode_config.num_connector *
 				sizeof(struct drm_connector), GFP_KERNEL);
 	if (!save_connectors) {
-		kfree(save_crtcs);
 		kfree(save_encoders);
 		return -ENOMEM;
 	}
 
-	/* Copy data. Note that driver private data is not affected.
+	/*
+	 * Copy data. Note that driver private data is not affected.
 	 * Should anything bad happen only the expected state is
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		save_crtcs[count++] = *crtc;
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		save_encoders[count++] = *encoder;
 	}
@@ -775,19 +785,17 @@
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
-		if (set->crtc->enabled) {
+		if (drm_helper_crtc_in_use(set->crtc)) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
-			old_fb = set->crtc->fb;
 			set->crtc->fb = set->fb;
 			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
 						      set->x, set->y,
-						      old_fb)) {
+						      save_set.fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
-				set->crtc->fb = old_fb;
+				set->crtc->fb = save_set.fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -802,31 +810,24 @@
 	} else if (fb_changed) {
 		set->crtc->x = set->x;
 		set->crtc->y = set->y;
-
-		old_fb = set->crtc->fb;
-		if (set->crtc->fb != set->fb)
-			set->crtc->fb = set->fb;
+		set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
-						set->x, set->y, old_fb);
+						set->x, set->y, save_set.fb);
 		if (ret != 0) {
-			set->crtc->fb = old_fb;
+			set->crtc->x = save_set.x;
+			set->crtc->y = save_set.y;
+			set->crtc->fb = save_set.fb;
 			goto fail;
 		}
 	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return 0;
 
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		*crtc = save_crtcs[count++];
-	}
-
-	count = 0;
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		*encoder = save_encoders[count++];
 	}
@@ -844,7 +845,6 @@
 
 	kfree(save_connectors);
 	kfree(save_encoders);
-	kfree(save_crtcs);
 	return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1125,14 +1125,14 @@
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
-void drm_helper_hpd_irq_event(struct drm_device *dev)
+bool drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	enum drm_connector_status old_status;
 	bool changed = false;
 
 	if (!dev->mode_config.poll_enabled)
-		return;
+		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -1157,5 +1157,7 @@
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
+
+	return changed;
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index a05087c..b4b51d4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -42,7 +42,7 @@
  * Initialization, etc.
  **************************************************/
 
-static struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_info_list drm_debugfs_list[] = {
 	{"name", drm_name_info, 0},
 	{"vm", drm_vm_info, 0},
 	{"clients", drm_clients_info, 0},
@@ -84,7 +84,7 @@
  * Create a given set of debugfs files represented by an array of
  * drm_info_list entries in the given root directory.
  */
-int drm_debugfs_create_files(struct drm_info_list *files, int count,
+int drm_debugfs_create_files(const struct drm_info_list *files, int count,
 			     struct dentry *root, struct drm_minor *minor)
 {
 	struct drm_device *dev = minor->dev;
@@ -188,7 +188,7 @@
  *
  * Remove all debugfs entries created by debugfs_init().
  */
-int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
 			     struct drm_minor *minor)
 {
 	struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 89e1966..9e978aa 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -228,12 +228,12 @@
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 	return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 			     int lane)
 {
 	int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@
 	return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count)
 {
 	u8 lane_align;
@@ -262,7 +262,7 @@
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count)
 {
 	int lane;
@@ -277,7 +277,7 @@
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane)
 {
 	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(100);
 	else
@@ -311,7 +311,7 @@
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 		udelay(400);
 	else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05ad9ba..d9137e4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
@@ -69,6 +69,7 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	atomic_set(&dev->ioctl_count, 0);
-	atomic_set(&dev->vma_count, 0);
-
-	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-		atomic_set(&dev->counts[i], 0);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	DRM_DEBUG("\n");
-
-	if (dev->driver->lastclose)
-		dev->driver->lastclose(dev);
-	DRM_DEBUG("driver lastclose completed\n");
-
-	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-
-	/* Clear vma list (only built for debugging) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-
-	drm_legacy_dma_takedown(dev);
-
-	dev->dev_mapping = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
-	drm_legacy_dev_reinit(dev);
-
-	DRM_DEBUG("lastclose completed\n");
-	return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
 	.owner = THIS_MODULE,
@@ -385,7 +316,6 @@
 		return -ENODEV;
 
 	atomic_inc(&dev->ioctl_count);
-	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -473,7 +403,7 @@
 
       err_i1:
 	if (!ioctl)
-		DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
 			  task_pid_nr(current),
 			  (long)old_encode_dev(file_priv->minor->device),
 			  file_priv->authenticated, cmd, nr);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 830f750..2f325bc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -458,6 +458,15 @@
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 };
 
+/*
+ * These more or less come from the DMT spec.  The 720x400 modes are
+ * inferred from historical 80x25 practice.  The 640x480@67 and 832x624@75
+ * modes are old-school Mac modes.  The EDID spec says the 1152x864@75 mode
+ * should be 1152x870, again for the Mac, but instead we use the x864 DMT
+ * mode.
+ *
+ * The DMT modes have been fact-checked; the rest are mild guesses.
+ */
 static const struct drm_display_mode edid_est_modes[] = {
 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
 		   968, 1056, 0, 600, 601, 605, 628, 0,
@@ -560,7 +569,7 @@
 	{ 1600, 1200, 75, 0 },
 	{ 1600, 1200, 85, 0 },
 	{ 1792, 1344, 60, 0 },
-	{ 1792, 1344, 85, 0 },
+	{ 1792, 1344, 75, 0 },
 	{ 1856, 1392, 60, 0 },
 	{ 1856, 1392, 75, 0 },
 	{ 1920, 1200, 60, 1 },
@@ -1264,6 +1273,18 @@
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -2068,7 +2089,7 @@
 	u8 *est = ((u8 *)timing) + 5;
 
 	for (i = 0; i < 6; i++) {
-		for (j = 7; j > 0; j--) {
+		for (j = 7; j >= 0; j--) {
 			m = (i * 8) + (7 - j);
 			if (m >= ARRAY_SIZE(est3_modes))
 				break;
@@ -2404,7 +2425,7 @@
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, cea_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2453,7 +2474,7 @@
 
 		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
 		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-		    drm_mode_equal_no_clocks(to_match, hdmi_mode))
+		    drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
 			return mode + 1;
 	}
 	return 0;
@@ -2507,6 +2528,9 @@
 		if (!newmode)
 			continue;
 
+		/* Carry over the stereo flags */
+		newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
 		/*
 		 * The current mode could be either variant. Make
 		 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2577,151 @@
 	return modes;
 }
 
+struct stereo_mandatory_mode {
+	int width, height, vrefresh;
+	unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1920, 1080, 50,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1920, 1080, 60,
+	  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+	{ 1280, 720,  60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+		       const struct stereo_mandatory_mode *stereo_mode)
+{
+	unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	return mode->hdisplay == stereo_mode->width &&
+	       mode->vdisplay == stereo_mode->height &&
+	       interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+	       drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	const struct drm_display_mode *mode;
+	struct list_head stereo_modes;
+	int modes = 0, i;
+
+	INIT_LIST_HEAD(&stereo_modes);
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+			const struct stereo_mandatory_mode *mandatory;
+			struct drm_display_mode *new_mode;
+
+			if (!stereo_match_mandatory(mode,
+						    &stereo_mandatory_modes[i]))
+				continue;
+
+			mandatory = &stereo_mandatory_modes[i];
+			new_mode = drm_mode_duplicate(dev, mode);
+			if (!new_mode)
+				continue;
+
+			new_mode->flags |= mandatory->flags;
+			list_add_tail(&new_mode->head, &stereo_modes);
+			modes++;
+		}
+	}
+
+	list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+	return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+
+	vic--; /* VICs start at 1 */
+	if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+		DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+		return 0;
+	}
+
+	newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+	if (!newmode)
+		return 0;
+
+	drm_mode_probed_add(connector, newmode);
+
+	return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index > video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	struct drm_device *dev = connector->dev;
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2585,30 +2740,56 @@
 
 	/* the declared length is not long enough for the 2 first bytes
 	 * of additional video format capabilities */
-	offset += 2;
-	if (len < (8 + offset))
+	if (len < (8 + offset + 2))
 		goto out;
 
+	/* 3D_Present */
+	offset++;
+	if (db[8 + offset] & (1 << 7)) {
+		modes += add_hdmi_mandatory_stereo_modes(connector);
+
+		/* 3D_Multi_present */
+		multi_present = (db[8 + offset] & 0x60) >> 5;
+	}
+
+	offset++;
 	vic_len = db[8 + offset] >> 5;
+	hdmi_3d_len = db[8 + offset] & 0x1f;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-		struct drm_display_mode *newmode;
 		u8 vic;
 
 		vic = db[9 + offset + i];
+		modes += add_hdmi_mode(connector, vic);
+	}
+	offset += 1 + vic_len;
 
-		vic--; /* VICs start at 1 */
-		if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-			DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-			continue;
-		}
+	if (!(multi_present == 1 || multi_present == 2))
+		goto out;
 
-		newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-		if (!newmode)
-			continue;
+	if ((multi_present == 1 && len < (9 + offset)) ||
+	    (multi_present == 2 && len < (11 + offset)))
+		goto out;
 
-		drm_mode_probed_add(connector, newmode);
-		modes++;
+	if ((multi_present == 1 && hdmi_3d_len < 2) ||
+	    (multi_present == 2 && hdmi_3d_len < 4))
+		goto out;
+
+	/* 3D_Structure_ALL */
+	structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+	/* check if 3D_MASK is present */
+	if (multi_present == 2)
+		mask = (db[10 + offset] << 8) | db[11 + offset];
+	else
+		mask = 0xffff;
+
+	for (i = 0; i < 16; i++) {
+		if (mask & (1 << i))
+			modes += add_3d_struct_modes(connector,
+						     structure_all,
+						     video_db,
+						     video_len, i);
 	}
 
 out:
@@ -2668,8 +2849,8 @@
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db;
-	u8 dbl;
+	const u8 *db, *hdmi = NULL, *video = NULL;
+	u8 dbl, hdmi_len, video_len = 0;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2863,26 @@
 			db = &cea[i];
 			dbl = cea_db_payload_len(db);
 
-			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes(connector, db + 1, dbl);
-			else if (cea_db_is_hdmi_vsdb(db))
-				modes += do_hdmi_vsdb_modes(connector, db, dbl);
+			if (cea_db_tag(db) == VIDEO_BLOCK) {
+				video = db + 1;
+				video_len = dbl;
+				modes += do_cea_modes(connector, video, dbl);
+			}
+			else if (cea_db_is_hdmi_vsdb(db)) {
+				hdmi = db;
+				hdmi_len = dbl;
+			}
 		}
 	}
 
+	/*
+	 * We parse the HDMI VSDB after having added the cea modes as we will
+	 * be patching their flags when the sink supports stereo 3D.
+	 */
+	if (hdmi)
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+					    video_len);
+
 	return modes;
 }
 
@@ -3288,6 +3482,19 @@
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
 
+void drm_set_preferred_mode(struct drm_connector *connector,
+			   int hpref, int vpref)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (drm_mode_width(mode)  == hpref &&
+		    drm_mode_height(mode) == vpref)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+}
+EXPORT_SYMBOL(drm_set_preferred_mode);
+
 /**
  * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
  *                                              data from a DRM display mode
@@ -3321,6 +3528,33 @@
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+	u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	switch (layout) {
+	case DRM_MODE_FLAG_3D_FRAME_PACKING:
+		return HDMI_3D_STRUCTURE_FRAME_PACKING;
+	case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+		return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+	case DRM_MODE_FLAG_3D_L_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH;
+	case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+		return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+	case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+		return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+	case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+		return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+	default:
+		return HDMI_3D_STRUCTURE_INVALID;
+	}
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3338,20 +3572,29 @@
 					    const struct drm_display_mode *mode)
 {
 	int err;
+	u32 s3d_flags;
 	u8 vic;
 
 	if (!frame || !mode)
 		return -EINVAL;
 
 	vic = drm_match_hdmi_mode(mode);
-	if (!vic)
+	s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+	if (!vic && !s3d_flags)
+		return -EINVAL;
+
+	if (vic && s3d_flags)
 		return -EINVAL;
 
 	err = hdmi_vendor_infoframe_init(frame);
 	if (err < 0)
 		return err;
 
-	frame->vic = vic;
+	if (vic)
+		frame->vic = vic;
+	else
+		frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
 	return 0;
 }
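
A hedged sketch of how an encoder driver might consume the updated vendor infoframe helper for stereo modes; the wrapper function is hypothetical and the hardware-specific packing/writing step is omitted:

	static void example_send_hdmi_vendor_infoframe(const struct drm_display_mode *mode)
	{
		struct hdmi_vendor_infoframe frame;

		if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
			return;	/* neither an HDMI VIC nor a stereo 3D mode */

		/*
		 * For a mode with DRM_MODE_FLAG_3D_FRAME_PACKING set,
		 * frame.s3d_struct is now HDMI_3D_STRUCTURE_FRAME_PACKING and
		 * frame.vic stays 0; packing and sending the infoframe is
		 * hardware specific and not shown here.
		 */
	}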
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 271b42b..9081172 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -32,7 +32,7 @@
 	"from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
 	"edid/1024x768.bin",
 	"edid/1280x1024.bin",
 	"edid/1600x1200.bin",
@@ -40,7 +40,7 @@
 	"edid/1920x1080.bin",
 };
 
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
 	{
 	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
 	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@
 	},
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+	if (data_size < EDID_LENGTH)
+		return 0;
+
+	return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
 			const char *connector_name)
 {
-	const struct firmware *fw;
-	struct platform_device *pdev;
-	u8 *fwdata = NULL, *edid, *new_edid;
-	int fwsize, expected;
-	int builtin = 0, err = 0;
+	const struct firmware *fw = NULL;
+	const u8 *fwdata;
+	u8 *edid;
+	int fwsize, builtin;
 	int i, valid_extensions = 0;
 	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		DRM_ERROR("Failed to register EDID firmware platform device "
-		    "for connector \"%s\"\n", connector_name);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = request_firmware(&fw, name, &pdev->dev);
-	platform_device_unregister(pdev);
-
-	if (err) {
-		i = 0;
-		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-			i++;
-		if (i < GENERIC_EDIDS) {
-			err = 0;
-			builtin = 1;
+	builtin = 0;
+	for (i = 0; i < GENERIC_EDIDS; i++) {
+		if (strcmp(name, generic_edid_name[i]) == 0) {
 			fwdata = generic_edid[i];
 			fwsize = sizeof(generic_edid[i]);
+			builtin = 1;
+			break;
 		}
 	}
+	if (!builtin) {
+		struct platform_device *pdev;
+		int err;
 
-	if (err) {
-		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-		    name, err);
-		goto out;
-	}
+		pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+		if (IS_ERR(pdev)) {
+			DRM_ERROR("Failed to register EDID firmware platform device "
+				  "for connector \"%s\"\n", connector_name);
+			return ERR_CAST(pdev);
+		}
 
-	if (fwdata == NULL) {
-		fwdata = (u8 *) fw->data;
+		err = request_firmware(&fw, name, &pdev->dev);
+		platform_device_unregister(pdev);
+		if (err) {
+			DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+				  name, err);
+			return ERR_PTR(err);
+		}
+
+		fwdata = fw->data;
 		fwsize = fw->size;
 	}
 
-	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-	if (expected != fwsize) {
+	if (edid_size(fwdata, fwsize) != fwsize) {
 		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
-		err = -EINVAL;
-		goto relfw_out;
+			  "(expected %d, got %d)\n", name,
+			  edid_size(fwdata, fwsize), (int)fwsize);
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
 	if (edid == NULL) {
-		err = -ENOMEM;
-		goto relfw_out;
+		edid = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@
 		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
 		    name);
 		kfree(edid);
-		err = -EINVAL;
-		goto relfw_out;
+		edid = ERR_PTR(-EINVAL);
+		goto out;
 	}
 
 	for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@
 	}
 
 	if (valid_extensions != edid[0x7e]) {
+		u8 *new_edid;
+
 		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
 		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
 		    "\"%s\" for connector \"%s\"\n", valid_extensions,
 		    edid[0x7e], name, connector_name);
 		edid[0x7e] = valid_extensions;
+
 		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
-		    GFP_KERNEL);
-		if (new_edid == NULL) {
-			err = -ENOMEM;
-			kfree(edid);
-			goto relfw_out;
-		}
-		edid = new_edid;
+				    GFP_KERNEL);
+		if (new_edid)
+			edid = new_edid;
 	}
 
 	DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@
 	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
 	    name, connector_name);
 
-relfw_out:
-	release_firmware(fw);
-
 out:
-	if (err)
-		return ERR_PTR(err);
-
+	if (fw)
+		release_firmware(fw);
 	return edid;
 }
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3d13ca6e2..0a19401 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
 static LIST_HEAD(kernel_fb_helper_list);
 
 /**
@@ -844,7 +840,6 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_mode_set *modeset;
-	struct drm_crtc *crtc;
 	int ret = 0;
 	int i;
 
@@ -855,8 +850,6 @@
 	}
 
 	for (i = 0; i < fb_helper->crtc_count; i++) {
-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
 		modeset = &fb_helper->crtc_info[i].mode_set;
 
 		modeset->x = var->xoffset;
@@ -1352,7 +1345,6 @@
 	struct drm_connector *connector;
 	struct drm_connector_helper_funcs *connector_funcs;
 	struct drm_encoder *encoder;
-	struct drm_fb_helper_crtc *best_crtc;
 	int my_score, best_score, score;
 	struct drm_fb_helper_crtc **crtcs, *crtc;
 	struct drm_fb_helper_connector *fb_helper_conn;
@@ -1364,7 +1356,6 @@
 	connector = fb_helper_conn->connector;
 
 	best_crtcs[n] = NULL;
-	best_crtc = NULL;
 	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
 	if (modes[n] == NULL)
 		return best_score;
@@ -1413,7 +1404,6 @@
 		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
 						  width, height);
 		if (score > best_score) {
-			best_crtc = crtc;
 			best_score = score;
 			memcpy(best_crtcs, crtcs,
 			       dev->mode_config.num_connector *
@@ -1580,8 +1570,7 @@
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
 	struct drm_device *dev = fb_helper->dev;
-	int count = 0;
-	u32 max_width, max_height, bpp_sel;
+	u32 max_width, max_height;
 
 	if (!fb_helper->fb)
 		return 0;
@@ -1596,10 +1585,8 @@
 
 	max_width = fb_helper->fb->width;
 	max_height = fb_helper->fb->height;
-	bpp_sel = fb_helper->fb->bits_per_pixel;
 
-	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
-						    max_height);
+	drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
 	mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
 	drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3f84277..3a7176c 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -113,7 +113,6 @@
 	retcode = drm_open_helper(inode, filp, dev);
 	if (retcode)
 		goto err_undo;
-	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
 	if (need_setup) {
 		retcode = drm_setup(dev);
 		if (retcode)
@@ -240,7 +239,8 @@
 
 	priv->ioctl_count = 0;
 	/* for compatibility root is always authenticated */
-	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->always_authenticated = capable(CAP_SYS_ADMIN);
+	priv->authenticated = priv->always_authenticated;
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
@@ -379,13 +379,80 @@
 		}
 
 	/* Remove unconsumed events */
-	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+		list_del(&e->link);
 		e->destroy(e);
+	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
 /**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->last_context = 0;
+	dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	drm_agp_clear(dev);
+
+	drm_legacy_sg_cleanup(dev);
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+
+	drm_legacy_dma_takedown(dev);
+
+	dev->dev_mapping = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_legacy_dev_reinit(dev);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
+/**
  * Release file.
  *
  * \param inode device inode
@@ -454,7 +521,6 @@
 
 				list_del(&pos->head);
 				kfree(pos);
-				--dev->ctx_count;
 			}
 		}
 	}
@@ -468,7 +534,7 @@
 		list_for_each_entry(temp, &dev->filelist, lhead) {
 			if ((temp->master == file_priv->master) &&
 			    (temp != file_priv))
-				temp->authenticated = 0;
+				temp->authenticated = temp->always_authenticated;
 		}
 
 		/**
@@ -516,7 +582,6 @@
 	 * End inline drm_release
 	 */
 
-	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
 	if (!--dev->open_count) {
 		if (atomic_read(&dev->ioctl_count)) {
 			DRM_ERROR("Device busy: %d\n",
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 49293bdc..4761ade 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -160,35 +160,6 @@
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
-	struct drm_gem_object *obj;
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		goto free;
-
-	if (drm_gem_object_init(dev, obj, size) != 0)
-		goto free;
-
-	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0) {
-		goto fput;
-	}
-	return obj;
-fput:
-	/* Object_init mangles the global counters - readjust them. */
-	fput(obj->filp);
-free:
-	kfree(obj);
-	return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
index f731116..3d2e91c 100644
--- a/drivers/gpu/drm/drm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -67,7 +67,6 @@
 {
 	int ret;
 	struct drm_global_item *item = &glob[ref->global_type];
-	void *object;
 
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
@@ -85,7 +84,6 @@
 	}
 	++item->refcount;
 	ref->object = item->object;
-	object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
 out_err:
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5329832..7d5a152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -163,13 +163,13 @@
 	mutex_lock(&dev->struct_mutex);
 	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
 		seq_printf(m, "CRTC %d enable:     %d\n",
-			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+			   crtc, atomic_read(&dev->vblank[crtc].refcount));
 		seq_printf(m, "CRTC %d counter:    %d\n",
 			   crtc, drm_vblank_count(dev, crtc));
 		seq_printf(m, "CRTC %d last wait:  %d\n",
-			   crtc, dev->last_vblank_wait[crtc]);
+			   crtc, dev->vblank[crtc].last_wait);
 		seq_printf(m, "CRTC %d in modeset: %d\n",
-			   crtc, dev->vblank_inmodeset[crtc]);
+			   crtc, dev->vblank[crtc].inmodeset);
 	}
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2..dffc836 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@
 }
 
 /**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_client_cap *req = data;
+
+	switch (req->capability) {
+	case DRM_CLIENT_CAP_STEREO_3D:
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->stereo_allowed = req->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
  * Setversion ioctl.
  *
  * \param inode device inode.
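
For reference, an illustrative userspace snippet (not part of this kernel patch) showing how a KMS client would opt in to stereo modes through the new ioctl; the header path and error handling are simplified and depend on how the libdrm headers are installed:

	#include <sys/ioctl.h>
	#include <drm.h>

	static int enable_stereo_3d(int fd)
	{
		struct drm_set_client_cap cap = {
			.capability = DRM_CLIENT_CAP_STEREO_3D,
			.value = 1,	/* anything above 1 is rejected with -EINVAL */
		};

		/* fd is an already-open DRM device node, e.g. /dev/dri/card0 */
		return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
	}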
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f92da0a..d80d952 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,9 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
-	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
-	((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@
  */
 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
 {
-	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
-		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+	memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
 }
 
 /*
@@ -115,7 +113,7 @@
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	dev->driver->disable_vblank(dev, crtc);
-	dev->vblank_enabled[crtc] = 0;
+	dev->vblank[crtc].enabled = false;
 
 	/* No further vblank irq's will be processed after
 	 * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@
 	 * delayed gpu counter increment.
 	 */
 	do {
-		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
 		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+	} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
 
 	if (!count)
 		vblrc = 0;
@@ -140,7 +138,7 @@
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	diff_ns = timeval_to_ns(&tvblank) -
 		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 
@@ -157,7 +155,7 @@
 	 * hope for the best.
 	 */
 	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 	}
 
@@ -178,8 +176,8 @@
 
 	for (i = 0; i < dev->num_crtcs; i++) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-		    dev->vblank_enabled[i]) {
+		if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+		    dev->vblank[i].enabled) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
 			vblank_disable_and_save(dev, i);
 		}
@@ -197,14 +195,7 @@
 
 	vblank_disable_fn((unsigned long)dev);
 
-	kfree(dev->vbl_queue);
-	kfree(dev->_vblank_count);
-	kfree(dev->vblank_refcount);
-	kfree(dev->vblank_enabled);
-	kfree(dev->last_vblank);
-	kfree(dev->last_vblank_wait);
-	kfree(dev->vblank_inmodeset);
-	kfree(dev->_vblank_time);
+	kfree(dev->vblank);
 
 	dev->num_crtcs = 0;
 }
@@ -221,42 +212,14 @@
 
 	dev->num_crtcs = num_crtcs;
 
-	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
-				 GFP_KERNEL);
-	if (!dev->vbl_queue)
+	dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+	if (!dev->vblank)
 		goto err;
 
-	dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
-	if (!dev->_vblank_count)
-		goto err;
+	for (i = 0; i < num_crtcs; i++)
+		init_waitqueue_head(&dev->vblank[i].queue);
 
-	dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
-				       GFP_KERNEL);
-	if (!dev->vblank_refcount)
-		goto err;
-
-	dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_enabled)
-		goto err;
-
-	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank)
-		goto err;
-
-	dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-	if (!dev->last_vblank_wait)
-		goto err;
-
-	dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-	if (!dev->vblank_inmodeset)
-		goto err;
-
-	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
-				    sizeof(struct timeval), GFP_KERNEL);
-	if (!dev->_vblank_time)
-		goto err;
-
-	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+	DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
 
 	/* Driver specific high-precision vblank timestamping supported? */
 	if (dev->driver->get_vblank_timestamp)
@@ -264,14 +227,8 @@
 	else
 		DRM_INFO("No driver support for vblank timestamp query.\n");
 
-	/* Zero per-crtc vblank stuff */
-	for (i = 0; i < num_crtcs; i++) {
-		init_waitqueue_head(&dev->vbl_queue[i]);
-		atomic_set(&dev->_vblank_count[i], 0);
-		atomic_set(&dev->vblank_refcount[i], 0);
-	}
+	dev->vblank_disable_allowed = false;
 
-	dev->vblank_disable_allowed = 0;
 	return 0;
 
 err:
@@ -336,7 +293,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -373,7 +330,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		if (!drm_core_check_feature(dev, DRIVER_MODESET))
 			vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@
 int drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -410,9 +368,9 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -628,24 +586,20 @@
 	 * code gets preempted or delayed for some reason.
 	 */
 	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
-		/* Disable preemption to make it very likely to
-		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		/*
+		 * Get vertical and horizontal scanout position vpos, hpos,
+		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		preempt_disable();
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos,
+							       &hpos, &stime, &etime);
 
-		/* Get system timestamp before query. */
-		stime = ktime_get();
-
-		/* Get vertical and horizontal scanout pos. vpos, hpos. */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
-
-		/* Get system timestamp after query. */
-		etime = ktime_get();
+		/*
+		 * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
+		 * CLOCK_REALTIME is requested.
+		 */
 		if (!drm_timestamp_monotonic)
 			mono_time_offset = ktime_get_monotonic_offset();
 
-		preempt_enable();
-
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
 			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
@@ -653,6 +607,7 @@
 			return -EIO;
 		}
 
+		/* Compute uncertainty in timestamp of scanout position query. */
 		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
 		/* Accept result with <  max_error nsecs timing uncertainty. */
@@ -795,7 +750,7 @@
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-	return atomic_read(&dev->_vblank_count[crtc]);
+	return atomic_read(&dev->vblank[crtc].count);
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
@@ -824,10 +779,10 @@
 	 * a seqlock.
 	 */
 	do {
-		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		cur_vblank = atomic_read(&dev->vblank[crtc].count);
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
 		smp_rmb();
-	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+	} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
 
 	return cur_vblank;
 }
@@ -914,12 +869,12 @@
 	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 
 	/* Deal with counter wrap */
-	diff = cur_vblank - dev->last_vblank[crtc];
-	if (cur_vblank < dev->last_vblank[crtc]) {
+	diff = cur_vblank - dev->vblank[crtc].last;
+	if (cur_vblank < dev->vblank[crtc].last) {
 		diff += dev->max_vblank_count;
 
 		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+			  crtc, dev->vblank[crtc].last, cur_vblank, diff);
 	}
 
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +885,12 @@
 	 * reinitialize delayed at next vblank interrupt in that case.
 	 */
 	if (rc) {
-		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		tslot = atomic_read(&dev->vblank[crtc].count) + diff;
 		vblanktimestamp(dev, crtc, tslot) = t_vblank;
 	}
 
 	smp_mb__before_atomic_inc();
-	atomic_add(diff, &dev->_vblank_count[crtc]);
+	atomic_add(diff, &dev->vblank[crtc].count);
 	smp_mb__after_atomic_inc();
 }
 
@@ -957,9 +912,9 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+	if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
-		if (!dev->vblank_enabled[crtc]) {
+		if (!dev->vblank[crtc].enabled) {
 			/* Enable vblank irqs under vblank_time_lock protection.
 			 * All vblank count & timestamp updates are held off
 			 * until we are done reinitializing master counter and
@@ -970,16 +925,16 @@
 			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
 				  crtc, ret);
 			if (ret)
-				atomic_dec(&dev->vblank_refcount[crtc]);
+				atomic_dec(&dev->vblank[crtc].refcount);
 			else {
-				dev->vblank_enabled[crtc] = 1;
+				dev->vblank[crtc].enabled = true;
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
 	} else {
-		if (!dev->vblank_enabled[crtc]) {
-			atomic_dec(&dev->vblank_refcount[crtc]);
+		if (!dev->vblank[crtc].enabled) {
+			atomic_dec(&dev->vblank[crtc].refcount);
 			ret = -EINVAL;
 		}
 	}
@@ -999,10 +954,10 @@
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
 	    (drm_vblank_offdelay > 0))
 		mod_timer(&dev->vblank_disable_timer,
 			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +980,7 @@
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	vblank_disable_and_save(dev, crtc);
-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
 	seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1022,10 @@
 	 * to avoid corrupting the count if multiple, mismatch calls occur),
 	 * so that interrupts remain enabled in the interim.
 	 */
-	if (!dev->vblank_inmodeset[crtc]) {
-		dev->vblank_inmodeset[crtc] = 0x1;
+	if (!dev->vblank[crtc].inmodeset) {
+		dev->vblank[crtc].inmodeset = 0x1;
 		if (drm_vblank_get(dev, crtc) == 0)
-			dev->vblank_inmodeset[crtc] |= 0x2;
+			dev->vblank[crtc].inmodeset |= 0x2;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1038,15 @@
 	if (!dev->num_crtcs)
 		return;
 
-	if (dev->vblank_inmodeset[crtc]) {
+	if (dev->vblank[crtc].inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		dev->vblank_disable_allowed = 1;
+		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (dev->vblank_inmodeset[crtc] & 0x2)
+		if (dev->vblank[crtc].inmodeset & 0x2)
 			drm_vblank_put(dev, crtc);
 
-		dev->vblank_inmodeset[crtc] = 0;
+		dev->vblank[crtc].inmodeset = 0;
 	}
 }
 EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1243,8 @@
 
 	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
 		  vblwait->request.sequence, crtc);
-	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
-	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+	dev->vblank[crtc].last_wait = vblwait->request.sequence;
+	DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
 		    (((drm_vblank_count(dev, crtc) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !dev->irq_enabled));
@@ -1367,7 +1322,7 @@
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	/* Vblank irq handling disabled. Nothing to do. */
-	if (!dev->vblank_enabled[crtc]) {
+	if (!dev->vblank[crtc].enabled) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return false;
 	}
@@ -1377,7 +1332,7 @@
 	 */
 
 	/* Get current timestamp and count. */
-	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	vblcount = atomic_read(&dev->vblank[crtc].count);
 	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1356,14 @@
 		 * the timestamp computed above.
 		 */
 		smp_mb__before_atomic_inc();
-		atomic_inc(&dev->_vblank_count[crtc]);
+		atomic_inc(&dev->vblank[crtc].count);
 		smp_mb__after_atomic_inc();
 	} else {
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
 	}
 
-	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	DRM_WAKEUP(&dev->vblank[crtc].queue);
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
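
The drm_irq.c hunks above replace the eight parallel per-CRTC arrays with a single dev->vblank[] array. Roughly what the consolidated per-CRTC structure looks like, inferred from the field accesses in this diff (the real definition lives in drmP.h and is not shown in this excerpt):

	struct drm_vblank_crtc {
		wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
		struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* was dev->_vblank_time */
		atomic_t count;			/* was dev->_vblank_count[crtc] */
		atomic_t refcount;		/* was dev->vblank_refcount[crtc] */
		u32 last;			/* was dev->last_vblank[crtc] */
		u32 last_wait;			/* was dev->last_vblank_wait[crtc] */
		int inmodeset;			/* was dev->vblank_inmodeset[crtc] */
		bool enabled;			/* was dev->vblank_enabled[crtc] */
	};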
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index d752c96..f645268 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -86,7 +86,6 @@
 		if (drm_lock_take(&master->lock, lock->context)) {
 			master->lock.file_priv = file_priv;
 			master->lock.lock_time = jiffies;
-			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
 			break;	/* Got lock */
 		}
 
@@ -157,8 +156,6 @@
 		return -EINVAL;
 	}
 
-	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
 	if (drm_lock_free(&master->lock, lock->context)) {
 		/* FIXME: Should really bail out here. */
 	}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb6..85071a1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@
 /**
  * drm_mode_set_crtcinfo - set CRTC modesetting parameters
  * @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
  *
  * LOCKING:
  * None.
  *
  * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ *   interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ *   buffers containing two eyes (only adjust the timings when needed, e.g. for
+ *   "frame packing" or "side by side full").
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
 	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
 		return;
 
+	p->crtc_clock = p->clock;
 	p->crtc_hdisplay = p->hdisplay;
 	p->crtc_hsync_start = p->hsync_start;
 	p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@
 		p->crtc_vtotal *= p->vscan;
 	}
 
+	if (adjust_flags & CRTC_STEREO_DOUBLE) {
+		unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+		switch (layout) {
+		case DRM_MODE_FLAG_3D_FRAME_PACKING:
+			p->crtc_clock *= 2;
+			p->crtc_vdisplay += p->crtc_vtotal;
+			p->crtc_vsync_start += p->crtc_vtotal;
+			p->crtc_vsync_end += p->crtc_vtotal;
+			p->crtc_vtotal += p->crtc_vtotal;
+			break;
+		}
+	}
+
 	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
 	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
 	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
@@ -830,12 +851,16 @@
 	} else if (mode1->clock != mode2->clock)
 		return false;
 
-	return drm_mode_equal_no_clocks(mode1, mode2);
+	if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+	    (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+		return false;
+
+	return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
 /**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
  * @mode1: first mode
  * @mode2: second mode
  *
@@ -843,12 +868,13 @@
  * None.
  *
  * Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
  *
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+					const struct drm_display_mode *mode2)
 {
 	if (mode1->hdisplay == mode2->hdisplay &&
 	    mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@
 	    mode1->vsync_end == mode2->vsync_end &&
 	    mode1->vtotal == mode2->vtotal &&
 	    mode1->vscan == mode2->vscan &&
-	    mode1->flags == mode2->flags)
+	    (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+	     (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
 		return true;
 
 	return false;
 }
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
@@ -1014,7 +1041,7 @@
 				/* if equal delete the probed mode */
 				mode->status = pmode->status;
 				/* Merge type bits together */
-				mode->type |= pmode->type;
+				mode->type = pmode->type;
 				list_del(&pmode->head);
 				drm_mode_destroy(connector->dev, pmode);
 				break;
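
A worked example of the CRTC_STEREO_DOUBLE handling added to drm_mode_set_crtcinfo() above, using the CEA 1920x1080@24Hz mode (74.25 MHz pixel clock, 1125 total lines): frame packing doubles crtc_clock to 148500 kHz and stretches the vertical timings by one full vtotal, giving crtc_vdisplay = 1080 + 1125 = 2205 and crtc_vtotal = 2250. That matches the HDMI 1.4a frame-packing layout while the user-visible mode keeps advertising 1080 active lines.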
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1f96cee..0267979 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -80,7 +80,7 @@
 	/* Reserve */
 	for (addr = (unsigned long)dmah->vaddr, sz = size;
 	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-		SetPageReserved(virt_to_page(addr));
+		SetPageReserved(virt_to_page((void *)addr));
 	}
 
 	return dmah;
@@ -103,7 +103,7 @@
 		/* Unreserve */
 		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
 		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-			ClearPageReserved(virt_to_page(addr));
+			ClearPageReserved(virt_to_page((void *)addr));
 		}
 		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
 				  dmah->busaddr);
@@ -322,83 +322,36 @@
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &pdev->dev);
 	if (!dev)
 		return -ENOMEM;
 
 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto err_g1;
+		goto err_free;
 
 	dev->pdev = pdev;
-	dev->dev = &pdev->dev;
-
-	dev->pci_device = pdev->device;
-	dev->pci_vendor = pdev->vendor;
-
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif
 
-	mutex_lock(&drm_global_mutex);
-
-	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g2;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		pci_set_drvdata(pdev, dev);
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g2;
-	}
 
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g21;
-	}
-
-	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
-		goto err_g3;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, ent->driver_data);
-		if (ret)
-			goto err_g4;
-	}
-
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-						&dev->primary->mode_group);
-		if (ret)
-			goto err_g4;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
+	ret = drm_dev_register(dev, ent->driver_data);
+	if (ret)
+		goto err_pci;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
 		 driver->date, pci_name(pdev), dev->primary->index);
 
-	mutex_unlock(&drm_global_mutex);
 	return 0;
 
-err_g4:
-	drm_put_minor(&dev->primary);
-err_g3:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g21:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g2:
+err_pci:
 	pci_disable_device(pdev);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index f7a18c6..fc24fee 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -47,55 +47,15 @@
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &platdev->dev);
 	if (!dev)
 		return -ENOMEM;
 
 	dev->platformdev = platdev;
-	dev->dev = &platdev->dev;
 
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-		if (ret)
-			goto err_g1;
-	}
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_g3;
-	}
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@
 
 	return 0;
 
-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 276d470..56805c3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -637,14 +637,13 @@
 	unsigned count;
 	struct scatterlist *sg;
 	struct page *page;
-	u32 len, offset;
+	u32 len;
 	int pg_index;
 	dma_addr_t addr;
 
 	pg_index = 0;
 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
 		len = sg->length;
-		offset = sg->offset;
 		page = sg_page(sg);
 		addr = sg_dma_address(sg);
 
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 39d8645..c200136 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -254,81 +254,21 @@
 	return 0;
 }
 
-int drm_fill_in_dev(struct drm_device *dev,
-			   const struct pci_device_id *ent,
-			   struct drm_driver *driver)
-{
-	int retcode;
-
-	INIT_LIST_HEAD(&dev->filelist);
-	INIT_LIST_HEAD(&dev->ctxlist);
-	INIT_LIST_HEAD(&dev->vmalist);
-	INIT_LIST_HEAD(&dev->maplist);
-	INIT_LIST_HEAD(&dev->vblank_event_list);
-
-	spin_lock_init(&dev->count_lock);
-	spin_lock_init(&dev->event_lock);
-	mutex_init(&dev->struct_mutex);
-	mutex_init(&dev->ctxlist_mutex);
-
-	if (drm_ht_create(&dev->map_hash, 12)) {
-		return -ENOMEM;
-	}
-
-	/* the DRM has 6 basic counters */
-	dev->counters = 6;
-	dev->types[0] = _DRM_STAT_LOCK;
-	dev->types[1] = _DRM_STAT_OPENS;
-	dev->types[2] = _DRM_STAT_CLOSES;
-	dev->types[3] = _DRM_STAT_IOCTLS;
-	dev->types[4] = _DRM_STAT_LOCKS;
-	dev->types[5] = _DRM_STAT_UNLOCKS;
-
-	dev->driver = driver;
-
-	if (dev->driver->bus->agp_init) {
-		retcode = dev->driver->bus->agp_init(dev);
-		if (retcode)
-			goto error_out_unreg;
-	}
-
-
-
-	retcode = drm_ctxbitmap_init(dev);
-	if (retcode) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto error_out_unreg;
-	}
-
-	if (driver->driver_features & DRIVER_GEM) {
-		retcode = drm_gem_init(dev);
-		if (retcode) {
-			DRM_ERROR("Cannot initialize graphics execution "
-				  "manager (GEM)\n");
-			goto error_out_unreg;
-		}
-	}
-
-	return 0;
-
-      error_out_unreg:
-	drm_lastclose(dev);
-	return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
 /**
- * Get a secondary minor number.
+ * drm_get_minor - Allocate and register new DRM minor
+ * @dev: DRM device
+ * @minor: Pointer to where new minor is stored
+ * @type: Type of minor
  *
- * \param dev device data structure
- * \param sec-minor structure to hold the assigned minor
- * \return negative number on failure.
+ * Allocate a new minor of the given type and register it. A pointer to the new
+ * minor is returned in @minor.
+ * Caller must hold the global DRM mutex.
  *
- * Search an empty entry and initialize it to the given parameters. This
- * routines assigns minor numbers to secondary heads of multi-headed cards
+ * RETURNS:
+ * 0 on success, negative error code on failure.
  */
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor,
+			 int type)
 {
 	struct drm_minor *new_minor;
 	int ret;
@@ -385,37 +325,48 @@
 	*minor = NULL;
 	return ret;
 }
-EXPORT_SYMBOL(drm_get_minor);
 
 /**
- * Put a secondary minor number.
+ * drm_unplug_minor - Unplug DRM minor
+ * @minor: Minor to unplug
  *
- * \param sec_minor - structure to be released
- * \return always zero
+ * Unplugs the given DRM minor but keeps the object. So after this returns,
+ * minor->dev is still valid so existing open-files can still access it to get
+ * device information from their drm_file ojects.
+ * If the minor is already unplugged or if @minor is NULL, nothing is done.
+ * The global DRM mutex must be held by the caller.
  */
-int drm_put_minor(struct drm_minor **minor_p)
+static void drm_unplug_minor(struct drm_minor *minor)
 {
-	struct drm_minor *minor = *minor_p;
-
-	DRM_DEBUG("release secondary minor %d\n", minor->index);
+	if (!minor || !device_is_registered(minor->kdev))
+		return;
 
 #if defined(CONFIG_DEBUG_FS)
 	drm_debugfs_cleanup(minor);
 #endif
 
 	drm_sysfs_device_remove(minor);
-
 	idr_remove(&drm_minors_idr, minor->index);
-
-	kfree(minor);
-	*minor_p = NULL;
-	return 0;
 }
-EXPORT_SYMBOL(drm_put_minor);
 
-static void drm_unplug_minor(struct drm_minor *minor)
+/**
+ * drm_put_minor - Destroy DRM minor
+ * @minor: Minor to destroy
+ *
+ * This calls drm_unplug_minor() on the given minor and then frees it. Nothing
+ * is done if @minor is NULL. It is fine to call this on already unplugged
+ * minors.
+ * The global DRM mutex must be held by the caller.
+ */
+static void drm_put_minor(struct drm_minor *minor)
 {
-	drm_sysfs_device_remove(minor);
+	if (!minor)
+		return;
+
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	drm_unplug_minor(minor);
+	kfree(minor);
 }
 
 /**
@@ -427,47 +378,15 @@
  */
 void drm_put_dev(struct drm_device *dev)
 {
-	struct drm_driver *driver;
-	struct drm_map_list *r_list, *list_temp;
-
 	DRM_DEBUG("\n");
 
 	if (!dev) {
 		DRM_ERROR("cleanup called no dev\n");
 		return;
 	}
-	driver = dev->driver;
 
-	drm_lastclose(dev);
-
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
-
-	if (dev->driver->bus->agp_destroy)
-		dev->driver->bus->agp_destroy(dev);
-
-	drm_vblank_cleanup(dev);
-
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-		drm_rmmap(dev, r_list->map);
-	drm_ht_remove(&dev->map_hash);
-
-	drm_ctxbitmap_cleanup(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_put_minor(&dev->control);
-
-	if (dev->render)
-		drm_put_minor(&dev->render);
-
-	if (driver->driver_features & DRIVER_GEM)
-		drm_gem_destroy(dev);
-
-	drm_put_minor(&dev->primary);
-
-	list_del(&dev->driver_item);
-	kfree(dev->devname);
-	kfree(dev);
+	drm_dev_unregister(dev);
+	drm_dev_free(dev);
 }
 EXPORT_SYMBOL(drm_put_dev);
 
@@ -490,3 +409,206 @@
 	mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_unplug_dev);
+
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+				 struct device *parent)
+{
+	struct drm_device *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	dev->dev = parent;
+	dev->driver = driver;
+
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
+
+	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->event_lock);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
+
+	if (drm_ht_create(&dev->map_hash, 12))
+		goto err_free;
+
+	ret = drm_ctxbitmap_init(dev);
+	if (ret) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto err_ht;
+	}
+
+	if (driver->driver_features & DRIVER_GEM) {
+		ret = drm_gem_init(dev);
+		if (ret) {
+			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+			goto err_ctxbitmap;
+		}
+	}
+
+	return dev;
+
+err_ctxbitmap:
+	drm_ctxbitmap_cleanup(dev);
+err_ht:
+	drm_ht_remove(&dev->map_hash);
+err_free:
+	kfree(dev);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device got registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+	drm_put_minor(dev->control);
+	drm_put_minor(dev->render);
+	drm_put_minor(dev->primary);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
+	drm_ctxbitmap_cleanup(dev);
+	drm_ht_remove(&dev->map_hash);
+
+	kfree(dev->devname);
+	kfree(dev);
+}
+EXPORT_SYMBOL(drm_dev_free);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
+{
+	int ret;
+
+	mutex_lock(&drm_global_mutex);
+
+	if (dev->driver->bus->agp_init) {
+		ret = dev->driver->bus->agp_init(dev);
+		if (ret)
+			goto out_unlock;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_agp;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+		if (ret)
+			goto err_control_node;
+	}
+
+	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	if (ret)
+		goto err_render_node;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, flags);
+		if (ret)
+			goto err_primary_node;
+	}
+
+	/* setup grouping for legacy outputs */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+				&dev->primary->mode_group);
+		if (ret)
+			goto err_unload;
+	}
+
+	list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+	ret = 0;
+	goto out_unlock;
+
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+err_primary_node:
+	drm_put_minor(dev->primary);
+err_render_node:
+	drm_put_minor(dev->render);
+err_control_node:
+	drm_put_minor(dev->control);
+err_agp:
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+out_unlock:
+	mutex_unlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+	struct drm_map_list *r_list, *list_temp;
+
+	drm_lastclose(dev);
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	if (dev->driver->bus->agp_destroy)
+		dev->driver->bus->agp_destroy(dev);
+
+	drm_vblank_cleanup(dev);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+
+	drm_unplug_minor(dev->control);
+	drm_unplug_minor(dev->render);
+	drm_unplug_minor(dev->primary);
+
+	list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
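
Taken together, these helpers split device setup into a pure allocation step and a separate registration step. Below is a minimal sketch of the intended calling sequence for a bus driver (the drm_usb.c conversion further down follows this exact pattern); foo_probe, foo_remove and their parameters are hypothetical names for illustration, only the drm_dev_*() calls come from the code above.

	#include <drm/drmP.h>

	/*
	 * Hypothetical bus glue illustrating the drm_dev_alloc()/drm_dev_register()
	 * lifecycle added above; the "foo" names are placeholders, not patch code.
	 */
	static int foo_probe(struct device *parent, struct drm_driver *foo_driver,
			     unsigned long flags)
	{
		struct drm_device *ddev;
		int ret;

		ddev = drm_dev_alloc(foo_driver, parent);	/* allocate + init only */
		if (!ddev)
			return -ENOMEM;

		ret = drm_dev_register(ddev, flags);		/* minors, ->load(), mode groups */
		if (ret) {
			drm_dev_free(ddev);			/* never plain kfree() */
			return ret;
		}

		return 0;
	}

	static void foo_remove(struct drm_device *ddev)
	{
		/* equivalent to drm_put_dev(ddev) */
		drm_dev_unregister(ddev);
		drm_dev_free(ddev);
	}
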
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 2290b3b..1a35ea5 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,8 +22,8 @@
 #include <drm/drm_core.h>
 #include <drm/drmP.h>
 
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
 
 static struct device_type drm_sysfs_device_minor = {
 	.name = "drm_minor"
@@ -162,20 +162,6 @@
 	drm_class = NULL;
 }
 
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff.  But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
-	memset(dev, 0, sizeof(struct device));
-	return;
-}
-
 /*
  * Connector properties
  */
@@ -380,11 +366,6 @@
  * properties (so far, connection status, dpms, mode list & edid) and
  * generate a hotplug event so userspace knows there's a new connector
  * available.
- *
- * Note:
- * This routine should only be called *once* for each registered connector.
- * A second call for an already registered connector will trigger the BUG_ON
- * below.
  */
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
@@ -394,29 +375,25 @@
 	int i;
 	int ret;
 
-	/* We shouldn't get called more than once for the same connector */
-	BUG_ON(device_is_registered(&connector->kdev));
+	if (connector->kdev)
+		return 0;
 
-	connector->kdev.parent = &dev->primary->kdev;
-	connector->kdev.class = drm_class;
-	connector->kdev.release = drm_sysfs_device_release;
-
+	connector->kdev = device_create(drm_class, dev->primary->kdev,
+					0, connector, "card%d-%s",
+					dev->primary->index, drm_get_connector_name(connector));
 	DRM_DEBUG("adding \"%s\" to sysfs\n",
 		  drm_get_connector_name(connector));
 
-	dev_set_name(&connector->kdev, "card%d-%s",
-		     dev->primary->index, drm_get_connector_name(connector));
-	ret = device_register(&connector->kdev);
-
-	if (ret) {
-		DRM_ERROR("failed to register connector device: %d\n", ret);
+	if (IS_ERR(connector->kdev)) {
+		DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+		ret = PTR_ERR(connector->kdev);
 		goto out;
 	}
 
 	/* Standard attributes */
 
 	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
-		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+		ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
 		if (ret)
 			goto err_out_files;
 	}
@@ -433,7 +410,7 @@
 		case DRM_MODE_CONNECTOR_Component:
 		case DRM_MODE_CONNECTOR_TV:
 			for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
-				ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+				ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
 				if (ret)
 					goto err_out_files;
 			}
@@ -442,7 +419,7 @@
 			break;
 	}
 
-	ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+	ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
 	if (ret)
 		goto err_out_files;
 
@@ -453,10 +430,10 @@
 
 err_out_files:
 	for (i = 0; i < opt_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+		device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
 	for (i = 0; i < attr_cnt; i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	device_unregister(&connector->kdev);
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	device_unregister(connector->kdev);
 
 out:
 	return ret;
@@ -480,16 +457,16 @@
 {
 	int i;
 
-	if (!connector->kdev.parent)
+	if (!connector->kdev)
 		return;
 	DRM_DEBUG("removing \"%s\" from sysfs\n",
 		  drm_get_connector_name(connector));
 
 	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
-		device_remove_file(&connector->kdev, &connector_attrs[i]);
-	sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
-	device_unregister(&connector->kdev);
-	connector->kdev.parent = NULL;
+		device_remove_file(connector->kdev, &connector_attrs[i]);
+	sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+	device_unregister(connector->kdev);
+	connector->kdev = NULL;
 }
 EXPORT_SYMBOL(drm_sysfs_connector_remove);
 
@@ -508,7 +485,7 @@
 
 	DRM_DEBUG("generating hotplug event\n");
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 }
 EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
@@ -523,15 +500,8 @@
  */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
-	int err;
 	char *minor_str;
 
-	minor->kdev.parent = minor->dev->dev;
-
-	minor->kdev.class = drm_class;
-	minor->kdev.release = drm_sysfs_device_release;
-	minor->kdev.devt = minor->device;
-	minor->kdev.type = &drm_sysfs_device_minor;
 	if (minor->type == DRM_MINOR_CONTROL)
 		minor_str = "controlD%d";
         else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +509,14 @@
         else
                 minor_str = "card%d";
 
-	dev_set_name(&minor->kdev, minor_str, minor->index);
-
-	err = device_register(&minor->kdev);
-	if (err) {
-		DRM_ERROR("device add failed: %d\n", err);
-		goto err_out;
+	minor->kdev = device_create(drm_class, minor->dev->dev,
+				    MKDEV(DRM_MAJOR, minor->index),
+				    minor, minor_str, minor->index);
+	if (IS_ERR(minor->kdev)) {
+		DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
+		return PTR_ERR(minor->kdev);
 	}
-
 	return 0;
-
-err_out:
-	return err;
 }
 
 /**
@@ -562,9 +528,9 @@
  */
 void drm_sysfs_device_remove(struct drm_minor *minor)
 {
-	if (minor->kdev.parent)
-		device_unregister(&minor->kdev);
-	minor->kdev.parent = NULL;
+	if (minor->kdev)
+		device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+	minor->kdev = NULL;
 }
 
 
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 8766472..b179b70 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -7,57 +7,20 @@
 		    struct drm_driver *driver)
 {
 	struct drm_device *dev;
-	struct usb_device *usbdev;
 	int ret;
 
 	DRM_DEBUG("\n");
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	dev = drm_dev_alloc(driver, &interface->dev);
 	if (!dev)
 		return -ENOMEM;
 
-	usbdev = interface_to_usbdev(interface);
-	dev->usbdev = usbdev;
-	dev->dev = &interface->dev;
-
-	mutex_lock(&drm_global_mutex);
-
-	ret = drm_fill_in_dev(dev, NULL, driver);
-	if (ret) {
-		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-		goto err_g1;
-	}
-
+	dev->usbdev = interface_to_usbdev(interface);
 	usb_set_intfdata(interface, dev);
-	ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+
+	ret = drm_dev_register(dev, 0);
 	if (ret)
-		goto err_g1;
-
-	if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-		ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-		if (ret)
-			goto err_g11;
-	}
-
-	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
-	if (ret)
-		goto err_g2;
-
-	if (dev->driver->load) {
-		ret = dev->driver->load(dev, 0);
-		if (ret)
-			goto err_g3;
-	}
-
-	/* setup the grouping for the legacy output */
-	ret = drm_mode_group_init_legacy_group(dev,
-					       &dev->primary->mode_group);
-	if (ret)
-		goto err_g3;
-
-	list_add_tail(&dev->driver_item, &driver->device_list);
-
-	mutex_unlock(&drm_global_mutex);
+		goto err_free;
 
 	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
 		 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@
 
 	return 0;
 
-err_g3:
-	drm_put_minor(&dev->primary);
-err_g2:
-	if (dev->render)
-		drm_put_minor(&dev->render);
-err_g11:
-	drm_put_minor(&dev->control);
-err_g1:
-	kfree(dev);
-	mutex_unlock(&drm_global_mutex);
+err_free:
+	drm_dev_free(dev);
 	return ret;
 
 }
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index b5c5af7..93e95d7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -301,7 +301,7 @@
 
 	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
-	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+	page = virt_to_page((void *)dma->pagelist[page_nr]);
 
 	get_page(page);
 	vmf->page = page;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef5..f227f54 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index bb82ef7..3a1e6d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -264,7 +264,6 @@
 	.get_vblank_counter	= drm_vblank_count,
 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
-	.gem_init_object	= exynos_drm_gem_init_object,
 	.gem_free_object	= exynos_drm_gem_free_object,
 	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
 	.dumb_create		= exynos_drm_gem_dumb_create,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 868a14d..23da72b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -716,20 +716,20 @@
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;
 
 	/* attach this sub driver to iommu mapping if supported. */
 	if (is_drm_iommu_supported(drm_dev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 49f9cd2..1ade191 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -630,11 +630,6 @@
 	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
 }
 
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
-	return 0;
-}
-
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 09555af..702ec3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -135,9 +135,6 @@
 						unsigned int gem_handle,
 						struct drm_file *file_priv);
 
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
 /* free gem object. */
 void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 4400330..ddaaedd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -101,7 +101,6 @@
 {
 	struct vidi_context *ctx = get_vidi_context(dev);
 	struct edid *edid;
-	int edid_len;
 
 	/*
 	 * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@
 		return ERR_PTR(-EFAULT);
 	}
 
-	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
-	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+	edid = drm_edid_duplicate(ctx->raw_edid);
 	if (!edid) {
 		DRM_DEBUG_KMS("failed to allocate edid\n");
 		return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@
 {
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *	just specific driver own one instead because
 	 *	drm framework supports only one irq handler.
 	 */
-	drm_dev->irq_enabled = 1;
+	drm_dev->irq_enabled = true;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	drm_dev->vblank_disable_allowed = 1;
+	drm_dev->vblank_disable_allowed = true;
 
 	return 0;
 }
@@ -485,7 +483,6 @@
 	struct exynos_drm_manager *manager;
 	struct exynos_drm_display_ops *display_ops;
 	struct drm_exynos_vidi_connection *vidi = data;
-	int edid_len;
 
 	if (!vidi) {
 		DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@
 			DRM_DEBUG_KMS("edid data is invalid.\n");
 			return -EINVAL;
 		}
-		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
-		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+		ctx->raw_edid = drm_edid_duplicate(raw_edid);
 		if (!ctx->raw_edid) {
 			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
 			return -ENOMEM;
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2df..508cf99 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index f4eb435..f88a181 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -666,7 +666,7 @@
 	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &connector->base.kdev;
+	intel_dp->adapter.dev.parent = connector->base.kdev;
 
 	if (is_edp(encoder))
 		cdv_intel_edp_panel_vdd_on(encoder);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 10ae8c5..e2db48a 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -29,11 +29,6 @@
 #include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
-int psb_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL;
-}
-
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
 	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index fcb4e9f..679f953 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -359,7 +359,7 @@
 
 	drm_irq_install(dev);
 
-	dev->vblank_disable_allowed = 1;
+	dev->vblank_disable_allowed = true;
 
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
@@ -449,7 +449,7 @@
 	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
 	if (!obj) {
 		dev_dbg(dev->dev, "Invalid Connector object.\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	connector = obj_to_connector(obj);
@@ -491,7 +491,7 @@
 		obj = drm_mode_object_find(dev, obj_id,
 					DRM_MODE_OBJECT_CONNECTOR);
 		if (!obj) {
-			ret = -EINVAL;
+			ret = -ENOENT;
 			goto mode_op_out;
 		}
 
@@ -646,7 +646,6 @@
 	.preclose = psb_driver_preclose,
 	.postclose = psb_driver_close,
 
-	.gem_init_object = psb_gem_init_object,
 	.gem_free_object = psb_gem_free_object,
 	.gem_vm_ops = &psb_gem_vm_ops,
 	.dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4535ac7..0bab46b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -44,10 +44,10 @@
 	CHIP_MFLD_0130 = 3,		/* Medfield */
 };
 
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
 
 /*
  * Driver definitions
@@ -837,7 +837,6 @@
 extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
 
 /* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
 extern void psb_gem_free_object(struct drm_gem_object *obj);
 extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
 			struct drm_file *file);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 97f8a03..c8841ac 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -572,7 +572,7 @@
 
 	if (!drmmode_obj) {
 		dev_err(dev->dev, "no such CRTC id\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 029eccf..ba48303 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -271,15 +271,15 @@
 
 	if (gma_power_is_on(dev))
 		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
 	/* FIXME: Handle Medfield irq mask
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
 	*/
 
@@ -305,17 +305,17 @@
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 	else
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@
 
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	if (dev->vblank_enabled[0])
+	if (dev->vblank[0].enabled)
 		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[1])
+	if (dev->vblank[1].enabled)
 		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-	if (dev->vblank_enabled[2])
+	if (dev->vblank[2].enabled)
 		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 
 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@
 {
 	unsigned int cur_vblank;
 	int ret = 0;
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+	DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
 		    (((cur_vblank = atomic_read(counter))
 		      - *sequence) <= (1 << 23)));
 	*sequence = cur_vblank;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60e8404..400b0c4 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -17,6 +17,7 @@
 
 
 
+#include <linux/hdmi.h>
 #include <linux/module.h>
 
 #include <drm/drmP.h>
@@ -549,6 +550,8 @@
 	buf[HB(0)] = 0x82;
 	buf[HB(1)] = 0x02;
 	buf[HB(2)] = 13;
+	buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+	buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
 	buf[PB(4)] = drm_match_cea_mode(mode);
 
 	tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ab1892eb..249fdff 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -944,8 +944,6 @@
 				 dma->buflist[vertex->idx],
 				 vertex->discard, vertex->used);
 
-	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1105,8 +1103,6 @@
 	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
 			     mc->last_render);
 
-	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
 	sarea_priv->last_enqueue = dev_priv->counter - 1;
 	sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1197,13 +1193,6 @@
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
-	/* i810 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
 	pci_set_master(dev->pdev);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 0000000..6199d0b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
+	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain.  Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
+config DRM_I915_FBDEV
+	bool "Enable legacy fbdev support for the modesetting intel driver"
+	depends on DRM_I915
+	select DRM_KMS_FB_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the Linux console
+	  support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default.  You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
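
The new DRM_I915_PRELIMINARY_HW_SUPPORT option only changes the built-in default of the i915.preliminary_hw_support module option. As a rough illustration of how such a Kconfig default is typically wired up to a module parameter (names and placement are assumptions, not the actual i915 source):

	#include <linux/kconfig.h>
	#include <linux/module.h>

	/*
	 * Illustrative only: the Kconfig bool selects the compiled-in default,
	 * while the module option can still override it at load time.
	 */
	static int preliminary_hw_support =
		IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
	module_param(preliminary_hw_support, int, 0600);
	MODULE_PARM_DESC(preliminary_hw_support,
			 "Enable preliminary support for prerelease hardware");
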
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8449a8..41838ea 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -21,6 +21,9 @@
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
+	  intel_dsi.o \
+	  intel_dsi_cmd.o \
+	  intel_dsi_pll.o \
 	  intel_bios.o \
 	  intel_ddi.o \
 	  intel_dp.o \
@@ -30,7 +33,6 @@
 	  intel_panel.o \
 	  intel_pm.o \
 	  intel_i2c.o \
-	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
@@ -51,6 +53,8 @@
 
 i915-$(CONFIG_ACPI)	+= intel_acpi.o
 
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 33a62ad..3121633 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -77,17 +77,6 @@
 			  struct drm_display_mode *mode);
 
 	/*
-	 * Callback to adjust the mode to be set in the CRTC.
-	 *
-	 * This allows an output to adjust the clock or even the entire set of
-	 * timings, which is used for panels with fixed timings or for
-	 * buses with clock limitations.
-	 */
-	bool (*mode_fixup)(struct intel_dvo_device *dvo,
-			   const struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
-
-	/*
 	 * Callback for preparing mode changes on an output
 	 */
 	void (*prepare)(struct intel_dvo_device *dvo);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a6f4cb5..4386622 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -38,9 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DRM_I915_RING_DEBUG 1
-
-
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
@@ -54,6 +53,32 @@
 	return v ? "yes" : "no";
 }
 
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
 static int i915_capabilities(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@
 		seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+	seq_putc(m, ' ');
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -843,6 +875,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1355,8 @@
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1395,12 +1431,12 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_fbdev *ifbdev;
+	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
 	if (ret)
 		return ret;
 
@@ -1416,10 +1452,11 @@
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
 	mutex_unlock(&dev->mode_config.mutex);
+#endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-		if (&fb->base == ifbdev->helper.fb)
+		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1479,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct i915_hw_context *ctx;
 	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1498,15 @@
 		seq_putc(m, '\n');
 	}
 
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->default_context) {
-			seq_printf(m, "HW default context %s ring ", ring->name);
-			describe_obj(m, ring->default_context->obj);
-			seq_putc(m, '\n');
-		}
+	list_for_each_entry(ctx, &dev_priv->context_list, link) {
+		seq_puts(m, "HW context ");
+		describe_ctx(m, ctx);
+		for_each_ring(ring, dev_priv, i)
+			if (ring->default_context == ctx)
+				seq_printf(m, "(default context %s) ", ring->name);
+
+		describe_obj(m, ctx->obj);
+		seq_putc(m, '\n');
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1610,27 +1651,27 @@
 	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
 	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
 	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
 
 	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
 	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
 
 	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
 	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
 
 	seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
 	seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+		   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
 
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-		   vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+		   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1655,126 +1696,20 @@
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 psrstat, psrperf;
+	u32 psrperf = 0;
+	bool enabled = false;
 
-	if (!IS_HASWELL(dev)) {
-		seq_puts(m, "PSR not supported on this platform\n");
-	} else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
-		seq_puts(m, "PSR enabled\n");
-	} else {
-		seq_puts(m, "PSR disabled: ");
-		switch (dev_priv->no_psr_reason) {
-		case PSR_NO_SOURCE:
-			seq_puts(m, "not supported on this platform");
-			break;
-		case PSR_NO_SINK:
-			seq_puts(m, "not supported by panel");
-			break;
-		case PSR_MODULE_PARAM:
-			seq_puts(m, "disabled by flag");
-			break;
-		case PSR_CRTC_NOT_ACTIVE:
-			seq_puts(m, "crtc not active");
-			break;
-		case PSR_PWR_WELL_ENABLED:
-			seq_puts(m, "power well enabled");
-			break;
-		case PSR_NOT_TILED:
-			seq_puts(m, "not tiled");
-			break;
-		case PSR_SPRITE_ENABLED:
-			seq_puts(m, "sprite enabled");
-			break;
-		case PSR_S3D_ENABLED:
-			seq_puts(m, "stereo 3d enabled");
-			break;
-		case PSR_INTERLACED_ENABLED:
-			seq_puts(m, "interlaced enabled");
-			break;
-		case PSR_HSW_NOT_DDIA:
-			seq_puts(m, "HSW ties PSR to DDI A (eDP)");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_puts(m, "\n");
-		return 0;
-	}
+	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
-	psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+	enabled = HAS_PSR(dev) &&
+		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+	seq_printf(m, "Enabled: %s\n", yesno(enabled));
 
-	seq_puts(m, "PSR Current State: ");
-	switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
-	case EDP_PSR_STATUS_STATE_IDLE:
-		seq_puts(m, "Reset state\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDONACK:
-		seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDENT:
-		seq_puts(m, "SRD entry\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFOFF:
-		seq_puts(m, "Wait for buffer turn off\n");
-		break;
-	case EDP_PSR_STATUS_STATE_BUFON:
-		seq_puts(m, "Wait for buffer turn on\n");
-		break;
-	case EDP_PSR_STATUS_STATE_AUXACK:
-		seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
-		break;
-	case EDP_PSR_STATUS_STATE_SRDOFFACK:
-		seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_puts(m, "Link Status: ");
-	switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
-	case EDP_PSR_STATUS_LINK_FULL_OFF:
-		seq_puts(m, "Link is fully off\n");
-		break;
-	case EDP_PSR_STATUS_LINK_FULL_ON:
-		seq_puts(m, "Link is fully on\n");
-		break;
-	case EDP_PSR_STATUS_LINK_STANDBY:
-		seq_puts(m, "Link is in standby\n");
-		break;
-	default:
-		seq_puts(m, "Unknown\n");
-		break;
-	}
-
-	seq_printf(m, "PSR Entry Count: %u\n",
-		   psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
-		   EDP_PSR_STATUS_COUNT_MASK);
-
-	seq_printf(m, "Max Sleep Timer Counter: %u\n",
-		   psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
-		   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
-
-	seq_printf(m, "Had AUX error: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
-
-	seq_printf(m, "Sending AUX: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
-
-	seq_printf(m, "Sending Idle: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
-	seq_printf(m, "Sending TP2 TP3: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
-
-	seq_printf(m, "Sending TP1: %s\n",
-		   yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
-
-	seq_printf(m, "Idle Count: %u\n",
-		   psrstat & EDP_PSR_STATUS_IDLE_MASK);
-
-	psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
-	seq_printf(m, "Performance Counter: %u\n", psrperf);
+	if (HAS_PSR(dev))
+		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+			EDP_PSR_PERF_CNT_MASK;
+	seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
 	return 0;
 }
@@ -1825,6 +1760,751 @@
 	return 0;
 }
 
+struct pipe_crc_info {
+	const char *name;
+	struct drm_device *dev;
+	enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	spin_lock_irq(&pipe_crc->lock);
+
+	if (pipe_crc->opened) {
+		spin_unlock_irq(&pipe_crc->lock);
+		return -EBUSY; /* already open */
+	}
+
+	pipe_crc->opened = true;
+	filep->private_data = inode->i_private;
+
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->opened = false;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
+/* account for the terminating '\0' */
+#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+	assert_spin_locked(&pipe_crc->lock);
+	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+		   loff_t *pos)
+{
+	struct pipe_crc_info *info = filep->private_data;
+	struct drm_device *dev = info->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+	char buf[PIPE_CRC_BUFFER_LEN];
+	int head, tail, n_entries, n;
+	ssize_t bytes_read;
+
+	/*
+	 * Don't allow user space to provide buffers not big enough to hold
+	 * a line of data.
+	 */
+	if (count < PIPE_CRC_LINE_LEN)
+		return -EINVAL;
+
+	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+		return 0;
+
+	/* nothing to read */
+	spin_lock_irq(&pipe_crc->lock);
+	while (pipe_crc_data_count(pipe_crc) == 0) {
+		int ret;
+
+		if (filep->f_flags & O_NONBLOCK) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return -EAGAIN;
+		}
+
+		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+		if (ret) {
+			spin_unlock_irq(&pipe_crc->lock);
+			return ret;
+		}
+	}
+
+	/* We now have one or more entries to read */
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+			count / PIPE_CRC_LINE_LEN);
+	spin_unlock_irq(&pipe_crc->lock);
+
+	bytes_read = 0;
+	n = 0;
+	do {
+		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+		int ret;
+
+		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+				       "%8u %8x %8x %8x %8x %8x\n",
+				       entry->frame, entry->crc[0],
+				       entry->crc[1], entry->crc[2],
+				       entry->crc[3], entry->crc[4]);
+
+		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+				   buf, PIPE_CRC_LINE_LEN);
+		if (ret == PIPE_CRC_LINE_LEN)
+			return -EFAULT;
+
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+		n++;
+	} while (--n_entries);
+
+	spin_lock_irq(&pipe_crc->lock);
+	pipe_crc->tail = tail;
+	spin_unlock_irq(&pipe_crc->lock);
+
+	return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_pipe_crc_open,
+	.read = i915_pipe_crc_read,
+	.release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+	{
+		.name = "i915_pipe_A_crc",
+		.pipe = PIPE_A,
+	},
+	{
+		.name = "i915_pipe_B_crc",
+		.pipe = PIPE_B,
+	},
+	{
+		.name = "i915_pipe_C_crc",
+		.pipe = PIPE_C,
+	},
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+				enum pipe pipe)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+	info->dev = dev;
+	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+				  &i915_pipe_crc_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+	"none",
+	"plane1",
+	"plane2",
+	"pf",
+	"pipe",
+	"TV",
+	"DP-B",
+	"DP-C",
+	"DP-D",
+	"auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+	return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		seq_printf(m, "%c %s\n", pipe_name(i),
+			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+	return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
+				     enum intel_pipe_crc_source *source)
+{
+	struct intel_encoder *encoder;
+	struct intel_crtc *crtc;
+	struct intel_digital_port *dig_port;
+	int ret = 0;
+
+	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!encoder->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(encoder->base.crtc);
+
+		if (crtc->pipe != pipe)
+			continue;
+
+		switch (encoder->type) {
+		case INTEL_OUTPUT_TVOUT:
+			*source = INTEL_PIPE_CRC_SOURCE_TV;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_EDP:
+			dig_port = enc_to_dig_port(&encoder->base);
+			switch (dig_port->port) {
+			case PORT_B:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
+				break;
+			case PORT_C:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
+				break;
+			case PORT_D:
+				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
+				break;
+			default:
+				WARN(1, "nonexisting DP port %c\n",
+				     port_name(dig_port->port));
+				break;
+			}
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		tmp |= DC_BALANCE_RESET_VLV;
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+				 enum pipe pipe,
+				 enum intel_pipe_crc_source *source,
+				 uint32_t *val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool need_stable_symbols = false;
+
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
+		if (ret)
+			return ret;
+	}
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_TV:
+		if (!SUPPORTS_TV(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_B:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_C:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_G4X(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+		need_stable_symbols = true;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * When the pipe CRC tap point is after the transcoders we need
+	 * to tweak symbol-level features to produce a deterministic series of
+	 * symbols for a given frame. We need to reset those features only once
+	 * a frame (instead of every nth symbol):
+	 *   - DC-balance: used to ensure a better clock recovery from the data
+	 *     link (SDVO)
+	 *   - DisplayPort scrambling: used for EMI reduction
+	 */
+	if (need_stable_symbols) {
+		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+		WARN_ON(!IS_G4X(dev));
+
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+		if (pipe == PIPE_A)
+			tmp |= PIPE_A_SCRAMBLE_RESET;
+		else
+			tmp |= PIPE_B_SCRAMBLE_RESET;
+
+		I915_WRITE(PORT_DFT2_G4X, tmp);
+	}
+
+	return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+		tmp &= ~DC_BALANCE_RESET_VLV;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
+					 enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+	if (pipe == PIPE_A)
+		tmp &= ~PIPE_A_SCRAMBLE_RESET;
+	else
+		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+	I915_WRITE(PORT_DFT2_G4X, tmp);
+
+	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+		I915_WRITE(PORT_DFT_I9XX,
+			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+	}
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+				uint32_t *val)
+{
+	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+		*source = INTEL_PIPE_CRC_SOURCE_PF;
+
+	switch (*source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_NONE:
+		*val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+			       enum intel_pipe_crc_source source)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	u32 val;
+	int ret;
+
+	if (pipe_crc->source == source)
+		return 0;
+
+	/* forbid changing the source without going back to 'none' */
+	if (pipe_crc->source && source)
+		return -EINVAL;
+
+	if (IS_GEN2(dev))
+		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
+	else if (INTEL_INFO(dev)->gen < 5)
+		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_VALLEYVIEW(dev))
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
+	else if (IS_GEN5(dev) || IS_GEN6(dev))
+		ret = ilk_pipe_crc_ctl_reg(&source, &val);
+	else
+		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+
+	if (ret != 0)
+		return ret;
+
+	/* none -> real source transition */
+	if (source) {
+		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+				 pipe_name(pipe), pipe_crc_source_name(source));
+
+		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+					    INTEL_PIPE_CRC_ENTRIES_NR,
+					    GFP_KERNEL);
+		if (!pipe_crc->entries)
+			return -ENOMEM;
+
+		spin_lock_irq(&pipe_crc->lock);
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
+		spin_unlock_irq(&pipe_crc->lock);
+	}
+
+	pipe_crc->source = source;
+
+	I915_WRITE(PIPE_CRC_CTL(pipe), val);
+	POSTING_READ(PIPE_CRC_CTL(pipe));
+
+	/* real source -> none transition */
+	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+		struct intel_pipe_crc_entry *entries;
+
+		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+				 pipe_name(pipe));
+
+		intel_wait_for_vblank(dev, pipe);
+
+		spin_lock_irq(&pipe_crc->lock);
+		entries = pipe_crc->entries;
+		pipe_crc->entries = NULL;
+		spin_unlock_irq(&pipe_crc->lock);
+
+		kfree(entries);
+
+		if (IS_G4X(dev))
+			g4x_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_VALLEYVIEW(dev))
+			vlv_undo_pipe_scramble_reset(dev, pipe);
+	}
+
+	return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+	int n_words = 0;
+
+	while (*buf) {
+		char *end;
+
+		/* skip leading white space */
+		buf = skip_spaces(buf);
+		if (!*buf)
+			break;	/* end of buffer */
+
+		/* find end of word */
+		for (end = buf; *end && !isspace(*end); end++)
+			;
+
+		if (n_words == max_words) {
+			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+					 max_words);
+			return -EINVAL;	/* ran out of words[] before bytes */
+		}
+
+		if (*end)
+			*end++ = '\0';
+		words[n_words++] = buf;
+		buf = end;
+	}
+
+	return n_words;
+}
+
+enum intel_pipe_crc_object {
+	PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+	"pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+		if (!strcmp(buf, pipe_crc_objects[i])) {
+			*o = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+	const char name = buf[0];
+
+	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+		return -EINVAL;
+
+	*pipe = name - 'A';
+
+	return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+		if (!strcmp(buf, pipe_crc_sources[i])) {
+			*s = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+	int n_words;
+	char *words[N_WORDS];
+	enum pipe pipe;
+	enum intel_pipe_crc_object object;
+	enum intel_pipe_crc_source source;
+
+	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+	if (n_words != N_WORDS) {
+		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+				 N_WORDS);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+		return -EINVAL;
+	}
+
+	return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	char *tmpbuf;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+				 PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(tmpbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	tmpbuf[len] = '\0';
+
+	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+	kfree(tmpbuf);
+	if (ret < 0)
+		return ret;
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = display_crc_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = display_crc_ctl_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -1885,6 +2565,72 @@
 			i915_ring_stop_get, i915_ring_stop_set,
 			"0x%08llx\n");
 
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.missed_irq_rings;
+	return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	dev_priv->gpu_error.missed_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+			"0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.test_irq_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+	/* Lock against concurrent debugfs callers */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.test_irq_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+			i915_ring_test_irq_get, i915_ring_test_irq_set,
+			"0x%08llx\n");
+
 #define DROP_UNBOUND 0x1
 #define DROP_BOUND 0x2
 #define DROP_RETIRE 0x4
@@ -1972,6 +2718,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1996,6 +2744,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2784,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2058,6 +2810,8 @@
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2890,6 @@
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
-		       struct dentry *ent,
-		       const void *key)
-{
-	struct drm_info_node *node;
-
-	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-	if (node == NULL) {
-		debugfs_remove(ent);
-		return -ENOMEM;
-	}
-
-	node->minor = minor;
-	node->dent = ent;
-	node->info_ent = (void *) key;
-
-	mutex_lock(&minor->debugfs_lock);
-	list_add(&node->list, &minor->debugfs_list);
-	mutex_unlock(&minor->debugfs_lock);
-
-	return 0;
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -2227,7 +2955,7 @@
 	return drm_add_fake_info_node(minor, ent, fops);
 }
 
-static struct drm_info_list i915_debugfs_list[] = {
+static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
@@ -2269,7 +2997,7 @@
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
-static struct i915_debugfs_files {
+static const struct i915_debugfs_files {
 	const char *name;
 	const struct file_operations *fops;
 } i915_debugfs_files[] = {
@@ -2278,11 +3006,28 @@
 	{"i915_min_freq", &i915_min_freq_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_stop", &i915_ring_stop_fops},
+	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 };
 
+void intel_display_crc_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+		pipe_crc->opened = false;
+		spin_lock_init(&pipe_crc->lock);
+		init_waitqueue_head(&pipe_crc->wq);
+	}
+}
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
 	int ret, i;
@@ -2291,6 +3036,12 @@
 	if (ret)
 		return ret;
 
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		ret = i915_debugfs_create(minor->debugfs_root, minor,
 					  i915_debugfs_files[i].name,
@@ -2310,8 +3061,17 @@
 
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
 				 1, minor);
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *)&i915_pipe_crc_data[i];
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
 			(struct drm_info_list *) i915_debugfs_files[i].fops;
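
The i915_display_crc_ctl file added above accepts a single three-word command, parsed by display_crc_ctl_parse() as "object pipe source". Below is a minimal userspace sketch of driving it; the debugfs mount point (/sys/kernel/debug/dri/0) and the "pipe" source token are assumptions, the latter on the expectation that pipe_crc_sources[] (not shown in this hunk) mirrors enum intel_pipe_crc_source.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical path: DRM debugfs dir for minor 0. */
		const char *path = "/sys/kernel/debug/dri/0/i915_display_crc_ctl";
		const char cmd[] = "pipe A pipe";	/* object, pipe, source */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, cmd, strlen(cmd)) < 0)
			perror("write");
		close(fd);
		return 0;
	}

Writing "pipe A none" the same way would be expected to disable capture again, assuming "none" is among the recognised source tokens.
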
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d5c784d..0cab2d0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -52,7 +52,7 @@
 	intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
-	intel_ring_advance(LP_RING(dev_priv))
+	__intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@
 
 	if (batch->num_cliprects) {
 		cliprects = kcalloc(batch->num_cliprects,
-				    sizeof(struct drm_clip_rect),
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL)
 			return -ENOMEM;
@@ -703,7 +703,7 @@
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
-				    sizeof(struct drm_clip_rect), GFP_KERNEL);
+				    sizeof(*cliprects), GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
 			goto fail_batch_free;
@@ -931,7 +931,7 @@
 		value = READ_BREADCRUMB(dev_priv);
 		break;
 	case I915_PARAM_CHIPSET_ID:
-		value = dev->pci_device;
+		value = dev->pdev->device;
 		break;
 	case I915_PARAM_HAS_GEM:
 		value = 1;
@@ -1311,13 +1311,15 @@
 	if (ret)
 		goto cleanup_gem_stolen;
 
+	intel_power_domains_init_hw(dev);
+
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
 	ret = i915_gem_init(dev);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_power;
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
@@ -1325,9 +1327,11 @@
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
-	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0)
+	dev->vblank_disable_allowed = true;
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		intel_display_power_put(dev, POWER_DOMAIN_VGA);
 		return 0;
+	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1362,7 +1366,8 @@
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+	intel_display_power_put(dev, POWER_DOMAIN_VGA);
 	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
@@ -1398,6 +1403,7 @@
 	master->driver_priv = NULL;
 }
 
+#ifdef CONFIG_DRM_I915_FBDEV
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1418,6 +1424,11 @@
 
 	kfree(ap);
 }
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ -1459,17 +1470,13 @@
 	info = (struct intel_device_info *) flags;
 
 	/* Refuse to load on gen6+ without kms enabled. */
-	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
 		return -ENODEV;
+	}
 
-	/* i915 has 4 more counters */
-	dev->counters += 4;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-	dev->types[9] = _DRM_STAT_DMA;
-
-	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
@@ -1494,6 +1501,8 @@
 	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
 	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 
+	intel_display_crc_init(dev);
+
 	i915_dump_device_info(dev_priv);
 
 	/* Not all pre-production machines fall into this category, only the
@@ -1531,19 +1540,14 @@
 
 	intel_uncore_early_sanitize(dev);
 
-	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
-		/* The docs do not explain exactly how the calculation can be
-		 * made. It is somewhat guessable, but for now, it's always
-		 * 128MB.
-		 * NB: We can't write IDICR yet because we do not have gt funcs
-		 * set up */
-		dev_priv->ellc_size = 128;
-		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
-	}
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_uncore_init(dev);
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
-		goto put_bridge;
+		goto out_regs;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kick_out_firmware_fb(dev_priv);
@@ -1572,7 +1576,7 @@
 				     aperture_size);
 	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
-		goto out_rmmap;
+		goto out_gtt;
 	}
 
 	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1598,13 +1602,9 @@
 		goto out_mtrrfree;
 	}
 
-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev);
-
 	intel_irq_init(dev);
 	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
-	intel_uncore_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1640,13 +1640,13 @@
 	}
 
 	if (HAS_POWER_WELL(dev))
-		i915_init_power_well(dev);
+		intel_power_domains_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_gem_unload;
+			goto out_power_well;
 		}
 	} else {
 		/* Start out suspended in ums mode. */
@@ -1666,6 +1666,10 @@
 
 	return 0;
 
+out_power_well:
+	if (HAS_POWER_WELL(dev))
+		intel_power_domains_remove(dev);
+	drm_vblank_cleanup(dev);
 out_gem_unload:
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1679,12 +1683,18 @@
 out_mtrrfree:
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+	list_del(&dev_priv->gtt.base.global_link);
+	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+	intel_uncore_fini(dev);
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
 	kfree(dev_priv);
 	return ret;
 }
@@ -1700,8 +1710,8 @@
 		/* The i915.ko module is still not prepared to be loaded when
 		 * the power well is not enabled, so just enable it in case
 		 * we're going to unload/reload. */
-		intel_set_power_well(dev, true);
-		i915_remove_power_well(dev);
+		intel_display_set_init_power(dev, true);
+		intel_power_domains_remove(dev);
 	}
 
 	i915_teardown_sysfs(dev);
@@ -1709,15 +1719,9 @@
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	i915_gem_retire_requests(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	/* Cancel the retire work handler, which should be idle now. */
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1774,8 +1778,8 @@
 	list_del(&dev_priv->gtt.base.global_link);
 	WARN_ON(!list_empty(&dev_priv->vm_list));
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
-	if (dev_priv->regs != NULL)
-		pci_iounmap(dev->pdev, dev_priv->regs);
+
+	drm_vblank_cleanup(dev);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
@@ -1785,6 +1789,10 @@
 
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
+	intel_uncore_fini(dev);
+	if (dev_priv->regs != NULL)
+		pci_iounmap(dev->pdev, dev_priv->regs);
+
 	if (dev_priv->slab)
 		kmem_cache_destroy(dev_priv->slab);
 
@@ -1796,19 +1804,11 @@
 
 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *file_priv;
+	int ret;
 
-	DRM_DEBUG_DRIVER("\n");
-	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
-	if (!file_priv)
-		return -ENOMEM;
-
-	file->driver_priv = file_priv;
-
-	spin_lock_init(&file_priv->mm.lock);
-	INIT_LIST_HEAD(&file_priv->mm.request_list);
-
-	idr_init(&file_priv->context_idr);
+	ret = i915_gem_open(dev, file);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -1836,7 +1836,7 @@
 		return;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_fb_restore_mode(dev);
+		intel_fbdev_restore_mode(dev);
 		vga_switcheroo_process_delayed_switch();
 		return;
 	}
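
The i915_dma.c hunks above largely reshuffle the driver-load error path: intel_detect_pch() and intel_uncore_init() move before the GTT setup, and the unwind labels become out_power_well/out_gtt/out_regs so each failure point releases exactly what has already been initialized. The self-contained sketch below (all names illustrative, not i915's) shows the general goto-unwind pattern that ordering relies on.

	#include <stdio.h>

	static int setup_regs(void)        { puts("map regs");   return 0; }
	static int setup_gtt(void)         { puts("init gtt");   return 0; }
	static int setup_power_wells(void) { puts("init power"); return -1; /* simulate failure */ }
	static void teardown_gtt(void)     { puts("takedown gtt"); }
	static void teardown_regs(void)    { puts("unmap regs"); }

	static int example_load(void)
	{
		int ret;

		ret = setup_regs();
		if (ret)
			return ret;

		ret = setup_gtt();
		if (ret)
			goto out_regs;

		ret = setup_power_wells();
		if (ret)
			goto out_gtt;

		return 0;

	out_gtt:
		/* Undo only what succeeded, in reverse order of setup. */
		teardown_gtt();
	out_regs:
		teardown_regs();
		return ret;
	}

	int main(void)
	{
		printf("load returned %d\n", example_load());
		return 0;
	}
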
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2ad2788..a0804fa 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i85x_info = {
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965g_info = {
 	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@
 	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g33_info = {
 	.gen = 3, .is_g33 = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
 	.gen = 6, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
-	.has_bsd_ring = 1,
-	.has_blt_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
-	.has_force_wake = 1,
 };
 
 #define GEN7_FEATURES  \
 	.gen = 7, .num_pipes = 3, \
 	.need_gfx_hws = 1, .has_hotplug = 1, \
-	.has_bsd_ring = 1, \
-	.has_blt_ring = 1, \
-	.has_llc = 1, \
-	.has_force_wake = 1
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1
 
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
@@ -318,7 +323,7 @@
 	.is_haswell = 1,
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,7 @@
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.has_fbc = 1,
-	.has_vebox_ring = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 /*
@@ -416,7 +421,7 @@
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
@@ -472,7 +477,7 @@
 	/* We do a lot of poking in a lot of registers, make sure they work
 	 * properly. */
 	hsw_disable_package_c8(dev_priv);
-	intel_set_power_well(dev, true);
+	intel_display_set_init_power(dev, true);
 
 	drm_kms_helper_poll_disable(dev);
 
@@ -482,9 +487,7 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		int error;
 
-		mutex_lock(&dev->struct_mutex);
-		error = i915_gem_idle(dev);
-		mutex_unlock(&dev->struct_mutex);
+		error = i915_gem_suspend(dev);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -578,11 +581,24 @@
 	drm_helper_hpd_irq_event(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	intel_uncore_early_sanitize(dev);
+
+	intel_uncore_sanitize(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    restore_gtt_mappings) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	intel_power_domains_init_hw(dev);
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
@@ -642,20 +658,10 @@
 
 static int i915_drm_thaw(struct drm_device *dev)
 {
-	int error = 0;
-
-	intel_uncore_sanitize(dev);
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	} else if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_check_and_clear_faults(dev);
 
-	__i915_drm_thaw(dev);
-
-	return error;
+	return __i915_drm_thaw(dev, true);
 }
 
 int i915_resume(struct drm_device *dev)
@@ -671,20 +677,12 @@
 
 	pci_set_master(dev->pdev);
 
-	intel_uncore_sanitize(dev);
-
 	/*
 	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
-	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 * earlier) need to restore the GTT mappings since the BIOS might clear
+	 * all our scratch PTEs.
 	 */
-	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-	    !dev_priv->opregion.header) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	ret = __i915_drm_thaw(dev);
+	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
 	if (ret)
 		return ret;
 
@@ -722,24 +720,19 @@
 
 	simulated = dev_priv->gpu_error.stop_rings != 0;
 
-	if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
-		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-		ret = -ENODEV;
-	} else {
-		ret = intel_gpu_reset(dev);
+	ret = intel_gpu_reset(dev);
 
-		/* Also reset the gpu hangman. */
-		if (simulated) {
-			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-			dev_priv->gpu_error.stop_rings = 0;
-			if (ret == -ENODEV) {
-				DRM_ERROR("Reset not implemented, but ignoring "
-					  "error for simulated gpu hangs\n");
-				ret = 0;
-			}
-		} else
-			dev_priv->gpu_error.last_reset = get_seconds();
+	/* Also reset the gpu hangman. */
+	if (simulated) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
 	}
+
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
@@ -762,30 +755,17 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->ums.mm_suspended) {
-		struct intel_ring_buffer *ring;
-		int i;
-
+		bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
 		dev_priv->ums.mm_suspended = 0;
 
-		i915_gem_init_swizzling(dev);
-
-		for_each_ring(ring, dev_priv, i)
-			ring->init(ring);
-
-		i915_gem_context_init(dev);
-		if (dev_priv->mm.aliasing_ppgtt) {
-			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-			if (ret)
-				i915_gem_cleanup_aliasing_ppgtt(dev);
-		}
-
-		/*
-		 * It would make sense to re-init all the other hw state, at
-		 * least the rps/rc6/emon init done within modeset_init_hw. For
-		 * some unknown reason, this blows up my ilk, so don't.
-		 */
-
+		ret = i915_gem_init_hw(dev);
+		if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+			DRM_ERROR("HW contexts didn't survive reset\n");
 		mutex_unlock(&dev->struct_mutex);
+		if (ret) {
+			DRM_ERROR("Failed hw init on reset %d\n", ret);
+			return ret;
+		}
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
@@ -802,6 +782,12 @@
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
+	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
 	/* Only bind to function 0 of the device. Early generations
 	 * used function 1 as a placeholder for multi-head. This causes
 	 * us confusion instead, especially on the systems where both
@@ -949,7 +935,6 @@
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
 #endif
-	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ab0f2c0..b0dd4ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
 #define DRIVER_DATE		"20080730"
 
 enum pipe {
+	INVALID_PIPE = -1,
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
@@ -98,13 +99,25 @@
 	POWER_DOMAIN_TRANSCODER_A,
 	POWER_DOMAIN_TRANSCODER_B,
 	POWER_DOMAIN_TRANSCODER_C,
-	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_INIT,
+
+	POWER_DOMAIN_NUM,
 };
 
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
 		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+	 (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
+	BIT(POWER_DOMAIN_PIPE_A) |		\
+	BIT(POWER_DOMAIN_TRANSCODER_EDP))
 
 enum hpd_pin {
 	HPD_NONE = 0,
@@ -225,9 +238,12 @@
 	struct opregion_header __iomem *header;
 	struct opregion_acpi __iomem *acpi;
 	struct opregion_swsci __iomem *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
 	struct opregion_asle __iomem *asle;
 	void __iomem *vbt;
 	u32 __iomem *lid_state;
+	struct work_struct asle_work;
 };
 #define OPREGION_SIZE            (8*1024)
 
@@ -285,6 +301,7 @@
 	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
+	u32 bbstate[I915_NUM_RINGS];
 	u32 instpm[I915_NUM_RINGS];
 	u32 instps[I915_NUM_RINGS];
 	u32 extra_instdone[I915_NUM_INSTDONE_REG];
@@ -321,11 +338,13 @@
 		u32 dirty:1;
 		u32 purgeable:1;
 		s32 ring:4;
-		u32 cache_level:2;
+		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
 	u32 *active_bo_count, *pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	int hangcheck_score[I915_NUM_RINGS];
+	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_crtc_config;
@@ -357,7 +376,7 @@
 			  int target, int refclk,
 			  struct dpll *match_clock,
 			  struct dpll *best_clock);
-	void (*update_wm)(struct drm_device *dev);
+	void (*update_wm)(struct drm_crtc *crtc);
 	void (*update_sprite_wm)(struct drm_plane *plane,
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, int pixel_size,
@@ -367,7 +386,6 @@
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
-	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -375,7 +393,8 @@
 	void (*crtc_disable)(struct drm_crtc *crtc);
 	void (*off)(struct drm_crtc *crtc);
 	void (*write_eld)(struct drm_connector *connector,
-			  struct drm_crtc *crtc);
+			  struct drm_crtc *crtc,
+			  struct drm_display_mode *mode);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +414,20 @@
 struct intel_uncore_funcs {
 	void (*force_wake_get)(struct drm_i915_private *dev_priv);
 	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+				uint8_t val, bool trace);
+	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+				uint16_t val, bool trace);
+	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+				uint32_t val, bool trace);
+	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+				uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -404,6 +437,8 @@
 
 	unsigned fifo_count;
 	unsigned forcewake_count;
+
+	struct delayed_work force_wake_work;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +455,7 @@
 	func(is_ivybridge) sep \
 	func(is_valleyview) sep \
 	func(is_haswell) sep \
-	func(has_force_wake) sep \
+	func(is_preliminary) sep \
 	func(has_fbc) sep \
 	func(has_pipe_cxsr) sep \
 	func(has_hotplug) sep \
@@ -428,9 +463,6 @@
 	func(has_overlay) sep \
 	func(overlay_needs_physical) sep \
 	func(supports_tv) sep \
-	func(has_bsd_ring) sep \
-	func(has_blt_ring) sep \
-	func(has_vebox_ring) sep \
 	func(has_llc) sep \
 	func(has_ddi) sep \
 	func(has_fpga_dbg)
@@ -442,6 +474,7 @@
 	u32 display_mmio_offset;
 	u8 num_pipes:3;
 	u8 gen;
+	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 };
 
@@ -570,6 +603,13 @@
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -578,6 +618,12 @@
 
 	/* This context had batch active when hang was declared */
 	unsigned batch_active;
+
+	/* Time when this context was last blamed for a GPU reset */
+	unsigned long guilty_ts;
+
+	/* This context is banned from submitting more work */
+	bool banned;
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
@@ -586,10 +632,13 @@
 	struct kref ref;
 	int id;
 	bool is_initialized;
+	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+
+	struct list_head link;
 };
 
 struct i915_fbc {
@@ -623,17 +672,9 @@
 	} no_fbc_reason;
 };
 
-enum no_psr_reason {
-	PSR_NO_SOURCE, /* Not supported on platform */
-	PSR_NO_SINK, /* Not supported by panel */
-	PSR_MODULE_PARAM,
-	PSR_CRTC_NOT_ACTIVE,
-	PSR_PWR_WELL_ENABLED,
-	PSR_NOT_TILED,
-	PSR_SPRITE_ENABLED,
-	PSR_S3D_ENABLED,
-	PSR_INTERLACED_ENABLED,
-	PSR_HSW_NOT_DDIA,
+struct i915_psr {
+	bool sink_support;
+	bool source_ok;
 };
 
 enum intel_pch {
@@ -704,6 +745,9 @@
 	u32 saveBLC_HIST_CTL;
 	u32 saveBLC_PWM_CTL;
 	u32 saveBLC_PWM_CTL2;
+	u32 saveBLC_HIST_CTL_B;
+	u32 saveBLC_PWM_CTL_B;
+	u32 saveBLC_PWM_CTL2_B;
 	u32 saveBLC_CPU_PWM_CTL;
 	u32 saveBLC_CPU_PWM_CTL2;
 	u32 saveFPB0;
@@ -823,17 +867,20 @@
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* On vlv we need to manually drop to Vmin with a delayed work. */
-	struct delayed_work vlv_work;
-
 	/* The below variables and all the rps hw state are protected by
 	 * dev->struct_mutex. */
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
 	u8 rpe_delay;
+	u8 rp1_delay;
+	u8 rp0_delay;
 	u8 hw_max;
 
+	int last_adj;
+	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+	bool enabled;
 	struct delayed_work delayed_resume_work;
 
 	/*
@@ -870,11 +917,21 @@
 
 /* Power well structure for haswell */
 struct i915_power_well {
-	struct drm_device *device;
-	spinlock_t lock;
 	/* power well enable/disable usage count */
 	int count;
-	int i915_request;
+};
+
+#define I915_MAX_POWER_WELLS 1
+
+struct i915_power_domains {
+	/*
+	 * Power wells needed for initialization at driver init and suspend
+	 * time are on. They are kept on until after the first modeset.
+	 */
+	bool init_power_on;
+
+	struct mutex lock;
+	struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
 };
 
 struct i915_dri1_state {
@@ -902,9 +959,11 @@
 	int mm_suspended;
 };
 
+#define MAX_L3_SLICES 2
 struct intel_l3_parity {
-	u32 *remap_info;
+	u32 *remap_info[MAX_L3_SLICES];
 	struct work_struct error_work;
+	int which_slice;
 };
 
 struct i915_gem_mm {
@@ -942,6 +1001,15 @@
 	struct delayed_work retire_work;
 
 	/**
+	 * When we detect an idle GPU, we want to turn on
+	 * powersaving features. So once we see that there
+	 * are no more requests outstanding and no more
+	 * arrive within a small period of time, we fire
+	 * off the idle_work.
+	 */
+	struct delayed_work idle_work;
+
+	/**
 	 * Are we in a non-interruptible section of code like
 	 * modesetting?
 	 */
@@ -979,6 +1047,9 @@
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	/* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
 	struct timer_list hangcheck_timer;
 
 	/* For reset and error_state handling. */
@@ -987,7 +1058,8 @@
 	struct drm_i915_error_state *first_error;
 	struct work_struct work;
 
-	unsigned long last_reset;
+
+	unsigned long missed_irq_rings;
 
 	/**
 	 * State variable and reset counter controlling the reset flow
@@ -1027,6 +1099,9 @@
 
 	/* For gpu hang simulation. */
 	unsigned int stop_rings;
+
+	/* For missed irq/seqno simulation. */
+	unsigned int test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1035,6 +1110,14 @@
 	MODESET_SUSPENDED,
 };
 
+struct ddi_vbt_port_info {
+	uint8_t hdmi_level_shift;
+
+	uint8_t supports_dvi:1;
+	uint8_t supports_hdmi:1;
+	uint8_t supports_dp:1;
+};
+
 struct intel_vbt_data {
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1060,10 +1143,17 @@
 	int edp_bpp;
 	struct edp_power_seq edp_pps;
 
+	/* MIPI DSI */
+	struct {
+		u16 panel_id;
+	} dsi;
+
 	int crt_ddc_pin;
 
 	int child_dev_num;
-	struct child_device_config *child_dev;
+	union child_device_config *child_dev;
+
+	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 };
 
 enum intel_ddb_partitioning {
@@ -1079,6 +1169,15 @@
 	uint32_t fbc_val;
 };
 
+struct hsw_wm_values {
+	uint32_t wm_pipe[3];
+	uint32_t wm_lp[3];
+	uint32_t wm_lp_spr[3];
+	uint32_t wm_linetime[3];
+	bool enable_fbc_wm;
+	enum intel_ddb_partitioning partitioning;
+};
+
 /*
  * This struct tracks the state needed for the Package C8+ feature.
  *
@@ -1148,6 +1247,36 @@
 	} regsave;
 };
 
+enum intel_pipe_crc_source {
+	INTEL_PIPE_CRC_SOURCE_NONE,
+	INTEL_PIPE_CRC_SOURCE_PLANE1,
+	INTEL_PIPE_CRC_SOURCE_PLANE2,
+	INTEL_PIPE_CRC_SOURCE_PF,
+	INTEL_PIPE_CRC_SOURCE_PIPE,
+	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
+	INTEL_PIPE_CRC_SOURCE_TV,
+	INTEL_PIPE_CRC_SOURCE_DP_B,
+	INTEL_PIPE_CRC_SOURCE_DP_C,
+	INTEL_PIPE_CRC_SOURCE_DP_D,
+	INTEL_PIPE_CRC_SOURCE_AUTO,
+	INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+	uint32_t frame;
+	uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR	128
+struct intel_pipe_crc {
+	spinlock_t lock;
+	bool opened;		/* exclusive access to the result file */
+	struct intel_pipe_crc_entry *entries;
+	enum intel_pipe_crc_source source;
+	int head, tail;
+	wait_queue_head_t wq;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
@@ -1272,6 +1401,10 @@
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
 
+#ifdef CONFIG_DEBUG_FS
+	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;
@@ -1297,17 +1430,18 @@
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;
 
-	/* Haswell power well */
-	struct i915_power_well power_well;
+	struct i915_power_domains power_domains;
 
-	enum no_psr_reason no_psr_reason;
+	struct i915_psr psr;
 
 	struct i915_gpu_error gpu_error;
 
 	struct drm_i915_gem_object *vlv_pctx;
 
+#ifdef CONFIG_DRM_I915_FBDEV
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
+#endif
 
 	/*
 	 * The console may be contended at resume, but we don't
@@ -1320,6 +1454,7 @@
 
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+	struct list_head context_list;
 
 	u32 fdi_rx_config;
 
@@ -1337,6 +1472,9 @@
 		uint16_t spr_latency[5];
 		/* cursor */
 		uint16_t cur_latency[5];
+
+		/* current hardware state */
+		struct hsw_wm_values hw;
 	} wm;
 
 	struct i915_package_c8 pc8;
@@ -1400,8 +1538,6 @@
 	struct list_head ring_list;
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
-	/** This object's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
 
 	/**
 	 * This is set if the object is on the active lists (has pending
@@ -1487,13 +1623,6 @@
 	void *dma_buf_vmapping;
 	int vmapping_count;
 
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1505,11 +1634,14 @@
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
 
+	/** References from framebuffers, locks out tiling changes. */
+	unsigned long framebuffer_references;
+
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
 	/** User space pin count and filp owning the pin */
-	uint32_t user_pin_count;
+	unsigned long user_pin_count;
 	struct drm_file *pin_filp;
 
 	/** for phy allocated objects */
@@ -1560,48 +1692,55 @@
 };
 
 struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+
 	struct {
 		spinlock_t lock;
 		struct list_head request_list;
+		struct delayed_work idle_work;
 	} mm;
 	struct idr context_idr;
 
 	struct i915_ctx_hang_stats hang_stats;
+	atomic_t rps_wait_boost;
 };
 
 #define INTEL_INFO(dev)	(to_i915(dev)->info)
 
-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
+#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
 #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
 #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
-				 (dev)->pci_device == 0x0152 ||	\
-				 (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
-				 (dev)->pci_device == 0x0106 ||	\
-				 (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
+				 (dev)->pdev->device == 0x0152 || \
+				 (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
+				 (dev)->pdev->device == 0x0106 || \
+				 (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0C00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)		(IS_HASWELL(dev) && \
-				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1616,9 +1755,13 @@
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
 
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
+#define RENDER_RING		(1<<RCS)
+#define BSD_RING		(1<<VCS)
+#define BLT_RING		(1<<BCS)
+#define VEBOX_RING		(1<<VECS)
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
@@ -1640,7 +1783,6 @@
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 
@@ -1653,6 +1795,7 @@
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)		(IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
@@ -1668,35 +1811,14 @@
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 
 #include "i915_trace.h"
 
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage.  This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE			(1<<0)
-#define INTEL_RC6p_ENABLE			(1<<1)
-#define INTEL_RC6pp_ENABLE			(1<<2)
-
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1767,12 +1889,13 @@
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1824,14 +1947,11 @@
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1870,9 +1990,8 @@
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -1913,7 +2032,7 @@
 	}
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
@@ -1933,11 +2052,11 @@
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
 		       struct drm_file *file,
 		       struct drm_i915_gem_object *batch_obj,
@@ -1964,6 +2083,7 @@
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1995,6 +2115,9 @@
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2031,7 +2154,6 @@
 	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
 				   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt
 
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2094,6 +2216,7 @@
 					  unsigned cache_level,
 					  bool mappable,
 					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2133,6 +2256,11 @@
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2186,15 +2314,30 @@
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2256,8 +2399,16 @@
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2266,37 +2417,21 @@
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
 
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
 
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
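
The header changes above replace the per-ring booleans (has_bsd_ring and friends) with a single ring_mask in intel_device_info, so HAS_BSD()/HAS_BLT()/HAS_VEBOX() reduce to bitmask tests. The standalone sketch below illustrates the idea; it takes a plain info pointer rather than a drm_device, and the struct and sample values are made up for the example.

	#include <stdio.h>

	enum ring_id { RCS, VCS, BCS, VECS };

	#define RENDER_RING	(1 << RCS)
	#define BSD_RING	(1 << VCS)
	#define BLT_RING	(1 << BCS)
	#define VEBOX_RING	(1 << VECS)

	struct fake_device_info {
		unsigned int ring_mask;	/* rings supported by the HW */
	};

	#define HAS_BSD(info)	((info)->ring_mask & BSD_RING)
	#define HAS_VEBOX(info)	((info)->ring_mask & VEBOX_RING)

	int main(void)
	{
		struct fake_device_info hsw = {
			.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
		};
		struct fake_device_info snb = {
			.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
		};

		printf("hsw: bsd=%d vebox=%d\n", !!HAS_BSD(&hsw), !!HAS_VEBOX(&hsw));
		printf("snb: bsd=%d vebox=%d\n", !!HAS_BSD(&snb), !!HAS_VEBOX(&snb));
		return 0;
	}
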
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cdfb9da..e7b39d7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,6 +41,9 @@
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
 						   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
@@ -61,8 +64,8 @@
 					     struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
 					    struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@
 		     struct drm_mode_create_dumb *args)
 {
 	/* have to work out size/pitch and return them */
-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
 	args->size = args->pitch * args->height;
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
@@ -432,11 +435,9 @@
 		 * optimizes for the case when the gpu will dirty the data
 		 * anyway again before the next pread happens. */
 		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, false);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@
 		 * optimizes for the case when the gpu will use the data
 		 * right away and we therefore have to clflush anyway. */
 		needs_clflush_after = cpu_write_needs_clflush(obj);
-		if (i915_gem_obj_bound_any(obj)) {
-			ret = i915_gem_object_set_to_gtt_domain(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
 	}
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing. */
@@ -966,12 +965,31 @@
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == ring->outstanding_lazy_request)
+	if (seqno == ring->outstanding_lazy_seqno)
 		ret = i915_add_request(ring, NULL);
 
 	return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+		       struct intel_ring_buffer *ring)
+{
+	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+	if (file_priv == NULL)
+		return true;
+
+	return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -992,13 +1010,14 @@
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 			unsigned reset_counter,
-			bool interruptible, struct timespec *timeout)
+			bool interruptible,
+			struct timespec *timeout,
+			struct drm_i915_file_private *file_priv)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	struct timespec before, now, wait_time={1,0};
-	unsigned long timeout_jiffies;
-	long end;
-	bool wait_forever = true;
+	struct timespec before, now;
+	DEFINE_WAIT(wait);
+	long timeout_jiffies;
 	int ret;
 
 	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	trace_i915_gem_request_wait_begin(ring, seqno);
+	timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-	if (timeout != NULL) {
-		wait_time = *timeout;
-		wait_forever = false;
+	if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+		gen6_rps_boost(dev_priv);
+		if (file_priv)
+			mod_delayed_work(dev_priv->wq,
+					 &file_priv->mm.idle_work,
+					 msecs_to_jiffies(100));
 	}
 
-	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-	if (WARN_ON(!ring->irq_get(ring)))
+	if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+	    WARN_ON(!ring->irq_get(ring)))
 		return -ENODEV;
 
-	/* Record current time in case interrupted by signal, or wedged * */
+	/* Record current time in case interrupted by signal, or wedged */
+	trace_i915_gem_request_wait_begin(ring, seqno);
 	getrawmonotonic(&before);
+	for (;;) {
+		struct timer_list timer;
+		unsigned long expire;
 
-#define EXIT_COND \
-	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 i915_reset_in_progress(&dev_priv->gpu_error) || \
-	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-	do {
-		if (interruptible)
-			end = wait_event_interruptible_timeout(ring->irq_queue,
-							       EXIT_COND,
-							       timeout_jiffies);
-		else
-			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-						 timeout_jiffies);
+		prepare_to_wait(&ring->irq_queue, &wait,
+				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
 		/* We need to check whether any gpu reset happened in between
 		 * the caller grabbing the seqno and now ... */
-		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-			end = -EAGAIN;
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
+			 * is truly gone. */
+			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+			if (ret == 0)
+				ret = -EAGAIN;
+			break;
+		}
 
-		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-		 * gone. */
-		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-		if (ret)
-			end = ret;
-	} while (end == 0 && wait_forever);
+		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+			ret = 0;
+			break;
+		}
 
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		if (timeout_jiffies <= 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		timer.function = NULL;
+		if (timeout || missed_irq(dev_priv, ring)) {
+			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+			expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
+			mod_timer(&timer, expire);
+		}
+
+		io_schedule();
+
+		if (timeout)
+			timeout_jiffies = expire - jiffies;
+
+		if (timer.function) {
+			del_singleshot_timer_sync(&timer);
+			destroy_timer_on_stack(&timer);
+		}
+	}
 	getrawmonotonic(&now);
+	trace_i915_gem_request_wait_end(ring, seqno);
 
 	ring->irq_put(ring);
-	trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+	finish_wait(&ring->irq_queue, &wait);
 
 	if (timeout) {
 		struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@
 			set_normalized_timespec(timeout, 0, 0);
 	}
 
-	switch (end) {
-	case -EIO:
-	case -EAGAIN: /* Wedged */
-	case -ERESTARTSYS: /* Signal */
-		return (int)end;
-	case 0: /* Timeout */
-		return -ETIME;
-	default: /* Completed */
-		WARN_ON(end < 0); /* We're not aware of other errors */
-		return 0;
-	}
+	return ret;
 }
 
 /**
@@ -1097,7 +1134,7 @@
 
 	return __wait_seqno(ring, seqno,
 			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL);
+			    interruptible, NULL, NULL);
 }
 
 static int
@@ -1147,6 +1184,7 @@
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct drm_file *file,
 					    bool readonly)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
@@ -1222,7 +1260,7 @@
 	 * We will repeat the flush holding the lock in the normal manner
 	 * to catch cases where we are gazumped.
 	 */
-	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
 	if (ret)
 		goto unref;
 
@@ -1690,13 +1728,13 @@
 	return 0;
 }
 
-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
 	struct list_head still_bound_list;
 	struct drm_i915_gem_object *obj, *next;
-	long count = 0;
+	unsigned long count = 0;
 
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@
 	return count;
 }
 
-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
 	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
 				 global_list) {
-		if (obj->pages_pin_count == 0)
+		if (i915_gem_object_put_pages(obj) == 0)
 			freed += obj->base.size >> PAGE_SHIFT;
-		i915_gem_object_put_pages(obj);
 	}
 	return freed;
 }
@@ -1865,6 +1902,9 @@
 			sg->length += PAGE_SIZE;
 		}
 		last_pfn = page_to_pfn(page);
+
+		/* Check that the i965g/gm workaround works. */
+		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
 #ifdef CONFIG_SWIOTLB
 	if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@
 	return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@
 	}
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_ring_buffer *ring)
+{
+	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@
 	if (ret)
 		return ret;
 
-	request = kmalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL)
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
 		return -ENOMEM;
 
-
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring);
-	if (ret) {
-		kfree(request);
+	if (ret)
 		return ret;
-	}
 
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->head = request_start;
 	request->tail = request_ring_position;
-	request->ctx = ring->last_context;
-	request->batch_obj = obj;
 
 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
+	request->batch_obj = obj;
 
+	/* Hold a reference to the current context so that we can inspect
+	 * it later in case a hangcheck error event fires.
+	 */
+	request->ctx = ring->last_context;
 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
 
@@ -2129,12 +2176,14 @@
 	}
 
 	trace_i915_gem_request_add(ring, request->seqno);
-	ring->outstanding_lazy_request = 0;
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
 
 	if (!dev_priv->ums.mm_suspended) {
 		i915_queue_hangcheck(ring->dev);
 
 		if (was_empty) {
+			cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work,
 					   round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@
 		return;
 
 	spin_lock(&file_priv->mm.lock);
-	if (request->file_priv) {
-		list_del(&request->client_list);
-		request->file_priv = NULL;
-	}
+	list_del(&request->client_list);
+	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2224,6 +2271,21 @@
 	return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+	const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+	if (hs->banned)
+		return true;
+
+	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+		DRM_ERROR("context hanging too fast, declaring banned!\n");
+		return true;
+	}
+
+	return false;
+}
+
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
 				  struct drm_i915_gem_request *request,
 				  u32 acthd)
@@ -2260,10 +2322,13 @@
 		hs = &request->file_priv->hang_stats;
 
 	if (hs) {
-		if (guilty)
+		if (guilty) {
+			hs->banned = i915_context_is_banned(hs);
 			hs->batch_active++;
-		else
+			hs->guilty_ts = get_seconds();
+		} else {
 			hs->batch_pending++;
+		}
 	}
 }
 
@@ -2341,6 +2406,8 @@
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
+	i915_gem_cleanup_ringbuffer(dev);
+
 	i915_gem_restore_fences(dev);
 }
 
@@ -2405,57 +2472,53 @@
 	WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	bool idle = true;
 	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (idle)
+		mod_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.idle_work,
+				   msecs_to_jiffies(100));
+
+	return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-	drm_i915_private_t *dev_priv;
-	struct drm_device *dev;
-	struct intel_ring_buffer *ring;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.retire_work.work);
+	struct drm_device *dev = dev_priv->dev;
 	bool idle;
-	int i;
-
-	dev_priv = container_of(work, drm_i915_private_t,
-				mm.retire_work.work);
-	dev = dev_priv->dev;
 
 	/* Come back later if the device is busy... */
-	if (!mutex_trylock(&dev->struct_mutex)) {
+	idle = false;
+	if (mutex_trylock(&dev->struct_mutex)) {
+		idle = i915_gem_retire_requests(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+	if (!idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 				   round_jiffies_up_relative(HZ));
-		return;
-	}
+}
 
-	i915_gem_retire_requests(dev);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), mm.idle_work.work);
 
-	/* Send a periodic flush down the ring so we don't hold onto GEM
-	 * objects indefinitely.
-	 */
-	idle = true;
-	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL);
-
-		idle &= list_empty(&ring->request_list);
-	}
-
-	if (!dev_priv->ums.mm_suspended && !idle)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-	if (idle)
-		intel_mark_idle(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+	intel_mark_idle(dev_priv->dev);
 }
 
 /**
@@ -2553,7 +2616,7 @@
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
 	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
 	return ret;
@@ -2600,6 +2663,7 @@
 	if (ret)
 		return ret;
 
+	trace_i915_gem_ring_sync_to(from, to, seqno);
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
 		/* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
+	/* For now we only ever use 1 vma per object */
+	WARN_ON(!list_is_singular(&obj->vma_list));
+
 	if (list_empty(&vma->vma_link))
 		return 0;
 
-	if (!drm_mm_node_allocated(&vma->node))
-		goto destroy;
+	if (!drm_mm_node_allocated(&vma->node)) {
+		i915_gem_vma_destroy(vma);
+
+		return 0;
+	}
 
 	if (obj->pin_count)
 		return -EBUSY;
@@ -2685,13 +2755,10 @@
 
 	drm_mm_remove_node(&vma->node);
 
-destroy:
 	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
-	 * NB: Until we have real VMAs there will only ever be one */
-	WARN_ON(!list_empty(&obj->vma_list));
+	 * no more VMAs exist. */
 	if (list_empty(&obj->vma_list))
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -3389,8 +3456,7 @@
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
+		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 		if (vma)
 			list_move_tail(&vma->mm_list,
 				       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3827,7 @@
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3865,6 +3931,11 @@
 		goto out;
 	}
 
+	if (obj->user_pin_count == ULONG_MAX) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	if (obj->user_pin_count == 0) {
 		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
 		if (ret)
@@ -4015,7 +4086,6 @@
 {
 	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4087,13 +4157,6 @@
 	return obj;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,9 +4210,20 @@
 	i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == vm)
+			return vma;
+
+	return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+					      struct i915_address_space *vm)
+{
 	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -4169,76 +4243,103 @@
 	return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm)
+{
+	struct i915_vma *vma;
+
+	vma = i915_gem_obj_to_vma(obj, vm);
+	if (!vma)
+		vma = __i915_gem_vma_create(obj, vm);
+
+	return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
 	WARN_ON(vma->node.allocated);
+
+	/* Keep the vma as a placeholder in the execbuffer reservation lists */
+	if (!list_empty(&vma->exec_list))
+		return;
+
 	list_del(&vma->vma_link);
+
 	kfree(vma);
 }
 
 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret = 0;
 
-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->ums.mm_suspended)
+		goto err;
 
 	ret = i915_gpu_idle(dev);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
+	if (ret)
+		goto err;
+
 	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 
-	/* Cancel the retire work handler, which should be idle now. */
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
+	 */
+	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+							     DRIVER_MODESET);
+	mutex_unlock(&dev->struct_mutex);
+
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
 	return 0;
+
+err:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 misccpctl;
-	int i;
+	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	int i, ret;
 
-	if (!HAS_L3_GPU_CACHE(dev))
-		return;
+	if (!HAS_L3_DPF(dev) || !remap_info)
+		return 0;
 
-	if (!dev_priv->l3_parity.remap_info)
-		return;
+	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	if (ret)
+		return ret;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-	POSTING_READ(GEN7_MISCCPCTL);
-
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG("0x%x was already programmed to %x\n",
-				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->l3_parity.remap_info[i/4])
-			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, reg_base + i);
+		intel_ring_emit(ring, remap_info[i/4]);
 	}
 
-	/* Make sure all the writes land before disabling dop clock gating */
-	POSTING_READ(GEN7_L3LOG_BASE);
+	intel_ring_advance(ring);
 
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4330,7 +4431,7 @@
 i915_gem_init_hw(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4338,20 +4439,26 @@
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
+	if (IS_HSW_GT3(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+	else
+		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
 		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
 		I915_WRITE(GEN7_MSG_CTL, temp);
 	}
 
-	i915_gem_l3_remap(dev);
-
 	i915_gem_init_swizzling(dev);
 
 	ret = i915_gem_init_rings(dev);
 	if (ret)
 		return ret;
 
+	for (i = 0; i < NUM_L3_SLICES(dev); i++)
+		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -4454,26 +4561,12 @@
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
 	drm_irq_uninstall(dev);
 
-	mutex_lock(&dev->struct_mutex);
-	ret =  i915_gem_idle(dev);
-
-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	if (ret != 0)
-		dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return i915_gem_suspend(dev);
 }
 
 void
@@ -4484,11 +4577,9 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4523,6 +4614,7 @@
 	INIT_LIST_HEAD(&dev_priv->vm_list);
 	i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
+	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4624,8 @@
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4676,7 @@
 	if (dev_priv->mm.phys_objs[id - 1] || !size)
 		return 0;
 
-	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+	phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
 	if (!phys_obj)
 		return -ENOMEM;
 
@@ -4756,6 +4850,8 @@
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
+	cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
 	/* Clean up our request list when the client is going away, so that
 	 * later retire_requests won't dereference our soon-to-be-gone
 	 * file_priv.
@@ -4773,6 +4869,38 @@
 	spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_file_private *file_priv =
+		container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+	atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+	file_priv->dev_priv = dev->dev_private;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+			  i915_gem_file_idle_work_handler);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
@@ -4823,6 +4951,7 @@
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return count;
 }
 
@@ -4859,11 +4988,10 @@
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_vma *vma;
 
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-		if (i915_gem_obj_bound(o, vm))
+	list_for_each_entry(vma, &o->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
 			return true;
 
 	return false;
@@ -4895,7 +5023,6 @@
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed;
 	bool unlock = true;
 
@@ -4909,38 +5036,30 @@
 		unlock = false;
 	}
 
-	freed = i915_gem_purge(dev_priv, nr_to_scan);
-	if (freed < nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv, nr_to_scan,
-							false);
-	if (freed < nr_to_scan)
+	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	if (freed < sc->nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv,
+					   sc->nr_to_scan - freed,
+					   false);
+	if (freed < sc->nr_to_scan)
 		freed += i915_gem_shrink_all(dev_priv);
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
+
 	return freed;
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
-			return vma;
-
-	return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	vma = i915_gem_obj_to_vma(obj, vm);
-	if (!vma)
-		vma = i915_gem_vma_create(obj, vm);
+	if (WARN_ON(list_empty(&obj->vma_list)))
+		return NULL;
+
+	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+		return NULL;
 
 	return vma;
 }
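
The __wait_seqno() rework above replaces the wait_event_*() macros with an open-coded prepare/check/sleep loop so each exit condition (seqno passed, GPU reset, pending signal, timeout) is handled explicitly. Below is a minimal userspace sketch of that loop shape, using pthreads purely as a stand-in for the ring's irq waitqueue; all names are invented, and seqno wrapping, reset and signal handling are omitted:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned int completed_seqno;	/* stands in for ring->get_seqno() */

static void *fake_irq_thread(void *arg)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

	(void)arg;
	nanosleep(&delay, NULL);
	pthread_mutex_lock(&lock);
	completed_seqno = 42;		/* pretend the GPU finished seqno 42 */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Wait until @seqno completes or @timeout_ms expires: 0 on success, -1 on
 * timeout.  Mirrors the explicit for (;;) { check; sleep; } structure. */
static int wait_seqno(unsigned int seqno, int timeout_ms)
{
	struct timespec deadline;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	for (;;) {
		if (completed_seqno >= seqno)
			break;				/* condition met */
		if (pthread_cond_timedwait(&cond, &lock, &deadline)) {
			ret = -1;			/* -ETIME in the driver */
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, fake_irq_thread, NULL);
	printf("wait_seqno(42) = %d\n", wait_seqno(42, 1000));
	pthread_join(irq, NULL);
	return 0;
}
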
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 403309c..cc619c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  *  The "current context" means the context which is currently running on the
- *  GPU. The GPU has loaded it's state already and has stored away the gtt
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
  *  offset of the BO. The GPU is not actively referencing the data at this
  *  offset, but it will on the next context switch. The only way to avoid this
  *  is to do a GPU reset.
@@ -129,6 +129,7 @@
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 
+	list_del(&ctx->link);
 	drm_gem_object_unreference(&ctx->obj->base);
 	kfree(ctx);
 }
@@ -147,6 +148,7 @@
 
 	kref_init(&ctx->ref);
 	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	INIT_LIST_HEAD(&ctx->link);
 	if (ctx->obj == NULL) {
 		kfree(ctx);
 		DRM_DEBUG_DRIVER("Context object allocated failed\n");
@@ -166,6 +168,7 @@
 	 * assertion in the context switch code.
 	 */
 	ctx->ring = &dev_priv->ring[RCS];
+	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	/* Default context will never have a file_priv */
 	if (file_priv == NULL)
@@ -178,6 +181,10 @@
 
 	ctx->file_priv = file_priv;
 	ctx->id = ret;
+	/* NB: Mark all slices as needing a remap so that when the context first
+	 * loads it will restore whatever remap state already exists. If there
+	 * is no remap info, it will be a NOP. */
+	ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
 	return ctx;
 
@@ -213,7 +220,6 @@
 	 * may not be available. To avoid this we always pin the
 	 * default context.
 	 */
-	dev_priv->ring[RCS].default_context = ctx;
 	ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +232,8 @@
 		goto err_unpin;
 	}
 
+	dev_priv->ring[RCS].default_context = ctx;
+
 	DRM_DEBUG_DRIVER("Default HW context loaded\n");
 	return 0;
 
@@ -281,16 +289,24 @@
 	 * other code, leading to spurious errors. */
 	intel_gpu_reset(dev);
 
-	i915_gem_object_unpin(dctx->obj);
-
 	/* When default context is created and switched to, base object refcount
 	 * will be 2 (+1 from object creation and +1 from do_switch()).
 	 * i915_gem_context_fini() will be called after gpu_idle() has switched
 	 * to default context. So we need to unreference the base object once
 	 * to offset the do_switch part, so that i915_gem_context_unreference()
 	 * can then free the base object correctly. */
-	drm_gem_object_unreference(&dctx->obj->base);
+	WARN_ON(!dev_priv->ring[RCS].last_context);
+	if (dev_priv->ring[RCS].last_context == dctx) {
+		/* Fake switch to NULL context */
+		WARN_ON(dctx->obj->active);
+		i915_gem_object_unpin(dctx->obj);
+		i915_gem_context_unreference(dctx);
+	}
+
+	i915_gem_object_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
+	dev_priv->ring[RCS].default_context = NULL;
+	dev_priv->ring[RCS].last_context = NULL;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +409,11 @@
 	struct intel_ring_buffer *ring = to->ring;
 	struct i915_hw_context *from = ring->last_context;
 	u32 hw_flags = 0;
-	int ret;
+	int ret, i;
 
 	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-	if (from == to)
+	if (from == to && !to->remap_slice)
 		return 0;
 
 	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +436,6 @@
 
 	if (!to->is_initialized || is_default_context(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
-	else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-		hw_flags |= MI_FORCE_RESTORE;
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
@@ -429,6 +443,18 @@
 		return ret;
 	}
 
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(ring, i);
+		/* If it failed, try again next round */
+		if (ret)
+			DRM_DEBUG_DRIVER("L3 remapping failed\n");
+		else
+			to->remap_slice &= ~(1<<i);
+	}
+
 	/* The backing object for the context is done after switching to the
 	 * *next* context. Therefore we cannot retire the previous context until
 	 * the next context has already started running. In fact, the below code
@@ -436,11 +462,8 @@
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-		struct i915_address_space *ggtt = &dev_priv->gtt.base;
 		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-		i915_gem_object_move_to_active(from->obj, ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -451,17 +474,7 @@
 		from->obj->dirty = 1;
 		BUG_ON(from->obj->ring != ring);
 
-		ret = i915_add_request(ring, NULL);
-		if (ret) {
-			/* Too late, we've already scheduled a context switch.
-			 * Try to undo the change so that the hw state is
-			 * consistent with out tracking. In case of emergency,
-			 * scream.
-			 */
-			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-			return ret;
-		}
-
+		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}
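
The context-switch changes above track outstanding L3 remaps as a per-context bitmask: every slice bit starts set, and a bit is only cleared once i915_gem_l3_remap() succeeds for that slice, so a failed remap is simply retried on the next switch. A small illustrative-only C model of that bookkeeping (do_remap() and its failure pattern are made up):

#include <stdio.h>

#define MAX_L3_SLICES 2

static int do_remap(int slice)
{
	static int failed_once;

	/* pretend slice 1 fails on the first attempt */
	if (slice == 1 && !failed_once++)
		return -1;
	printf("remapped slice %d\n", slice);
	return 0;
}

static void switch_context(unsigned int *remap_slice)
{
	for (int i = 0; i < MAX_L3_SLICES; i++) {
		if (!(*remap_slice & (1u << i)))
			continue;
		if (do_remap(i))
			printf("slice %d remap failed, retry next switch\n", i);
		else
			*remap_slice &= ~(1u << i);	/* done, clear the bit */
	}
}

int main(void)
{
	/* mark all slices as needing a remap, as context creation now does */
	unsigned int remap_slice = (1u << MAX_L3_SLICES) - 1;

	switch_context(&remap_slice);	/* slice 1 fails, its bit stays set */
	switch_context(&remap_slice);	/* retried and cleared here */
	printf("pending mask: 0x%x\n", remap_slice);
	return 0;
}
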
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 91b7001..b737653 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -37,6 +37,9 @@
 	if (vma->obj->pin_count)
 		return false;
 
+	if (WARN_ON(!list_empty(&vma->exec_list)))
+		return false;
+
 	list_add(&vma->exec_list, unwind);
 	return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -152,12 +155,48 @@
 	return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants
+ * fine-grained eviction, see i915_gem_evict_something() for more details. In
+ * terms of freeing up actual system memory, this function may not accomplish
+ * the desired result. An object may be shared across multiple address spaces,
+ * and this function does not guarantee that such objects are freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires and
+ * inactivates currently active BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	trace_i915_gem_evict_vm(vm);
+
+	if (do_idle) {
+		ret = i915_gpu_idle(vm->dev);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests(vm->dev);
+	}
+
+	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+		if (vma->obj->pin_count == 0)
+			WARN_ON(i915_vma_unbind(vma));
+
+	return 0;
+}
+
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct i915_address_space *vm;
-	struct i915_vma *vma, *next;
 	bool lists_empty = true;
 	int ret;
 
@@ -184,11 +223,8 @@
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-			if (vma->obj->pin_count == 0)
-				WARN_ON(i915_vma_unbind(vma));
-	}
+	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
 }
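
For orientation, the new i915_gem_evict_vm() above amounts to: optionally idle the GPU and retire requests, then walk the address space's inactive list and unbind every VMA that is not pinned. A hedged userspace sketch of that shape (plain linked list instead of the kernel list helpers, all names invented):

#include <stdbool.h>
#include <stdio.h>

struct vma {
	int id;
	int pin_count;
	bool bound;
	struct vma *next;
};

static void evict_vm(struct vma *inactive_list, bool do_idle)
{
	if (do_idle)
		printf("idling GPU and retiring requests first\n");

	for (struct vma *vma = inactive_list; vma; vma = vma->next) {
		if (vma->pin_count)
			continue;		/* pinned VMAs stay bound */
		vma->bound = false;		/* i915_vma_unbind() in the driver */
		printf("unbound vma %d\n", vma->id);
	}
}

int main(void)
{
	struct vma c = { 3, 0, true, NULL };
	struct vma b = { 2, 1, true, &c };	/* pinned, must survive */
	struct vma a = { 1, 0, true, &b };

	evict_vm(&a, true);
	printf("vma 2 still bound: %d\n", b.bound);
	return 0;
}
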
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bf34577..0ce0d47 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,35 +33,35 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 	};
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 
 	if (eb == NULL) {
-		int size = args->buffer_count;
-		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		while (count > 2*size)
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 			return eb;
@@ -70,64 +70,102 @@
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret = 0;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can look
+	 * up or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto out;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
-
-		obj->exec_entry = &exec[i];
-		if (eb->and < 0) {
-			eb->lut[i] = obj;
-		} else {
-			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
-				       &eb->buckets[handle & eb->and]);
-		}
+		list_add_tail(&obj->obj_exec_link, &objects);
 	}
 	spin_unlock(&file->table_lock);
 
-	return 0;
+	i = 0;
+	list_for_each_entry(obj, &objects, obj_exec_link) {
+		struct i915_vma *vma;
+
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) pair, we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto out;
+		}
+
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		vma->exec_entry = &exec[i];
+		if (eb->and < 0) {
+			eb->lut[i] = vma;
+		} else {
+			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
+				       &eb->buckets[handle & eb->and]);
+		}
+		++i;
+	}
+
+
+out:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		if (ret)
+			drm_gem_object_unreference(&obj->base);
+	}
+	return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
 		if (handle >= -eb->and)
@@ -139,34 +177,33 @@
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb) {
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
 				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+	return (HAS_LLC(obj->base.dev) ||
+		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -179,7 +216,7 @@
 	char *vaddr;
 	int ret = -EINVAL;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
@@ -223,22 +260,24 @@
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
 	int ret = -EINVAL;
 
 	/* we've already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
 	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
 
 	user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-								 vm);
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+								 vma->vm);
 			if (ret)
 				return ret;
 
@@ -368,17 +406,16 @@
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
@@ -387,10 +424,10 @@
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 			     struct i915_address_space *vm)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
@@ -415,31 +452,32 @@
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
-				   bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+				struct intel_ring_buffer *ring,
+				bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(obj);
+	need_mappable = need_fence || need_reloc_mappable(vma);
 
-	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
 				  false);
 	if (ret)
 		return ret;
@@ -467,8 +505,8 @@
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
 
@@ -485,14 +523,15 @@
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (!i915_gem_obj_bound_any(obj))
+	if (!drm_mm_node_allocated(&vma->node))
 		return;
 
-	entry = obj->exec_entry;
+	entry = vma->exec_entry;
 
 	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
 		i915_gem_object_unpin_fence(obj);
@@ -505,41 +544,46 @@
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct list_head *objects,
-			    struct i915_address_space *vm,
+			    struct list_head *vmas,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
-	struct list_head ordered_objects;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	INIT_LIST_HEAD(&ordered_objects);
-	while (!list_empty(objects)) {
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		obj = list_first_entry(objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		entry = obj->exec_entry;
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
 
 		need_fence =
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(obj);
+		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (need_mappable)
-			list_move(&obj->exec_list, &ordered_objects);
+			list_move(&vma->exec_list, &ordered_vmas);
 		else
-			list_move_tail(&obj->exec_list, &ordered_objects);
+			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
 		obj->pending_fenced_gpu_access = false;
 	}
-	list_splice(&ordered_objects, objects);
+	list_splice(&ordered_vmas, vmas);
 
 	/* Attempt to pin all of the buffers into the GTT.
 	 * This is done in 3 phases:
@@ -558,52 +602,52 @@
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+		list_for_each_entry(vma, vmas, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 			bool need_fence, need_mappable;
-			u32 obj_offset;
 
-			if (!i915_gem_obj_bound(obj, vm))
+			obj = vma->obj;
+
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(obj);
+			need_mappable = need_fence || need_reloc_mappable(vma);
 
 			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vm));
+			       !i915_is_ggtt(vma->vm));
 
 			if ((entry->alignment &&
-			     obj_offset & (entry->alignment - 1)) ||
+			     vma->node.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			if (i915_gem_obj_bound(obj, vm))
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list)
-			i915_gem_execbuffer_unreserve_object(obj);
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		ret = i915_gem_evict_everything(ring->dev);
+		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);
@@ -614,24 +658,27 @@
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_address_space *vm)
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
-	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
-	int count = args->buffer_count;
+	unsigned count = args->buffer_count;
+
+	if (WARN_ON(list_empty(&eb->vmas)))
+		return 0;
+
+	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(&eb->objects)) {
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+	while (!list_empty(&eb->vmas)) {
+		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+		list_del_init(&vma->exec_list);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -695,20 +742,19 @@
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		int offset = obj->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset],
-							       vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
 	}
@@ -727,14 +773,15 @@
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-				struct list_head *objects)
+				struct list_head *vmas)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
 	int ret;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
@@ -771,8 +818,8 @@
 		   int count)
 {
 	int i;
-	int relocs_total = 0;
-	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned relocs_total = 0;
+	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +856,13 @@
 }
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
@@ -825,9 +872,7 @@
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		/* FIXME: This lookup gets fixed later <-- danvet */
-		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
-		i915_gem_object_move_to_active(obj, ring);
+		i915_vma_move_to_active(vma, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +930,11 @@
 		       struct i915_address_space *vm)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct eb_objects *eb;
+	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
+	struct i915_ctx_hang_stats *hs;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
@@ -1000,7 +1046,8 @@
 			return -EINVAL;
 		}
 
-		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
 				    GFP_KERNEL);
 		if (cliprects == NULL) {
 			ret = -ENOMEM;
@@ -1025,7 +1072,7 @@
 		goto pre_mutex_err;
 	}
 
-	eb = eb_create(args);
+	eb = eb_create(args, vm);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
@@ -1033,18 +1080,16 @@
 	}
 
 	/* Look up object handles */
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
+	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1054,7 +1099,7 @@
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec, vm);
+								eb, exec);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1076,10 +1121,21 @@
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
 		goto err;
 
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs)) {
+		ret = PTR_ERR(hs);
+		goto err;
+	}
+
+	if (hs->banned) {
+		ret = -EIO;
+		goto err;
+	}
+
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1131,7 +1187,7 @@
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
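
The eb_objects to eb_vmas conversion above keeps the same lookup scheme, now keyed on VMAs: small batches use a direct index (eb->and < 0), larger ones hash the execbuffer handle into power-of-two buckets with "handle & eb->and". A simplified userspace sketch of the bucketed path only, with the struct layout invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct vma {
	unsigned long exec_handle;
	struct vma *next;		/* stands in for the hlist node */
};

struct eb {
	int and;			/* bucket mask: power of two minus one */
	struct vma **buckets;
};

static void eb_add(struct eb *eb, struct vma *vma, unsigned long handle)
{
	vma->exec_handle = handle;
	vma->next = eb->buckets[handle & eb->and];
	eb->buckets[handle & eb->and] = vma;
}

static struct vma *eb_get_vma(struct eb *eb, unsigned long handle)
{
	for (struct vma *vma = eb->buckets[handle & eb->and]; vma; vma = vma->next)
		if (vma->exec_handle == handle)
			return vma;
	return NULL;
}

int main(void)
{
	struct eb eb = { .and = 7, .buckets = calloc(8, sizeof(struct vma *)) };
	struct vma a, b;

	eb_add(&eb, &a, 3);
	eb_add(&eb, &b, 11);		/* collides with handle 3 in bucket 3 */
	printf("%d %d %d\n", eb_get_vma(&eb, 3) == &a,
	       eb_get_vma(&eb, 11) == &b, eb_get_vma(&eb, 5) == NULL);
	free(eb.buckets);
	return 0;
}
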
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7b4ca..c4c42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -342,7 +342,7 @@
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
 				  GFP_KERNEL);
 	if (!ppgtt->pt_pages)
 		return -ENOMEM;
@@ -353,7 +353,7 @@
 			goto err_pt_alloc;
 	}
 
-	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
 				     GFP_KERNEL);
 	if (!ppgtt->pt_dma_addr)
 		goto err_pt_alloc;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e15a1d9..d284d89 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -395,7 +395,7 @@
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
-	vma = i915_gem_vma_create(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err_out;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef..b139053 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@
 		return -EINVAL;
 	}
 
-	if (obj->pin_count) {
+	if (obj->pin_count || obj->framebuffer_references) {
 		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
@@ -393,7 +393,7 @@
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
 		if (obj->bit_17 == NULL) {
-			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+			obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
 					      sizeof(long), GFP_KERNEL);
 		}
 	} else {
@@ -504,8 +504,8 @@
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-					   sizeof(long), GFP_KERNEL);
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dae364f..a8bb213 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -215,6 +215,24 @@
 	}
 }
 
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+	switch (a) {
+	case HANGCHECK_IDLE:
+		return "idle";
+	case HANGCHECK_WAIT:
+		return "wait";
+	case HANGCHECK_ACTIVE:
+		return "active";
+	case HANGCHECK_KICK:
+		return "kick";
+	case HANGCHECK_HUNG:
+		return "hung";
+	}
+
+	return "unknown";
+}
+
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
 				  struct drm_device *dev,
 				  struct drm_i915_error_state *error,
@@ -231,7 +249,8 @@
 	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
 	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
+	if (INTEL_INFO(dev)->gen >= 4)
+		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
 	if (INTEL_INFO(dev)->gen >= 4)
 		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
 	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -255,6 +274,9 @@
 	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
 	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
 	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+	err_printf(m, "  hangcheck: %s [%d]\n",
+		   hangcheck_action_to_str(error->hangcheck_action[ring]),
+		   error->hangcheck_score[ring]);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +305,14 @@
 	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
 	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
 	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
 	err_printf(m, "CCID: 0x%08x\n", error->ccid);
+	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++)
 		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
@@ -703,6 +726,7 @@
 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
 		if (ring->id == RCS)
 			error->bbaddr = I915_READ64(BB_ADDR);
+		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
 	} else {
 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
 		error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -720,6 +744,9 @@
 
 	error->cpu_ring_head[ring->id] = ring->head;
 	error->cpu_ring_tail[ring->id] = ring->tail;
+
+	error->hangcheck_score[ring->id] = ring->hangcheck.score;
+	error->hangcheck_action[ring->id] = ring->hangcheck.action;
 }
 
 
@@ -769,7 +796,7 @@
 
 		error->ring[i].num_requests = count;
 		error->ring[i].requests =
-			kmalloc(count*sizeof(struct drm_i915_error_request),
+			kcalloc(count, sizeof(*error->ring[i].requests),
 				GFP_ATOMIC);
 		if (error->ring[i].requests == NULL) {
 			error->ring[i].num_requests = 0;
@@ -811,7 +838,7 @@
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
 	if (i) {
-		active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
 		if (active_bo)
 			pinned_bo = active_bo + error->active_bo_count[ndx];
 	}
@@ -885,8 +912,12 @@
 		return;
 	}
 
-	DRM_INFO("capturing error event; look for more information in "
-		 "/sys/class/drm/card%d/error\n", dev->primary->index);
+	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+		 dev->primary->index);
+	DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+	DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+	DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+	DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
 	kref_init(&error->ref);
 	error->eir = I915_READ(EIR);
@@ -988,6 +1019,7 @@
 	case I915_CACHE_NONE: return " uncached";
 	case I915_CACHE_LLC: return " snooped or LLC";
 	case I915_CACHE_L3_LLC: return " L3+LLC";
+	case I915_CACHE_WT: return " WT";
 	default: return "";
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4b91228..d26f652 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -441,7 +442,7 @@
 
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -458,7 +459,7 @@
 }
 
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
 {
 	u32 reg = PIPESTAT(pipe);
 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
@@ -486,9 +487,10 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
-	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
 	if (INTEL_INFO(dev)->gen >= 4)
-		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		i915_enable_pipestat(dev_priv, PIPE_A,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -518,6 +520,12 @@
 	}
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	/* Gen2 doesn't have a hardware frame counter */
+	return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -526,7 +534,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low;
+	u32 high1, high2, low, pixel, vbl_start;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +542,24 @@
 		return 0;
 	}
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct intel_crtc *intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		const struct drm_display_mode *mode =
+			&intel_crtc->config.adjusted_mode;
+
+		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+	} else {
+		enum transcoder cpu_transcoder =
+			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+		u32 htotal;
+
+		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+		vbl_start *= htotal;
+	}
+
 	high_frame = PIPEFRAME(pipe);
 	low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -544,13 +570,20 @@
 	 */
 	do {
 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		low   = I915_READ(low_frame);
 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	pixel = low & PIPE_PIXEL_MASK;
 	low >>= PIPE_FRAME_LOW_SHIFT;
-	return (high1 << 8) | low;
+
+	/*
+	 * The frame counter increments at beginning of active.
+	 * Cook up a vblank counter by also checking the pixel
+	 * counter against vblank start.
+	 */
+	return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,67 +600,164 @@
 	return I915_READ(reg);
 }
 
-static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
-			     int *vpos, int *hpos)
+/* raw reads, only for fast reads of display block, no need for forcewake etc. */
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+
+static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 vbl = 0, position = 0;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t status;
+	int reg;
+
+	if (IS_VALLEYVIEW(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = VLV_ISR;
+	} else if (IS_GEN2(dev)) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 5) {
+		status = pipe == PIPE_A ?
+			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+		reg = ISR;
+	} else if (INTEL_INFO(dev)->gen < 7) {
+		status = pipe == PIPE_A ?
+			DE_PIPEA_VBLANK :
+			DE_PIPEB_VBLANK;
+
+		reg = DEISR;
+	} else {
+		switch (pipe) {
+		default:
+		case PIPE_A:
+			status = DE_PIPEA_VBLANK_IVB;
+			break;
+		case PIPE_B:
+			status = DE_PIPEB_VBLANK_IVB;
+			break;
+		case PIPE_C:
+			status = DE_PIPEC_VBLANK_IVB;
+			break;
+		}
+
+		reg = DEISR;
+	}
+
+	if (IS_GEN2(dev))
+		return __raw_i915_read16(dev_priv, reg) & status;
+	else
+		return __raw_i915_read32(dev_priv, reg) & status;
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	int position;
 	int vbl_start, vbl_end, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
+	unsigned long irqflags;
 
-	if (!i915_pipe_enabled(dev, pipe)) {
+	if (!intel_crtc->active) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
 	}
 
-	/* Get vtotal. */
-	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+	htotal = mode->crtc_htotal;
+	vtotal = mode->crtc_vtotal;
+	vbl_start = mode->crtc_vblank_start;
+	vbl_end = mode->crtc_vblank_end;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
+	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/*
+	 * Lock uncore.lock, as we will do multiple timing critical raw
+	 * register reads, potentially with preemption disabled, so the
+	 * following code must not block on uncore.lock.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		/* No obvious pixelcount register. Only query vertical
 		 * scanout position from Display scan line register.
 		 */
-		position = I915_READ(PIPEDSL(pipe));
+		if (IS_GEN2(dev))
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+		else
+			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-		/* Decode into vertical scanout position. Don't have
-		 * horizontal scanout position.
+		/*
+		 * The scanline counter increments at the leading edge
+		 * of hsync, ie. it completely misses the active portion
+		 * of the line. Fix up the counter at both edges of vblank
+		 * to get a more accurate picture whether we're in vblank
+		 * or not.
 		 */
-		*vpos = position & 0x1fff;
-		*hpos = 0;
+		in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
+		if ((in_vbl && position == vbl_start - 1) ||
+		    (!in_vbl && position == vbl_end - 1))
+			position = (position + 1) % vtotal;
 	} else {
 		/* Have access to pixelcount since start of frame.
 		 * We can split this into vertical and horizontal
 		 * scanout position.
 		 */
-		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+		/* convert to pixel counts */
+		vbl_start *= htotal;
+		vbl_end *= htotal;
+		vtotal *= htotal;
+	}
+
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+	in_vbl = position >= vbl_start && position < vbl_end;
+
+	/*
+	 * While in vblank, position will be negative
+	 * counting up towards 0 at vbl_end. And outside
+	 * vblank, position will be positive counting
+	 * up since vbl_end.
+	 */
+	if (position >= vbl_start)
+		position -= vbl_end;
+	else
+		position += vtotal - vbl_end;
+
+	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		*vpos = position;
+		*hpos = 0;
+	} else {
 		*vpos = position / htotal;
 		*hpos = position - (*vpos * htotal);
 	}
 
-	/* Query vblank area. */
-	vbl = I915_READ(VBLANK(cpu_transcoder));
-
-	/* Test position against vblank region. */
-	vbl_start = vbl & 0x1fff;
-	vbl_end = (vbl >> 16) & 0x1fff;
-
-	if ((*vpos < vbl_start) || (*vpos > vbl_end))
-		in_vbl = false;
-
-	/* Inside "upper part" of vblank area? Apply corrective offset: */
-	if (in_vbl && (*vpos >= vbl_start))
-		*vpos = *vpos - vtotal;
-
-	/* Readouts valid? */
-	if (vbl > 0)
-		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
-
 	/* In vblank? */
 	if (in_vbl)
 		ret |= DRM_SCANOUTPOS_INVBL;
@@ -665,7 +795,8 @@
 						     crtc);
 }
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
 {
 	enum drm_connector_status old_status;
 
@@ -673,11 +804,16 @@
 	old_status = connector->status;
 
 	connector->status = connector->funcs->detect(connector, false);
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 		      connector->base.id,
 		      drm_get_connector_name(connector),
-		      old_status, connector->status);
-	return (old_status != connector->status);
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
 }
 
 /*
@@ -801,7 +937,7 @@
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+	trace_i915_gem_request_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
@@ -812,7 +948,7 @@
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps.work);
 	u32 pm_iir;
-	u8 new_delay;
+	int new_delay, adj;
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +965,49 @@
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	adj = dev_priv->rps.last_adj;
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		new_delay = dev_priv->rps.cur_delay + 1;
+		if (adj > 0)
+			adj *= 2;
+		else
+			adj = 1;
+		new_delay = dev_priv->rps.cur_delay + adj;
 
 		/*
 		 * For better performance, jump directly
 		 * to RPe if we're below it.
 		 */
-		if (IS_VALLEYVIEW(dev_priv->dev) &&
-		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+		if (new_delay < dev_priv->rps.rpe_delay)
 			new_delay = dev_priv->rps.rpe_delay;
-	} else
-		new_delay = dev_priv->rps.cur_delay - 1;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
+			new_delay = dev_priv->rps.rpe_delay;
+		else
+			new_delay = dev_priv->rps.min_delay;
+		adj = 0;
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+		if (adj < 0)
+			adj *= 2;
+		else
+			adj = -1;
+		new_delay = dev_priv->rps.cur_delay + adj;
+	} else { /* unknown event */
+		new_delay = dev_priv->rps.cur_delay;
+	}
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
 	 */
-	if (new_delay >= dev_priv->rps.min_delay &&
-	    new_delay <= dev_priv->rps.max_delay) {
-		if (IS_VALLEYVIEW(dev_priv->dev))
-			valleyview_set_rps(dev_priv->dev, new_delay);
-		else
-			gen6_set_rps(dev_priv->dev, new_delay);
-	}
+	if (new_delay < (int)dev_priv->rps.min_delay)
+		new_delay = dev_priv->rps.min_delay;
+	if (new_delay > (int)dev_priv->rps.max_delay)
+		new_delay = dev_priv->rps.max_delay;
+	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
 
-	if (IS_VALLEYVIEW(dev_priv->dev)) {
-		/*
-		 * On VLV, when we enter RC6 we may not be at the minimum
-		 * voltage level, so arm a timer to check.  It should only
-		 * fire when there's activity or once after we've entered
-		 * RC6, and then won't be re-armed until the next RPS interrupt.
-		 */
-		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-				 msecs_to_jiffies(100));
-	}
+	if (IS_VALLEYVIEW(dev_priv->dev))
+		valleyview_set_rps(dev_priv->dev, new_delay);
+	else
+		gen6_set_rps(dev_priv->dev, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -882,9 +1027,10 @@
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    l3_parity.error_work);
 	u32 error_status, row, bank, subbank;
-	char *parity_event[5];
+	char *parity_event[6];
 	uint32_t misccpctl;
 	unsigned long flags;
+	uint8_t slice = 0;
 
 	/* We must turn off DOP level clock gating to access the L3 registers.
 	 * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1038,81 @@
 	 */
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
+	/* If we've screwed up tracking, just let the interrupt fire again */
+	if (WARN_ON(!dev_priv->l3_parity.which_slice))
+		goto out;
+
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 	POSTING_READ(GEN7_MISCCPCTL);
 
-	error_status = I915_READ(GEN7_L3CDERRST1);
-	row = GEN7_PARITY_ERROR_ROW(error_status);
-	bank = GEN7_PARITY_ERROR_BANK(error_status);
-	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+		u32 reg;
 
-	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-				    GEN7_L3CDERRST1_ENABLE);
-	POSTING_READ(GEN7_L3CDERRST1);
+		slice--;
+		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+			break;
+
+		dev_priv->l3_parity.which_slice &= ~(1<<slice);
+
+		reg = GEN7_L3CDERRST1 + (slice * 0x200);
+
+		error_status = I915_READ(reg);
+		row = GEN7_PARITY_ERROR_ROW(error_status);
+		bank = GEN7_PARITY_ERROR_BANK(error_status);
+		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+		POSTING_READ(reg);
+
+		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+		parity_event[5] = NULL;
+
+		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+				   KOBJ_CHANGE, parity_event);
+
+		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+			  slice, row, bank, subbank);
+
+		kfree(parity_event[4]);
+		kfree(parity_event[3]);
+		kfree(parity_event[2]);
+		kfree(parity_event[1]);
+	}
 
 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
+out:
+	WARN_ON(dev_priv->l3_parity.which_slice);
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	mutex_unlock(&dev_priv->dev->struct_mutex);
-
-	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-	parity_event[4] = NULL;
-
-	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-			   KOBJ_CHANGE, parity_event);
-
-	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-		  row, bank, subbank);
-
-	kfree(parity_event[3]);
-	kfree(parity_event[2]);
-	kfree(parity_event[1]);
 }
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
 	spin_unlock(&dev_priv->irq_lock);
 
+	iir &= GT_PARITY_ERROR(dev);
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+		dev_priv->l3_parity.which_slice |= 1 << 1;
+
+	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+		dev_priv->l3_parity.which_slice |= 1 << 0;
+
 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
@@ -975,8 +1147,8 @@
 		i915_handle_error(dev, false);
 	}
 
-	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-		ivybridge_parity_error_irq_handler(dev);
+	if (gt_iir & GT_PARITY_ERROR(dev))
+		ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1222,102 @@
 	wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+					 uint32_t crc0, uint32_t crc1,
+					 uint32_t crc2, uint32_t crc3,
+					 uint32_t crc4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	struct intel_pipe_crc_entry *entry;
+	int head, tail;
+
+	spin_lock(&pipe_crc->lock);
+
+	if (!pipe_crc->entries) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("spurious interrupt\n");
+		return;
+	}
+
+	head = pipe_crc->head;
+	tail = pipe_crc->tail;
+
+	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+		spin_unlock(&pipe_crc->lock);
+		DRM_ERROR("CRC buffer overflowing\n");
+		return;
+	}
+
+	entry = &pipe_crc->entries[head];
+
+	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+	entry->crc[0] = crc0;
+	entry->crc[1] = crc1;
+	entry->crc[2] = crc2;
+	entry->crc[3] = crc3;
+	entry->crc[4] = crc4;
+
+	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+	pipe_crc->head = head;
+
+	spin_unlock(&pipe_crc->lock);
+
+	wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+			     uint32_t crc0, uint32_t crc1,
+			     uint32_t crc2, uint32_t crc3,
+			     uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t res1, res2;
+
+	if (INTEL_INFO(dev)->gen >= 3)
+		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+	else
+		res1 = 0;
+
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+	else
+		res2 = 0;
+
+	display_pipe_crc_irq_handler(dev, pipe,
+				     I915_READ(PIPE_CRC_RES_RED(pipe)),
+				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+				     res1, res2);
+}
+
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
@@ -1117,13 +1385,16 @@
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		for_each_pipe(pipe) {
-			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
 				drm_handle_vblank(dev, pipe);
 
 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
 				intel_prepare_page_flip(dev, pipe);
 				intel_finish_page_flip(dev, pipe);
 			}
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		/* Consume port.  Then clear IIR or we'll miss events */
@@ -1212,21 +1483,26 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 err_int = I915_READ(GEN7_ERR_INT);
+	enum pipe pipe;
 
 	if (err_int & ERR_INT_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+								  false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
+		}
 
-	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
-	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+			if (IS_IVYBRIDGE(dev))
+				ivb_pipe_crc_irq_handler(dev, pipe);
+			else
+				hsw_pipe_crc_irq_handler(dev, pipe);
+		}
+	}
 
 	I915_WRITE(GEN7_ERR_INT, err_int);
 }
@@ -1297,6 +1573,7 @@
 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
 
 	if (de_iir & DE_AUX_CHANNEL_A)
 		dp_aux_irq_handler(dev);
@@ -1304,31 +1581,26 @@
 	if (de_iir & DE_GSE)
 		intel_opregion_asle_intr(dev);
 
-	if (de_iir & DE_PIPEA_VBLANK)
-		drm_handle_vblank(dev, 0);
-
-	if (de_iir & DE_PIPEB_VBLANK)
-		drm_handle_vblank(dev, 1);
-
 	if (de_iir & DE_POISON)
 		DRM_ERROR("Poison interrupt\n");
 
-	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+	for_each_pipe(pipe) {
+		if (de_iir & DE_PIPE_VBLANK(pipe))
+			drm_handle_vblank(dev, pipe);
 
-	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
-		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
+			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+						 pipe_name(pipe));
 
-	if (de_iir & DE_PLANEA_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip_plane(dev, 0);
-	}
+		if (de_iir & DE_PIPE_CRC_DONE(pipe))
+			i9xx_pipe_crc_irq_handler(dev, pipe);
 
-	if (de_iir & DE_PLANEB_FLIP_DONE) {
-		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip_plane(dev, 1);
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
+			intel_prepare_page_flip(dev, pipe);
+			intel_finish_page_flip_plane(dev, pipe);
+		}
 	}
 
 	/* check event from PCH */
@@ -1351,7 +1623,7 @@
 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	enum pipe i;
 
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev);
@@ -1362,10 +1634,12 @@
 	if (de_iir & DE_GSE_IVB)
 		intel_opregion_asle_intr(dev);
 
-	for (i = 0; i < 3; i++) {
-		if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+	for_each_pipe(i) {
+		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
 			drm_handle_vblank(dev, i);
-		if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+
+		/* plane/pipes map 1:1 on ilk+ */
+		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
 			intel_prepare_page_flip(dev, i);
 			intel_finish_page_flip_plane(dev, i);
 		}
@@ -1388,7 +1662,6 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
 	irqreturn_t ret = IRQ_NONE;
-	bool err_int_reenable = false;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -1412,17 +1685,6 @@
 		POSTING_READ(SDEIER);
 	}
 
-	/* On Haswell, also mask ERR_INT because we don't want to risk
-	 * generating "unclaimed register" interrupts from inside the interrupt
-	 * handler. */
-	if (IS_HASWELL(dev)) {
-		spin_lock(&dev_priv->irq_lock);
-		err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-		if (err_int_reenable)
-			ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1714,6 @@
 		}
 	}
 
-	if (err_int_reenable) {
-		spin_lock(&dev_priv->irq_lock);
-		if (ivb_can_enable_err_int(dev))
-			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-		spin_unlock(&dev_priv->irq_lock);
-	}
-
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
 	if (!HAS_PCH_NOP(dev)) {
@@ -1516,7 +1771,7 @@
 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
 	int ret;
 
-	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 
 	/*
 	 * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1785,7 @@
 	 */
 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
 		DRM_DEBUG_DRIVER("resetting chip\n");
-		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
 				   reset_event);
 
 		/*
@@ -1557,7 +1812,7 @@
 			smp_mb__before_atomic_inc();
 			atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-			kobject_uevent_env(&dev->primary->kdev.kobj,
+			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
 			atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1787,7 +2042,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
@@ -1810,7 +2065,7 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1845,7 +2100,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
-						     DE_PIPE_VBLANK_ILK(pipe);
+						     DE_PIPE_VBLANK(pipe);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, bit);
@@ -1862,7 +2117,7 @@
 	i915_disable_pipestat(dev_priv, pipe,
 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	imr = I915_READ(VLV_IMR);
-	if (pipe == 0)
+	if (pipe == PIPE_A)
 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
 	else
 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
@@ -1965,6 +2220,7 @@
 	if (tmp & RING_WAIT) {
 		DRM_ERROR("Kicking stuck wait on %s\n",
 			  ring->name);
+		i915_handle_error(dev, false);
 		I915_WRITE_CTL(ring, tmp);
 		return HANGCHECK_KICK;
 	}
@@ -1976,6 +2232,7 @@
 		case 1:
 			DRM_ERROR("Kicking stuck semaphore on %s\n",
 				  ring->name);
+			i915_handle_error(dev, false);
 			I915_WRITE_CTL(ring, tmp);
 			return HANGCHECK_KICK;
 		case 0:
@@ -2021,12 +2278,21 @@
 
 		if (ring->hangcheck.seqno == seqno) {
 			if (ring_idle(ring, seqno)) {
+				ring->hangcheck.action = HANGCHECK_IDLE;
+
 				if (waitqueue_active(&ring->irq_queue)) {
 					/* Issue a wake-up to catch stuck h/w. */
-					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-						  ring->name);
-					wake_up_all(&ring->irq_queue);
-					ring->hangcheck.score += HUNG;
+					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
+							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+								  ring->name);
+						else
+							DRM_INFO("Fake missed irq on %s\n",
+								 ring->name);
+						wake_up_all(&ring->irq_queue);
+					}
+					/* Safeguard against driver failure */
+					ring->hangcheck.score += BUSY;
 				} else
 					busy = false;
 			} else {
@@ -2049,6 +2315,7 @@
 								    acthd);
 
 				switch (ring->hangcheck.action) {
+				case HANGCHECK_IDLE:
 				case HANGCHECK_WAIT:
 					break;
 				case HANGCHECK_ACTIVE:
@@ -2064,6 +2331,8 @@
 				}
 			}
 		} else {
+			ring->hangcheck.action = HANGCHECK_ACTIVE;
+
 			/* Gradually reduce the count so that we catch DoS
 			 * attempts across multiple batches.
 			 */
@@ -2254,10 +2523,10 @@
 	pm_irqs = gt_irqs = 0;
 
 	dev_priv->gt_irq_mask = ~0;
-	if (HAS_L3_GPU_CACHE(dev)) {
+	if (HAS_L3_DPF(dev)) {
 		/* L3 parity interrupt is always unmasked. */
-		dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-		gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+		gt_irqs |= GT_PARITY_ERROR(dev);
 	}
 
 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2575,10 @@
 	} else {
 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-				DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-				DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+				DE_AUX_CHANNEL_A |
+				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+				DE_POISON);
 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
 	}
 
@@ -2341,7 +2612,8 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
-	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+		PIPE_CRC_DONE_ENABLE;
 	unsigned long irqflags;
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2371,9 +2643,9 @@
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
-	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2464,6 +2736,7 @@
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
 
 	I915_WRITE16(EMR,
 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +2757,13 @@
 		     I915_USER_INTERRUPT);
 	POSTING_READ16(IER);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2570,13 +2850,14 @@
 		if (iir & I915_USER_INTERRUPT)
 			notify_ring(dev, &dev_priv->ring[RCS]);
 
-		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 0, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i8xx_handle_vblank(dev, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
-		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-		    i8xx_handle_vblank(dev, 1, iir))
-			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
+		}
 
 		iir = new_iir;
 	}
@@ -2623,6 +2904,7 @@
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u32 enable_mask;
+	unsigned long irqflags;
 
 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2658,6 +2940,13 @@
 
 	i915_enable_asle_pipestat(dev);
 
+	/* Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked check happy. */
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
 	return 0;
 }
 
@@ -2769,6 +3058,9 @@
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2867,7 +3159,9 @@
 	/* Interrupt setup is already guaranteed to be single-threaded, this is
 	 * just to make the assert_spin_locked check happy. */
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
+	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	/*
@@ -3013,6 +3307,9 @@
 
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
+
+			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+				i9xx_pipe_crc_irq_handler(dev, pipe);
 		}
 
 
@@ -3122,18 +3419,21 @@
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	if (IS_GEN2(dev)) {
+		dev->max_vblank_count = 0;
+		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	} else {
+		dev->driver->get_vblank_counter = i915_get_vblank_counter;
+		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-	else
-		dev->driver->get_vblank_timestamp = NULL;
-	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+	}
 
 	if (IS_VALLEYVIEW(dev)) {
 		dev->driver->irq_handler = valleyview_irq_handler;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ef9b354..04896da 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
@@ -264,6 +265,11 @@
 #define  MI_SEMAPHORE_SYNC_VVE	    (1<<16) /* VECS wait for VCS  (VEVSYNC) */
 #define  MI_SEMAPHORE_SYNC_RVE	    (2<<16) /* VECS wait for RCS  (VERSYNC) */
 #define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
+#define MI_PREDICATE_RESULT_2	(0x2214)
+#define  LOWER_SLICE_ENABLED	(1<<0)
+#define  LOWER_SLICE_DISABLED	(0<<0)
+
 /*
  * 3D instructions used by the kernel
  */
@@ -346,12 +352,25 @@
 #define   IOSF_PORT_PUNIT			0x4
 #define   IOSF_PORT_NC				0x11
 #define   IOSF_PORT_DPIO			0x12
+#define   IOSF_PORT_GPIO_NC			0x13
+#define   IOSF_PORT_CCK				0x14
+#define   IOSF_PORT_CCU				0xA9
+#define   IOSF_PORT_GPS_CORE			0x48
 #define VLV_IOSF_DATA				(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR				(VLV_DISPLAY_BASE + 0x2108)
 
 #define PUNIT_OPCODE_REG_READ			6
 #define PUNIT_OPCODE_REG_WRITE			7
 
+#define PUNIT_REG_PWRGT_CTRL			0x60
+#define PUNIT_REG_PWRGT_STATUS			0x61
+#define	  PUNIT_CLK_GATE			1
+#define	  PUNIT_PWR_RESET			2
+#define	  PUNIT_PWR_GATE			3
+#define	  RENDER_PWRGT				(PUNIT_PWR_GATE << 0)
+#define	  MEDIA_PWRGT				(PUNIT_PWR_GATE << 2)
+#define	  DISP2D_PWRGT				(PUNIT_PWR_GATE << 6)
+
 #define PUNIT_REG_GPU_LFM			0xd3
 #define PUNIT_REG_GPU_FREQ_REQ			0xd4
 #define PUNIT_REG_GPU_FREQ_STS			0xd8
@@ -372,6 +391,40 @@
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT		27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK		0xf8000000
 
+/* vlv2 north clock has */
+#define CCK_FUSE_REG				0x8
+#define  CCK_FUSE_HPLL_FREQ_MASK		0x3
+#define CCK_REG_DSI_PLL_FUSE			0x44
+#define CCK_REG_DSI_PLL_CONTROL			0x48
+#define  DSI_PLL_VCO_EN				(1 << 31)
+#define  DSI_PLL_LDO_GATE			(1 << 30)
+#define  DSI_PLL_P1_POST_DIV_SHIFT		17
+#define  DSI_PLL_P1_POST_DIV_MASK		(0x1ff << 17)
+#define  DSI_PLL_P2_MUX_DSI0_DIV2		(1 << 13)
+#define  DSI_PLL_P3_MUX_DSI1_DIV2		(1 << 12)
+#define  DSI_PLL_MUX_MASK			(3 << 9)
+#define  DSI_PLL_MUX_DSI0_DSIPLL		(0 << 10)
+#define  DSI_PLL_MUX_DSI0_CCK			(1 << 10)
+#define  DSI_PLL_MUX_DSI1_DSIPLL		(0 << 9)
+#define  DSI_PLL_MUX_DSI1_CCK			(1 << 9)
+#define  DSI_PLL_CLK_GATE_MASK			(0xf << 5)
+#define  DSI_PLL_CLK_GATE_DSI0_DSIPLL		(1 << 8)
+#define  DSI_PLL_CLK_GATE_DSI1_DSIPLL		(1 << 7)
+#define  DSI_PLL_CLK_GATE_DSI0_CCK		(1 << 6)
+#define  DSI_PLL_CLK_GATE_DSI1_CCK		(1 << 5)
+#define  DSI_PLL_LOCK				(1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER			0x4c
+#define  DSI_PLL_LFSR				(1 << 31)
+#define  DSI_PLL_FRACTION_EN			(1 << 30)
+#define  DSI_PLL_FRAC_COUNTER_SHIFT		27
+#define  DSI_PLL_FRAC_COUNTER_MASK		(7 << 27)
+#define  DSI_PLL_USYNC_CNT_SHIFT		18
+#define  DSI_PLL_USYNC_CNT_MASK			(0x1ff << 18)
+#define  DSI_PLL_N1_DIV_SHIFT			16
+#define  DSI_PLL_N1_DIV_MASK			(3 << 16)
+#define  DSI_PLL_M1_DIV_SHIFT			0
+#define  DSI_PLL_M1_DIV_MASK			(0x1ff << 0)
+
 /*
  * DPIO - a special bus for various display related registers to hide behind
  *
@@ -387,11 +440,11 @@
 #define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS		(1<<1)
-#define  DPIO_RESET			(1<<0)
+#define  DPIO_CMNRST			(1<<0)
 
 #define _DPIO_TX3_SWING_CTL4_A		0x690
 #define _DPIO_TX3_SWING_CTL4_B		0x2a90
-#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX_SWING_CTL4_A, \
+#define DPIO_TX3_SWING_CTL4(pipe) _PIPE(pipe, _DPIO_TX3_SWING_CTL4_A, \
 					_DPIO_TX3_SWING_CTL4_B)
 
 /*
@@ -669,13 +722,18 @@
 #define NOPID		0x02094
 #define HWSTAM		0x02098
 #define DMA_FADD_I8XX	0x020d0
+#define RING_BBSTATE(base)	((base)+0x110)
 
 #define ERROR_GEN6	0x040a0
 #define GEN7_ERR_INT	0x44040
 #define   ERR_INT_POISON		(1<<31)
 #define   ERR_INT_MMIO_UNCLAIMED	(1<<13)
+#define   ERR_INT_PIPE_CRC_DONE_C	(1<<8)
 #define   ERR_INT_FIFO_UNDERRUN_C	(1<<6)
+#define   ERR_INT_PIPE_CRC_DONE_B	(1<<5)
 #define   ERR_INT_FIFO_UNDERRUN_B	(1<<3)
+#define   ERR_INT_PIPE_CRC_DONE_A	(1<<2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)	(1<<(2 + pipe*3))
 #define   ERR_INT_FIFO_UNDERRUN_A	(1<<0)
 #define   ERR_INT_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))
 
@@ -890,6 +948,7 @@
 #define GT_BLT_USER_INTERRUPT			(1 << 22)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
 #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT	(1 <<  3)
@@ -900,6 +959,10 @@
 #define PM_VEBOX_CS_ERROR_INTERRUPT		(1 << 12) /* hsw+ */
 #define PM_VEBOX_USER_INTERRUPT			(1 << 10) /* hsw+ */
 
+#define GT_PARITY_ERROR(dev) \
+	(GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+	 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
 /* These are all the "old" interrupts */
 #define ILK_BSD_USER_INTERRUPT				(1<<5)
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
@@ -1048,9 +1111,6 @@
 					     _HSW_PIPE_SLICE_CHICKEN_1_A, + \
 					     _HSW_PIPE_SLICE_CHICKEN_1_B)
 
-#define HSW_CLKGATE_DISABLE_PART_1	0x46500
-#define   HSW_DPFC_GATING_DISABLE	(1<<23)
-
 /*
  * GPIO regs
  */
@@ -1387,6 +1447,12 @@
 
 #define MI_ARB_VLV		(VLV_DISPLAY_BASE + 0x6504)
 
+#define CZCLK_CDCLK_FREQ_RATIO	(VLV_DISPLAY_BASE + 0x6508)
+#define   CDCLK_FREQ_SHIFT	4
+#define   CDCLK_FREQ_MASK	(0x1f << CDCLK_FREQ_SHIFT)
+#define   CZCLK_FREQ_MASK	0xf
+#define GMBUSFREQ_VLV		(VLV_DISPLAY_BASE + 0x6510)
+
 /*
  * Palette regs
  */
@@ -1404,13 +1470,15 @@
  * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
  * every way.  It is not accessible from the CP register read instructions.
  *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
  */
 #define MCHBAR_MIRROR_BASE	0x10000
 
 #define MCHBAR_MIRROR_BASE_SNB	0x140000
 
 /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
-#define DCLK 0x5e04
+#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC			0x10200
@@ -1705,9 +1773,9 @@
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 #define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
-#define GEN6_GT_PERF_STATUS	0x145948
-#define GEN6_RP_STATE_LIMITS	0x145994
-#define GEN6_RP_STATE_CAP	0x145998
+#define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+#define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 
 /*
  * Logical Context regs
@@ -1753,6 +1821,9 @@
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
 
+#define VLV_CLK_CTL2			0x101104
+#define   CLK_CTL2_CZCOUNT_30NS_SHIFT	28
+
 /*
  * Overlay regs
  */
@@ -1771,6 +1842,83 @@
  * Display engine regs
  */
 
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A		(dev_priv->info->display_mmio_offset + 0x60050)
+#define   PIPE_CRC_ENABLE		(1 << 31)
+/* ivb+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_IVB	(0 << 29)
+#define   PIPE_CRC_SOURCE_SPRITE_IVB	(1 << 29)
+#define   PIPE_CRC_SOURCE_PF_IVB	(2 << 29)
+/* ilk+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_ILK	(0 << 28)
+#define   PIPE_CRC_SOURCE_SPRITE_ILK	(1 << 28)
+#define   PIPE_CRC_SOURCE_PIPE_ILK	(2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define   PIPE_CRC_SOURCE_PORT_A_ILK	(4 << 28)
+#define   PIPE_CRC_SOURCE_FDI_ILK	(5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define   PIPE_CRC_SOURCE_PIPE_VLV	(0 << 27)
+#define   PIPE_CRC_SOURCE_HDMIB_VLV	(1 << 27)
+#define   PIPE_CRC_SOURCE_HDMIC_VLV	(2 << 27)
+/* with DP port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_VLV	(3 << 27)
+#define   PIPE_CRC_SOURCE_DP_B_VLV	(6 << 27)
+#define   PIPE_CRC_SOURCE_DP_C_VLV	(7 << 27)
+/* gen3+ source selection */
+#define   PIPE_CRC_SOURCE_PIPE_I9XX	(0 << 28)
+#define   PIPE_CRC_SOURCE_SDVOB_I9XX	(1 << 28)
+#define   PIPE_CRC_SOURCE_SDVOC_I9XX	(2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_G4X	(3 << 28)
+#define   PIPE_CRC_SOURCE_TV_PRE	(4 << 28)
+#define   PIPE_CRC_SOURCE_TV_POST	(5 << 28)
+#define   PIPE_CRC_SOURCE_DP_B_G4X	(6 << 28)
+#define   PIPE_CRC_SOURCE_DP_C_G4X	(7 << 28)
+/* gen2 doesn't have source selection bits */
+#define   PIPE_CRC_INCLUDE_BORDER_I8XX	(1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB		0x60064
+#define _PIPE_CRC_RES_2_A_IVB		0x60068
+#define _PIPE_CRC_RES_3_A_IVB		0x6006c
+#define _PIPE_CRC_RES_4_A_IVB		0x60070
+#define _PIPE_CRC_RES_5_A_IVB		0x60074
+
+#define _PIPE_CRC_RES_RED_A		(dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A		(dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A		(dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915	(dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X	(dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB		0x61064
+#define _PIPE_CRC_RES_2_B_IVB		0x61068
+#define _PIPE_CRC_RES_3_B_IVB		0x6106c
+#define _PIPE_CRC_RES_4_B_IVB		0x61070
+#define _PIPE_CRC_RES_5_B_IVB		0x61074
+
+#define PIPE_CRC_CTL(pipe)	_PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe)	\
+	_PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+	_PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
 /* Pipe A timing regs */
 #define _HTOTAL_A	(dev_priv->info->display_mmio_offset + 0x60000)
 #define _HBLANK_A	(dev_priv->info->display_mmio_offset + 0x60004)
@@ -1793,7 +1941,6 @@
 #define _BCLRPAT_B	(dev_priv->info->display_mmio_offset + 0x61020)
 #define _VSYNCSHIFT_B	(dev_priv->info->display_mmio_offset + 0x61028)
 
-
 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
 #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
 #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
@@ -1804,7 +1951,8 @@
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
 /* HSW eDP PSR registers */
-#define EDP_PSR_CTL				0x64800
+#define EDP_PSR_BASE(dev)			0x64800
+#define EDP_PSR_CTL(dev)			(EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE			(1<<31)
 #define   EDP_PSR_LINK_DISABLE			(0<<27)
 #define   EDP_PSR_LINK_STANDBY			(1<<27)
@@ -1827,16 +1975,16 @@
 #define   EDP_PSR_TP1_TIME_0us			(3<<4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT		0
 
-#define EDP_PSR_AUX_CTL			0x64810
-#define EDP_PSR_AUX_DATA1		0x64814
+#define EDP_PSR_AUX_CTL(dev)			(EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev)			(EDP_PSR_BASE(dev) + 0x14)
 #define   EDP_PSR_DPCD_COMMAND		0x80060000
-#define EDP_PSR_AUX_DATA2		0x64818
+#define EDP_PSR_AUX_DATA2(dev)			(EDP_PSR_BASE(dev) + 0x18)
 #define   EDP_PSR_DPCD_NORMAL_OPERATION	(1<<24)
-#define EDP_PSR_AUX_DATA3		0x6481c
-#define EDP_PSR_AUX_DATA4		0x64820
-#define EDP_PSR_AUX_DATA5		0x64824
+#define EDP_PSR_AUX_DATA3(dev)			(EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev)			(EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev)			(EDP_PSR_BASE(dev) + 0x24)
 
-#define EDP_PSR_STATUS_CTL			0x64840
+#define EDP_PSR_STATUS_CTL(dev)			(EDP_PSR_BASE(dev) + 0x40)
 #define   EDP_PSR_STATUS_STATE_MASK		(7<<29)
 #define   EDP_PSR_STATUS_STATE_IDLE		(0<<29)
 #define   EDP_PSR_STATUS_STATE_SRDONACK		(1<<29)
@@ -1860,10 +2008,10 @@
 #define   EDP_PSR_STATUS_SENDING_TP1		(1<<4)
 #define   EDP_PSR_STATUS_IDLE_MASK		0xf
 
-#define EDP_PSR_PERF_CNT		0x64844
+#define EDP_PSR_PERF_CNT(dev)		(EDP_PSR_BASE(dev) + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK		0xffffff
 
-#define EDP_PSR_DEBUG_CTL		0x64860
+#define EDP_PSR_DEBUG_CTL(dev)		(EDP_PSR_BASE(dev) + 0x60)
 #define   EDP_PSR_DEBUG_MASK_LPSP	(1<<27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP	(1<<26)
 #define   EDP_PSR_DEBUG_MASK_HPD	(1<<25)
@@ -2006,6 +2154,14 @@
 #define PCH_HDMIC	0xe1150
 #define PCH_HDMID	0xe1160
 
+#define PORT_DFT_I9XX				0x61150
+#define   DC_BALANCE_RESET			(1 << 25)
+#define PORT_DFT2_G4X				0x61154
+#define   DC_BALANCE_RESET_VLV			(1 << 31)
+#define   PIPE_SCRAMBLE_RESET_MASK		(0x3 << 0)
+#define   PIPE_B_SCRAMBLE_RESET			(1 << 1)
+#define   PIPE_A_SCRAMBLE_RESET			(1 << 0)
+
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE				(1 << 31)
 #define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
@@ -2034,6 +2190,7 @@
 
 /* Gen 4 SDVO/HDMI bits: */
 #define   SDVO_COLOR_FORMAT_8bpc		(0 << 26)
+#define   SDVO_COLOR_FORMAT_MASK		(7 << 26)
 #define   SDVO_ENCODING_SDVO			(0 << 10)
 #define   SDVO_ENCODING_HDMI			(2 << 10)
 #define   HDMI_MODE_SELECT_HDMI			(1 << 9) /* HDMI only */
@@ -2238,6 +2395,21 @@
 
 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
+#define _VLV_BLC_PWM_CTL2_A (dev_priv->info->display_mmio_offset + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (dev_priv->info->display_mmio_offset + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+				     _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (dev_priv->info->display_mmio_offset + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (dev_priv->info->display_mmio_offset + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+				    _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (dev_priv->info->display_mmio_offset + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (dev_priv->info->display_mmio_offset + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+				     _VLV_BLC_HIST_CTL_B)
+
 /* Backlight control */
 #define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
 #define   BLM_PWM_ENABLE		(1 << 31)
@@ -2986,6 +3158,7 @@
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
 #define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_DSI_PLL_LOCKED	(1<<29) /* vlv & pipe A only */
 #define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE	0
 #define   PIPECONF_PIPE_UNLOCKED 0
@@ -3184,11 +3357,11 @@
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK		0x45100
-#define  WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define  WM0_PIPE_PLANE_MASK	(0xffff<<16)
 #define  WM0_PIPE_PLANE_SHIFT	16
-#define  WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define  WM0_PIPE_SPRITE_MASK	(0xff<<8)
 #define  WM0_PIPE_SPRITE_SHIFT	8
-#define  WM0_PIPE_CURSOR_MASK	(0x1f)
+#define  WM0_PIPE_CURSOR_MASK	(0xff)
 
 #define WM0_PIPEB_ILK		0x45104
 #define WM0_PIPEC_IVB		0x45200
@@ -3198,9 +3371,9 @@
 #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
 #define  WM1_LP_FBC_MASK	(0xf<<20)
 #define  WM1_LP_FBC_SHIFT	20
-#define  WM1_LP_SR_MASK		(0x1ff<<8)
+#define  WM1_LP_SR_MASK		(0x7ff<<8)
 #define  WM1_LP_SR_SHIFT	8
-#define  WM1_LP_CURSOR_MASK	(0x3f)
+#define  WM1_LP_CURSOR_MASK	(0xff)
 #define WM2_LP_ILK		0x4510c
 #define  WM2_LP_EN		(1<<31)
 #define WM3_LP_ILK		0x45110
@@ -3281,17 +3454,17 @@
  *  } while (high1 != high2);
  *  frame = (high1 << 8) | low1;
  */
-#define _PIPEAFRAMEHIGH          (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH          0x70040
 #define   PIPE_FRAME_HIGH_MASK    0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT   0
-#define _PIPEAFRAMEPIXEL         (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL         0x70044
 #define   PIPE_FRAME_LOW_MASK     0xff000000
 #define   PIPE_FRAME_LOW_SHIFT    24
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45	0x70040
-#define _PIPEA_FLIPCOUNT_GM45	0x70044
+#define _PIPEA_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x70044)
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
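
The comment above describes how the split pre-GM45 frame counter has to be read; a minimal standalone C sketch of that loop (read_reg() and pipe_frame_count() are illustrative stand-ins, not the driver's I915_READ() or its actual vblank-counter helper):

#include <stdint.h>

extern uint32_t read_reg(uint32_t reg);	/* stand-in for I915_READ() */

#define PIPE_FRAME_HIGH_MASK	0x0000ffff
#define PIPE_FRAME_LOW_MASK	0xff000000
#define PIPE_FRAME_LOW_SHIFT	24

uint32_t pipe_frame_count(uint32_t high_reg, uint32_t low_reg)
{
	uint32_t high1, high2, low;

	/* re-read the high half until it is stable across the low read */
	do {
		high1 = read_reg(high_reg) & PIPE_FRAME_HIGH_MASK;
		low   = (read_reg(low_reg) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = read_reg(high_reg) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	return (high1 << 8) | low;
}
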
 
 /* Cursor A & B regs */
@@ -3422,10 +3595,10 @@
 #define _PIPEBDSL		(dev_priv->info->display_mmio_offset + 0x71000)
 #define _PIPEBCONF		(dev_priv->info->display_mmio_offset + 0x71008)
 #define _PIPEBSTAT		(dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45	0x71040
-#define _PIPEB_FLIPCOUNT_GM45	0x71044
+#define _PIPEBFRAMEHIGH		0x71040
+#define _PIPEBFRAMEPIXEL	0x71044
+#define _PIPEB_FRMCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45	(dev_priv->info->display_mmio_offset + 0x71044)
 
 
 /* Display B control */
@@ -3780,6 +3953,7 @@
 #define DE_SPRITEA_FLIP_DONE    (1 << 28)
 #define DE_PLANEB_FLIP_DONE     (1 << 27)
 #define DE_PLANEA_FLIP_DONE     (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
 #define DE_PCU_EVENT            (1 << 25)
 #define DE_GTT_FAULT            (1 << 24)
 #define DE_POISON               (1 << 23)
@@ -3793,13 +3967,18 @@
 #define DE_PIPEB_ODD_FIELD      (1 << 13)
 #define DE_PIPEB_LINE_COMPARE   (1 << 12)
 #define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_CRC_DONE	(1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_CRC_DONE	(1 << 2)
+#define DE_PIPE_CRC_DONE(pipe)	(1 << (2 + 8*(pipe)))
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
 
 /* More Ivybridge lolz */
 #define DE_ERR_INT_IVB			(1<<30)
@@ -3815,9 +3994,8 @@
 #define DE_PIPEB_VBLANK_IVB		(1<<5)
 #define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
 #define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)	(1<< (3 + 5*(plane)))
 #define DE_PIPEA_VBLANK_IVB		(1<<0)
-
-#define DE_PIPE_VBLANK_ILK(pipe)	(1 << ((pipe * 8) + 7))
 #define DE_PIPE_VBLANK_IVB(pipe)	(1 << (pipe * 5))
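
A standalone sanity sketch (not part of the patch) showing that the new parameterized interrupt bits reduce to the per-pipe definitions above, with an 8-bit per-pipe stride on ILK-style display interrupts and a 5-bit stride on IVB; pipes are assumed to be numbered 0 and 1:

#include <assert.h>

#define DE_PIPE_CRC_DONE(pipe)		(1 << (2 + 8*(pipe)))
#define DE_PIPE_FIFO_UNDERRUN(pipe)	(1 << (8*(pipe)))
#define DE_PIPE_VBLANK_IVB(pipe)	(1 << ((pipe) * 5))

int main(void)
{
	assert(DE_PIPE_CRC_DONE(0) == (1 << 2));	/* DE_PIPEA_CRC_DONE */
	assert(DE_PIPE_CRC_DONE(1) == (1 << 10));	/* DE_PIPEB_CRC_DONE */
	assert(DE_PIPE_FIFO_UNDERRUN(1) == (1 << 8));	/* DE_PIPEB_FIFO_UNDERRUN */
	assert(DE_PIPE_VBLANK_IVB(1) == (1 << 5));	/* DE_PIPEB_VBLANK_IVB */
	return 0;
}
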
 
 #define VLV_MASTER_IER			0x4400c /* Gunit master IER */
@@ -4416,6 +4594,8 @@
 #define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
 #define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
 #define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define  PANEL_PORT_SELECT_DPB_VLV	(1 << 30)
+#define  PANEL_PORT_SELECT_DPC_VLV	(2 << 30)
 #define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
 #define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
 
@@ -4447,7 +4627,6 @@
 #define  PANEL_PORT_SELECT_MASK	(3 << 30)
 #define  PANEL_PORT_SELECT_LVDS	(0 << 30)
 #define  PANEL_PORT_SELECT_DPA	(1 << 30)
-#define  EDP_PANEL		(1 << 30)
 #define  PANEL_PORT_SELECT_DPC	(2 << 30)
 #define  PANEL_PORT_SELECT_DPD	(3 << 30)
 #define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
@@ -4456,11 +4635,6 @@
 #define  PANEL_LIGHT_ON_DELAY_SHIFT	0
 
 #define PCH_PP_OFF_DELAYS	0xc720c
-#define  PANEL_POWER_PORT_SELECT_MASK	(0x3 << 30)
-#define  PANEL_POWER_PORT_LVDS		(0 << 30)
-#define  PANEL_POWER_PORT_DP_A		(1 << 30)
-#define  PANEL_POWER_PORT_DP_C		(2 << 30)
-#define  PANEL_POWER_PORT_DP_D		(3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT	16
 #define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
@@ -4638,7 +4812,7 @@
 #define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
 #define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
 #define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
-#define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_AVG			(0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
 #define GEN6_RP_UP_THRESHOLD			0xA02C
 #define GEN6_RP_DOWN_THRESHOLD			0xA030
@@ -4683,6 +4857,10 @@
 						 GEN6_PM_RP_DOWN_TIMEOUT)
 
 #define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define VLV_COUNTER_CONTROL			0x138104
+#define   VLV_COUNT_RANGE_HIGH			(1<<15)
+#define   VLV_MEDIA_RC6_COUNT_EN		(1<<1)
+#define   VLV_RENDER_RC6_COUNT_EN		(1<<0)
 #define GEN6_GT_GFX_RC6				0x138108
 #define GEN6_GT_GFX_RC6p			0x13810C
 #define GEN6_GT_GFX_RC6pp			0x138110
@@ -4694,6 +4872,8 @@
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
+#define   GEN6_PCODE_READ_D_COMP		0x10
+#define   GEN6_PCODE_WRITE_D_COMP		0x11
 #define   GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define   GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
 #define GEN6_PCODE_DATA				0x138128
@@ -4713,6 +4893,7 @@
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
 #define   GEN7_PARITY_ERROR_VALID	(1<<13)
 #define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
@@ -4726,6 +4907,7 @@
 #define   GEN7_L3CDERRST1_ENABLE	(1<<7)
 
 #define GEN7_L3LOG_BASE			0xB070
+#define HSW_L3LOG_BASE_SLICE1		0xB270
 #define GEN7_L3LOG_SIZE			0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
@@ -4781,6 +4963,18 @@
 					CPT_AUD_CNTL_ST_B)
 #define CPT_AUD_CNTRL_ST2		0xE50C0
 
+#define VLV_HDMIW_HDMIEDID_A		(VLV_DISPLAY_BASE + 0x62050)
+#define VLV_HDMIW_HDMIEDID_B		(VLV_DISPLAY_BASE + 0x62150)
+#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					VLV_HDMIW_HDMIEDID_A, \
+					VLV_HDMIW_HDMIEDID_B)
+#define VLV_AUD_CNTL_ST_A		(VLV_DISPLAY_BASE + 0x620B4)
+#define VLV_AUD_CNTL_ST_B		(VLV_DISPLAY_BASE + 0x621B4)
+#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					VLV_AUD_CNTL_ST_A, \
+					VLV_AUD_CNTL_ST_B)
+#define VLV_AUD_CNTL_ST2		(VLV_DISPLAY_BASE + 0x620C0)
+
 /* These are the 4 32-bit write offset registers for each stream
  * output buffer.  It determines the offset from the
  * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
@@ -4797,6 +4991,12 @@
 #define CPT_AUD_CFG(pipe) _PIPE(pipe, \
 					CPT_AUD_CONFIG_A, \
 					CPT_AUD_CONFIG_B)
+#define VLV_AUD_CONFIG_A		(VLV_DISPLAY_BASE + 0x62000)
+#define VLV_AUD_CONFIG_B		(VLV_DISPLAY_BASE + 0x62100)
+#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
+					VLV_AUD_CONFIG_A, \
+					VLV_AUD_CONFIG_B)
+
 #define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
 #define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
 #define   AUD_CONFIG_UPPER_N_SHIFT		20
@@ -4804,7 +5004,17 @@
 #define   AUD_CONFIG_LOWER_N_SHIFT		4
 #define   AUD_CONFIG_LOWER_N_VALUE		(0xfff << 4)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
-#define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK	(0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175	(0 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200	(1 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000	(2 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027	(3 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000	(4 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054	(5 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176	(6 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250	(7 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352	(8 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500	(9 << 16)
 #define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
 
 /* HSW Audio */
@@ -5128,4 +5338,414 @@
 #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL			(VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe)		_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define  DPI_ENABLE					(1 << 31) /* A + B */
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT		27
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 27)
+#define  DUAL_LINK_MODE_MASK				(1 << 26)
+#define  DUAL_LINK_MODE_FRONT_BACK			(0 << 26)
+#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE		(1 << 26)
+#define  DITHERING_ENABLE				(1 << 25) /* A + B */
+#define  FLOPPED_HSTX					(1 << 23)
+#define  DE_INVERT					(1 << 19) /* XXX */
+#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT		18
+#define  MIPIA_FLISDSI_DELAY_COUNT_MASK			(0xf << 18)
+#define  AFE_LATCHOUT					(1 << 17)
+#define  LP_OUTPUT_HOLD					(1 << 16)
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT		15
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK		(1 << 15)
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT		11
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK		(0xf << 11)
+#define  CSB_SHIFT					9
+#define  CSB_MASK					(3 << 9)
+#define  CSB_20MHZ					(0 << 9)
+#define  CSB_10MHZ					(1 << 9)
+#define  CSB_40MHZ					(2 << 9)
+#define  BANDGAP_MASK					(1 << 8)
+#define  BANDGAP_PNW_CIRCUIT				(0 << 8)
+#define  BANDGAP_LNC_CIRCUIT				(1 << 8)
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT		5
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK		(7 << 5)
+#define  TEARING_EFFECT_DELAY				(1 << 4) /* A + B */
+#define  TEARING_EFFECT_SHIFT				2 /* A + B */
+#define  TEARING_EFFECT_MASK				(3 << 2)
+#define  TEARING_EFFECT_OFF				(0 << 2)
+#define  TEARING_EFFECT_DSI				(1 << 2)
+#define  TEARING_EFFECT_GPIO				(2 << 2)
+#define  LANE_CONFIGURATION_SHIFT			0
+#define  LANE_CONFIGURATION_MASK			(3 << 0)
+#define  LANE_CONFIGURATION_4LANE			(0 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_A			(1 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_B			(2 << 0)
+
+#define _MIPIA_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL			(VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe)		_PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define  TEARING_EFFECT_DELAY_SHIFT			0
+#define  TEARING_EFFECT_DELAY_MASK			(0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG				(VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY			(VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe)		_PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define  BUS_POSSESSION					(1 << 3) /* set to give bus to receiver */
+#define  ULPS_STATE_MASK				(3 << 1)
+#define  ULPS_STATE_ENTER				(2 << 1)
+#define  ULPS_STATE_EXIT				(1 << 1)
+#define  ULPS_STATE_NORMAL_OPERATION			(0 << 1)
+#define  DEVICE_READY					(1 << 0)
+
+#define _MIPIA_INTR_STAT			(VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT			(VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe)		_PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN				(VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN				(VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe)		_PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define  TEARING_EFFECT					(1 << 31)
+#define  SPL_PKT_SENT_INTERRUPT				(1 << 30)
+#define  GEN_READ_DATA_AVAIL				(1 << 29)
+#define  LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+#define  HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+#define  RX_PROT_VIOLATION				(1 << 26)
+#define  RX_INVALID_TX_LENGTH				(1 << 25)
+#define  ACK_WITH_NO_ERROR				(1 << 24)
+#define  TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+#define  LP_RX_TIMEOUT					(1 << 22)
+#define  HS_TX_TIMEOUT					(1 << 21)
+#define  DPI_FIFO_UNDERRUN				(1 << 20)
+#define  LOW_CONTENTION					(1 << 19)
+#define  HIGH_CONTENTION				(1 << 18)
+#define  TXDSI_VC_ID_INVALID				(1 << 17)
+#define  TXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 16)
+#define  TXCHECKSUM_ERROR				(1 << 15)
+#define  TXECC_MULTIBIT_ERROR				(1 << 14)
+#define  TXECC_SINGLE_BIT_ERROR				(1 << 13)
+#define  TXFALSE_CONTROL_ERROR				(1 << 12)
+#define  RXDSI_VC_ID_INVALID				(1 << 11)
+#define  RXDSI_DATA_TYPE_NOT_RECOGNISED			(1 << 10)

+#define  RXCHECKSUM_ERROR				(1 << 9)
+#define  RXECC_MULTIBIT_ERROR				(1 << 8)
+#define  RXECC_SINGLE_BIT_ERROR				(1 << 7)
+#define  RXFALSE_CONTROL_ERROR				(1 << 6)
+#define  RXHS_RECEIVE_TIMEOUT_ERROR			(1 << 5)
+#define  RX_LP_TX_SYNC_ERROR				(1 << 4)
+#define  RXESCAPE_MODE_ENTRY_ERROR			(1 << 3)
+#define  RXEOT_SYNC_ERROR				(1 << 2)
+#define  RXSOT_SYNC_ERROR				(1 << 1)
+#define  RXSOT_ERROR					(1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG			(VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe)		_PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define  CMD_MODE_DATA_WIDTH_MASK			(7 << 13)
+#define  CMD_MODE_NOT_SUPPORTED				(0 << 13)
+#define  CMD_MODE_DATA_WIDTH_16_BIT			(1 << 13)
+#define  CMD_MODE_DATA_WIDTH_9_BIT			(2 << 13)
+#define  CMD_MODE_DATA_WIDTH_8_BIT			(3 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION1			(4 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION2			(5 << 13)
+#define  VID_MODE_FORMAT_MASK				(0xf << 7)
+#define  VID_MODE_NOT_SUPPORTED				(0 << 7)
+#define  VID_MODE_FORMAT_RGB565				(1 << 7)
+#define  VID_MODE_FORMAT_RGB666				(2 << 7)
+#define  VID_MODE_FORMAT_RGB666_LOOSE			(3 << 7)
+#define  VID_MODE_FORMAT_RGB888				(4 << 7)
+#define  CMD_MODE_CHANNEL_NUMBER_SHIFT			5
+#define  CMD_MODE_CHANNEL_NUMBER_MASK			(3 << 5)
+#define  VID_MODE_CHANNEL_NUMBER_SHIFT			3
+#define  VID_MODE_CHANNEL_NUMBER_MASK			(3 << 3)
+#define  DATA_LANES_PRG_REG_SHIFT			0
+#define  DATA_LANES_PRG_REG_MASK			(7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT			(VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK		0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT		(VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe)	_PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define  TURN_AROUND_TIMEOUT_MASK			0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER		(VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe)	_PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define  DEVICE_RESET_TIMER_MASK			0xffff
+
+#define _MIPIA_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION			(VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe)	_PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define  VERTICAL_ADDRESS_SHIFT				16
+#define  VERTICAL_ADDRESS_MASK				(0xffff << 16)
+#define  HORIZONTAL_ADDRESS_SHIFT			0
+#define  HORIZONTAL_ADDRESS_MASK			0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE		(VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe)	_PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define  DBI_FIFO_EMPTY_HALF				(0 << 0)
+#define  DBI_FIFO_EMPTY_QUARTER				(1 << 0)
+#define  DBI_FIFO_EMPTY_7_LOCATIONS			(2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT			(VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT			(VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT		(VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe)	_PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT		(VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe)	_PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT			(VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT			(VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe)		_PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT		(VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)	_PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL			(VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe)		_PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define  DPI_LP_MODE					(1 << 6)
+#define  BACKLIGHT_OFF					(1 << 5)
+#define  BACKLIGHT_ON					(1 << 4)
+#define  COLOR_MODE_OFF					(1 << 3)
+#define  COLOR_MODE_ON					(1 << 2)
+#define  TURN_ON					(1 << 1)
+#define  SHUTDOWN					(1 << 0)
+
+#define _MIPIA_DPI_DATA				(VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA				(VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe)		_PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define  COMMAND_BYTE_SHIFT				0
+#define  COMMAND_BYTE_MASK				(0x3f << 0)
+
+#define _MIPIA_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT			(VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe)		_PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define  MASTER_INIT_TIMER_SHIFT			0
+#define  MASTER_INIT_TIMER_MASK				(0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE		(VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe)	_PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define  MAX_RETURN_PKT_SIZE_SHIFT			0
+#define  MAX_RETURN_PKT_SIZE_MASK			(0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT		(VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe)	_PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define  RANDOM_DPI_DISPLAY_RESOLUTION			(1 << 4)
+#define  DISABLE_VIDEO_BTA				(1 << 3)
+#define  IP_TG_CONFIG					(1 << 2)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE		(1 << 0)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS		(2 << 0)
+#define  VIDEO_MODE_BURST				(3 << 0)
+
+#define _MIPIA_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE			(VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe)		_PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 7)
+#define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE		(1 << 6)
+#define  LOW_CONTENTION_RECOVERY_DISABLE		(1 << 5)
+#define  HIGH_CONTENTION_RECOVERY_DISABLE		(1 << 4)
+#define  TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define  TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE		(1 << 2)
+#define  CLOCKSTOP					(1 << 1)
+#define  EOT_DISABLE					(1 << 0)
+
+#define _MIPIA_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK			(VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe)		_PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define  LP_BYTECLK_SHIFT				0
+#define  LP_BYTECLK_MASK				(0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA			(VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA			(VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL			(VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe)		_PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define  LONG_PACKET_WORD_COUNT_SHIFT			8
+#define  LONG_PACKET_WORD_COUNT_MASK			(0xffff << 8)
+#define  SHORT_PACKET_PARAM_SHIFT			8
+#define  SHORT_PACKET_PARAM_MASK			(0xffff << 8)
+#define  VIRTUAL_CHANNEL_SHIFT				6
+#define  VIRTUAL_CHANNEL_MASK				(3 << 6)
+#define  DATA_TYPE_SHIFT				0
+#define  DATA_TYPE_MASK					(0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT			(VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe)	_PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define  DPI_FIFO_EMPTY					(1 << 28)
+#define  DBI_FIFO_EMPTY					(1 << 27)
+#define  LP_CTRL_FIFO_EMPTY				(1 << 26)
+#define  LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+#define  LP_CTRL_FIFO_FULL				(1 << 24)
+#define  HS_CTRL_FIFO_EMPTY				(1 << 18)
+#define  HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+#define  HS_CTRL_FIFO_FULL				(1 << 16)
+#define  LP_DATA_FIFO_EMPTY				(1 << 10)
+#define  LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+#define  LP_DATA_FIFO_FULL				(1 << 8)
+#define  HS_DATA_FIFO_EMPTY				(1 << 2)
+#define  HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+#define  HS_DATA_FIFO_FULL				(1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE			(VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe)	_PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define  DBI_HS_LP_MODE_MASK				(1 << 0)
+#define  DBI_LP_MODE					(1 << 0)
+#define  DBI_HS_MODE					(0 << 0)
+
+#define _MIPIA_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM			(VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe)		_PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define  EXIT_ZERO_COUNT_SHIFT				24
+#define  EXIT_ZERO_COUNT_MASK				(0x3f << 24)
+#define  TRAIL_COUNT_SHIFT				16
+#define  TRAIL_COUNT_MASK				(0x1f << 16)
+#define  CLK_ZERO_COUNT_SHIFT				8
+#define  CLK_ZERO_COUNT_MASK				(0xff << 8)
+#define  PREPARE_COUNT_SHIFT				0
+#define  PREPARE_COUNT_MASK				(0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL			(VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe)		_PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT		(VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)	_PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define  LP_HS_SSW_CNT_SHIFT				16
+#define  LP_HS_SSW_CNT_MASK				(0xffff << 16)
+#define  HS_LP_PWR_SW_CNT_SHIFT				0
+#define  HS_LP_PWR_SW_CNT_MASK				(0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL			(VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe)	_PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define  STOP_STATE_STALL_COUNTER_SHIFT			0
+#define  STOP_STATE_STALL_COUNTER_MASK			(0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1			(VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1			(VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe)	_PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define  RX_CONTENTION_DETECTED				(1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL			(VLV_DISPLAY_BASE + 0xb100)
+#define  DBI_TYPEC_ENABLE				(1 << 31)
+#define  DBI_TYPEC_WIP					(1 << 30)
+#define  DBI_TYPEC_OPTION_SHIFT				28
+#define  DBI_TYPEC_OPTION_MASK				(3 << 28)
+#define  DBI_TYPEC_FREQ_SHIFT				24
+#define  DBI_TYPEC_FREQ_MASK				(0xf << 24)
+#define  DBI_TYPEC_OVERRIDE				(1 << 8)
+#define  DBI_TYPEC_OVERRIDE_COUNTER_SHIFT		0
+#define  DBI_TYPEC_OVERRIDE_COUNTER_MASK		(0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL				(VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL				(VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe)			_PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define  ESCAPE_CLOCK_DIVIDER_SHIFT			5 /* A only */
+#define  ESCAPE_CLOCK_DIVIDER_MASK			(3 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_1				(0 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_2				(1 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_4				(2 << 5)
+#define  READ_REQUEST_PRIORITY_SHIFT			3
+#define  READ_REQUEST_PRIORITY_MASK			(3 << 3)
+#define  READ_REQUEST_PRIORITY_LOW			(0 << 3)
+#define  READ_REQUEST_PRIORITY_HIGH			(3 << 3)
+#define  RGB_FLIP_TO_BGR				(1 << 2)
+
+#define _MIPIA_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS			(VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe)		_PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define  DATA_MEM_ADDRESS_SHIFT				5
+#define  DATA_MEM_ADDRESS_MASK				(0x7ffffff << 5)
+#define  DATA_VALID					(1 << 0)
+
+#define _MIPIA_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH			(VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe)		_PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define  DATA_LENGTH_SHIFT				0
+#define  DATA_LENGTH_MASK				(0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS			(VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe)	_PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define  COMMAND_MEM_ADDRESS_SHIFT			5
+#define  COMMAND_MEM_ADDRESS_MASK			(0x7ffffff << 5)
+#define  AUTO_PWG_ENABLE				(1 << 2)
+#define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING		(1 << 1)
+#define  COMMAND_VALID					(1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH			(VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe)	_PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define  COMMAND_LENGTH_SHIFT(n)			(8 * (n)) /* n: 0...3 */
+#define  COMMAND_LENGTH_MASK(n)				(0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0		(VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+	(_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID			(VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe)	_PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define  READ_DATA_VALID(n)				(1 << (n))
+
 #endif /* _I915_REG_H_ */
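
The new EDP_PSR_*(dev) and MIPI_*(pipe) macros above all follow the same two parameterization patterns: a per-platform base plus fixed offset, and a _PIPE() pick between an A and a B instance. A rough standalone illustration follows; the _PIPE() definition and the VLV_DISPLAY_BASE value (0x180000) are assumptions about what i915_reg.h defines elsewhere, not shown in this hunk:

#include <assert.h>

#define _PIPE(pipe, a, b)	((a) + (pipe)*((b)-(a)))	/* assumed definition */

#define VLV_DISPLAY_BASE	0x180000			/* assumed value */
#define _MIPIA_PORT_CTRL	(VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL	(VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(pipe)	_PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)

int main(void)
{
	/* pipe 0 selects the A instance, pipe 1 lands exactly on the B instance */
	assert(MIPI_PORT_CTRL(0) == VLV_DISPLAY_BASE + 0x61190);
	assert(MIPI_PORT_CTRL(1) == VLV_DISPLAY_BASE + 0x61700);
	return 0;
}
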
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 70db618..98790c7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -214,6 +214,22 @@
 		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
 		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+
+		dev_priv->regfile.saveBLC_PWM_CTL =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_HIST_CTL =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL2 =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
+		dev_priv->regfile.saveBLC_PWM_CTL_B =
+			I915_READ(VLV_BLC_PWM_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_HIST_CTL_B =
+			I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
+		dev_priv->regfile.saveBLC_PWM_CTL2_B =
+			I915_READ(VLV_BLC_PWM_CTL2(PIPE_B));
 	} else {
 		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
@@ -302,6 +318,19 @@
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
 		I915_WRITE(RSTDBYCTL,
 			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+	} else if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
+			   dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_A),
+			   dev_priv->regfile.saveBLC_PWM_CTL2);
+		I915_WRITE(VLV_BLC_PWM_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL_B);
+		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
+			   dev_priv->regfile.saveBLC_HIST_CTL_B);
+		I915_WRITE(VLV_BLC_PWM_CTL2(PIPE_B),
+			   dev_priv->regfile.saveBLC_PWM_CTL2_B);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
 		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
@@ -340,7 +369,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_read_config_byte(dev->pdev, LBB,
+				     &dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -367,7 +398,8 @@
 	intel_disable_gt_powersave(dev);
 
 	/* Cache mode state */
-	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
 	/* Memory Arbitration state */
 	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +422,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+	if (INTEL_INFO(dev)->gen <= 4)
+		pci_write_config_byte(dev->pdev, LBB,
+				      dev_priv->regfile.saveLBB);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -414,7 +448,9 @@
 	}
 
 	/* Cache mode state */
-	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+	if (INTEL_INFO(dev)->gen < 7)
+		I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+			   0xffff0000);
 
 	/* Memory arbitration state */
 	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c8c4112..cef38fd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,30 +32,50 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
 #ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 raw_time; /* 32b value may overflow during fixed point math */
+	u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
 
 	if (!intel_enable_rc6(dev))
 		return 0;
 
-	raw_time = I915_READ(reg) * 128ULL;
-	return DIV_ROUND_UP_ULL(raw_time, 100000);
+	/* On VLV, residency time is in CZ units rather than 1.28us */
+	if (IS_VALLEYVIEW(dev)) {
+		u32 clkctl2;
+
+		clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+			CLK_CTL2_CZCOUNT_30NS_SHIFT;
+		if (!clkctl2) {
+			WARN(!clkctl2, "bogus CZ count value");
+			return 0;
+		}
+		units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+			units <<= 8;
+
+		div = 1000000ULL * bias;
+	}
+
+	raw_time = I915_READ(reg) * units;
+	return DIV_ROUND_UP_ULL(raw_time, div);
 }
 
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
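
The VLV branch added to calc_residency() above converts a counter that ticks in CZ clock units rather than fixed 1.28us steps. A small standalone example of that arithmetic; the CZCOUNT_30NS value is illustrative (one CZ tick per 30 ns, i.e. a ~33 MHz counter):

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP_ULL(x, d)	(((x) + (d) - 1) / (d))

int main(void)
{
	uint64_t bias = 100, div = 1000000ULL * bias;
	uint32_t czcount_30ns = 1;	/* illustrative CZCOUNT_30NS field value */
	uint64_t units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);

	/* 1,000,000 CZ ticks at 30 ns each is 30 ms of residency */
	assert(DIV_ROUND_UP_ULL(1000000ULL * units, div) == 30);
	return 0;
}
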
@@ -63,16 +83,20 @@
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6p_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(kdev);
 	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	if (IS_VALLEYVIEW(dminor->dev))
+		rc6pp_residency = 0;
 	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
@@ -97,7 +121,7 @@
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-	if (!HAS_L3_GPU_CACHE(dev))
+	if (!HAS_L3_DPF(dev))
 		return -EPERM;
 
 	if (offset % 4 != 0)
@@ -115,31 +139,34 @@
 	     loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
-	uint32_t misccpctl;
-	int i, ret;
+	int slice = (int)(uintptr_t)attr->private;
+	int ret;
+
+	count = round_down(count, 4);
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	misccpctl = I915_READ(GEN7_MISCCPCTL);
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
-		*((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
-	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+	if (dev_priv->l3_parity.remap_info[slice])
+		memcpy(buf,
+		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
+		       count);
+	else
+		memset(buf, 0, count);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
-	return i - offset;
+	return count;
 }
 
 static ssize_t
@@ -148,21 +175,26 @@
 	      loff_t offset, size_t count)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
 	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct i915_hw_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
+	int slice = (int)(uintptr_t)attr->private;
 	int ret;
 
 	ret = l3_access_valid(drm_dev, offset);
 	if (ret)
 		return ret;
 
+	if (dev_priv->hw_contexts_disabled)
+		return -ENXIO;
+
 	ret = i915_mutex_lock_interruptible(drm_dev);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->l3_parity.remap_info) {
+	if (!dev_priv->l3_parity.remap_info[slice]) {
 		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 		if (!temp) {
 			mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@
 	 * at this point it is left as a TODO.
 	*/
 	if (temp)
-		dev_priv->l3_parity.remap_info = temp;
+		dev_priv->l3_parity.remap_info[slice] = temp;
 
-	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
-	       buf + (offset/4),
-	       count);
+	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
 
-	i915_gem_l3_remap(drm_dev);
+	/* NB: We defer the remapping until we switch to the context */
+	list_for_each_entry(ctx, &dev_priv->context_list, link)
+		ctx->remap_slice |= (1<<slice);
 
 	mutex_unlock(&drm_dev->struct_mutex);
 
@@ -200,17 +232,29 @@
 	.size = GEN7_L3LOG_SIZE,
 	.read = i915_l3_read,
 	.write = i915_l3_write,
-	.mmap = NULL
+	.mmap = NULL,
+	.private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL,
+	.private = (void *)1
 };
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -227,7 +271,7 @@
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -238,11 +282,13 @@
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val, rp_state_cap;
@@ -436,7 +488,7 @@
 {
 
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	struct i915_error_state_file_priv error_priv;
 	struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@
 				 loff_t off, size_t count)
 {
 	struct device *kdev = container_of(kobj, struct device, kobj);
-	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
 	int ret;
 
@@ -501,27 +553,34 @@
 
 #ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
-		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
 #endif
-	if (HAS_L3_GPU_CACHE(dev)) {
-		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+	if (HAS_L3_DPF(dev)) {
+		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
+
+		if (NUM_L3_SLICES(dev) > 1) {
+			ret = device_create_bin_file(dev->primary->kdev,
+						     &dpf_attrs_1);
+			if (ret)
+				DRM_ERROR("l3 parity slice 1 setup failed\n");
+		}
 	}
 
 	ret = 0;
 	if (IS_VALLEYVIEW(dev))
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
 	if (ret)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
-	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
 				    &error_state_attr);
 	if (ret)
 		DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
-	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
 	if (IS_VALLEYVIEW(dev))
-		sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
 	else
-		sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
-	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
+	device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
 #endif
 }
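
The dpf_attrs/dpf_attrs_1 pair above shares one pair of read/write handlers and tells the L3 slices apart through the bin_attribute ->private cookie. A minimal user-space sketch of that dispatch pattern; fake_bin_attribute and which_slice() are hypothetical and only mimic the relevant fields of the kernel's bin_attribute:

#include <assert.h>
#include <stdint.h>

struct fake_bin_attribute {
	const char *name;
	void *private;		/* carries the slice index, as in the patch */
};

static int which_slice(const struct fake_bin_attribute *attr)
{
	return (int)(uintptr_t)attr->private;
}

static struct fake_bin_attribute dpf_attrs   = { "l3_parity",         (void *)0 };
static struct fake_bin_attribute dpf_attrs_1 = { "l3_parity_slice_1", (void *)1 };

int main(void)
{
	assert(which_slice(&dpf_attrs) == 0);
	assert(which_slice(&dpf_attrs_1) == 1);
	return 0;
}
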
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6..6e580c9 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@
 	    TP_printk("dev=%d", __entry->dev)
 );
 
+TRACE_EVENT(i915_gem_evict_vm,
+	    TP_PROTO(struct i915_address_space *vm),
+	    TP_ARGS(vm),
+
+	    TP_STRUCT__entry(
+			     __field(struct i915_address_space *, vm)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->vm = vm;
+			  ),
+
+	    TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+	    TP_PROTO(struct intel_ring_buffer *from,
+		     struct intel_ring_buffer *to,
+		     u32 seqno),
+	    TP_ARGS(from, to, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, sync_from)
+			     __field(u32, sync_to)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = from->dev->primary->index;
+			   __entry->sync_from = from->id;
+			   __entry->sync_to = to->id;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+		      __entry->dev,
+		      __entry->sync_from, __entry->sync_to,
+		      __entry->seqno)
+);
+
 TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
 	    TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@
 	    TP_ARGS(ring, seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-	    TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = ring->get_seqno(ring, false);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 53f2bed..6dd622d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@
 {
 	struct sdvo_device_mapping *p_mapping;
 	struct bdb_general_definitions *p_defs;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -416,36 +416,36 @@
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->old.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		if (p_child->slave_addr != SLAVE_ADDR1 &&
-			p_child->slave_addr != SLAVE_ADDR2) {
+		if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+			p_child->old.slave_addr != SLAVE_ADDR2) {
 			/*
 			 * If the slave address is neither 0x70 nor 0x72,
 			 * it is not a SDVO device. Skip it.
 			 */
 			continue;
 		}
-		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
-			p_child->dvo_port != DEVICE_PORT_DVOC) {
+		if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+			p_child->old.dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
 			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
 				" %s port\n",
-				p_child->slave_addr,
-				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
+				p_child->old.slave_addr,
+				(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
 					"SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+		p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
 		if (!p_mapping->initialized) {
-			p_mapping->dvo_port = p_child->dvo_port;
-			p_mapping->slave_addr = p_child->slave_addr;
-			p_mapping->dvo_wiring = p_child->dvo_wiring;
-			p_mapping->ddc_pin = p_child->ddc_pin;
-			p_mapping->i2c_pin = p_child->i2c_pin;
+			p_mapping->dvo_port = p_child->old.dvo_port;
+			p_mapping->slave_addr = p_child->old.slave_addr;
+			p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+			p_mapping->ddc_pin = p_child->old.ddc_pin;
+			p_mapping->i2c_pin = p_child->old.i2c_pin;
 			p_mapping->initialized = 1;
 			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
@@ -457,7 +457,7 @@
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
 		}
-		if (p_child->slave2_addr) {
+		if (p_child->old.slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
 			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@
 parse_driver_features(struct drm_i915_private *dev_priv,
 		       struct bdb_header *bdb)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct bdb_driver_features *driver;
 
 	driver = find_section(bdb, BDB_DRIVER_FEATURES);
 	if (!driver)
 		return;
 
-	if (SUPPORTS_EDP(dev) &&
-	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
 		dev_priv->vbt.edp_support = 1;
 
 	if (driver->dual_frequency)
@@ -501,7 +499,7 @@
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+		if (dev_priv->vbt.edp_support)
 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
 		return;
 	}
@@ -569,11 +567,149 @@
 }
 
 static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_mipi *mipi;
+
+	mipi = find_section(bdb, BDB_MIPI);
+	if (!mipi) {
+		DRM_DEBUG_KMS("No MIPI BDB found");
+		return;
+	}
+
+	/* XXX: add more info */
+	dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+			   struct bdb_header *bdb)
+{
+	union child_device_config *it, *child = NULL;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+	uint8_t hdmi_level_shift;
+	int i, j;
+	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+	uint8_t aux_channel;
+	/* Each DDI port can have more than one value on the "DVO Port" field,
+	 * so look for all the possible values for each port and abort if more
+	 * than one is found. */
+	int dvo_ports[][2] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC},
+		{DVO_PORT_HDMID, DVO_PORT_DPD},
+		{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+	};
+
+	/* Find the child device to use, abort if more than one found. */
+	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+		it = dev_priv->vbt.child_dev + i;
+
+		for (j = 0; j < 2; j++) {
+			if (dvo_ports[port][j] == -1)
+				break;
+
+			if (it->common.dvo_port == dvo_ports[port][j]) {
+				if (child) {
+					DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+						      port_name(port));
+					return;
+				}
+				child = it;
+			}
+		}
+	}
+	if (!child)
+		return;
+
+	aux_channel = child->raw[25];
+
+	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+	is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+
+	info->supports_dvi = is_dvi;
+	info->supports_hdmi = is_hdmi;
+	info->supports_dp = is_dp;
+
+	DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+		      port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+	if (is_edp && is_dvi)
+		DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+			      port_name(port));
+	if (is_crt && port != PORT_E)
+		DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+	if (is_crt && (is_dvi || is_dp))
+		DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+			      port_name(port));
+	if (is_dvi && (port == PORT_A || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
+	if (!is_dvi && !is_dp && !is_crt)
+		DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+			      port_name(port));
+	if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+	if (is_dvi) {
+		if (child->common.ddc_pin == 0x05 && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+		if (child->common.ddc_pin == 0x04 && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+		if (child->common.ddc_pin == 0x06 && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+	}
+
+	if (is_dp) {
+		if (aux_channel == 0x40 && port != PORT_A)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+		if (aux_channel == 0x10 && port != PORT_B)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+		if (aux_channel == 0x20 && port != PORT_C)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+		if (aux_channel == 0x30 && port != PORT_D)
+			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+	}
+
+	if (bdb->version >= 158) {
+		/* The VBT HDMI level shift values match the table we have. */
+		hdmi_level_shift = child->raw[7] & 0xF;
+		if (hdmi_level_shift < 0xC) {
+			DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+				      port_name(port),
+				      hdmi_level_shift);
+			info->hdmi_level_shift = hdmi_level_shift;
+		}
+	}
+}
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+			    struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum port port;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	if (!dev_priv->vbt.child_dev_num)
+		return;
+
+	if (bdb->version < 155)
+		return;
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++)
+		parse_ddi_port(dev_priv, port, bdb);
+}
+
+static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
 		       struct bdb_header *bdb)
 {
 	struct bdb_general_definitions *p_defs;
-	struct child_device_config *p_child, *child_dev_ptr;
+	union child_device_config *p_child, *child_dev_ptr;
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -601,7 +737,7 @@
 	/* get the number of child device that is present */
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->common.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
@@ -621,7 +757,7 @@
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
 		p_child = &(p_defs->devices[i]);
-		if (!p_child->device_type) {
+		if (!p_child->common.device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
@@ -637,6 +773,7 @@
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	enum port port;
 
 	dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
@@ -655,6 +792,18 @@
 	dev_priv->vbt.lvds_use_ssc = 1;
 	dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
 	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+		struct ddi_vbt_port_info *info =
+			&dev_priv->vbt.ddi_port_info[port];
+
+		/* Recommended BSpec default: 800mV 0dB. */
+		info->hdmi_level_shift = 6;
+
+		info->supports_dvi = (port != PORT_A && port != PORT_E);
+		info->supports_hdmi = info->supports_dvi;
+		info->supports_dp = (port != PORT_E);
+	}
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@
 	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
+	parse_mipi(dev_priv, bdb);
+	parse_ddi_ports(dev_priv, bdb);
 
 	if (bios)
 		pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index e088d6f..f580a2b 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -104,6 +104,7 @@
 #define BDB_LVDS_LFP_DATA	 42
 #define BDB_LVDS_BACKLIGHT	 43
 #define BDB_LVDS_POWER		 44
+#define BDB_MIPI		 50
 #define BDB_SKIP		254 /* VBIOS private block, ignore */
 
 struct bdb_general_features {
@@ -201,7 +202,10 @@
 #define DEVICE_PORT_DVOB	0x01
 #define DEVICE_PORT_DVOC	0x02
 
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
 	u16 handle;
 	u16 device_type;
 	u8  device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@
 	u8  dvo_function;
 } __attribute__((packed));
 
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+	u16 handle;
+	u16 device_type;
+	u8 not_common1[12];
+	u8 dvo_port;
+	u8 not_common2[2];
+	u8 ddc_pin;
+	u16 edid_ptr;
+} __attribute__((packed));
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+	/* This one is safe to be used anywhere, but the code should still check
+	 * the BDB version. */
+	u8 raw[33];
+	/* This one should only be kept for legacy code. */
+	struct old_child_dev_config old;
+	/* This one should also be safe to use anywhere, even without version
+	 * checks. */
+	struct common_child_dev_config common;
+};
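
An illustrative sketch, not part of the patch: accessor helpers like the following (names invented here) show the intended split between the version-stable 'common' view and raw, version-gated reads.

	static inline u16 child_device_type(const union child_device_config *child)
	{
		/* 'common' covers only offsets that are stable across BDB versions */
		return child->common.device_type;
	}

	static inline u8 child_device_raw_byte(const union child_device_config *child,
					       int i)
	{
		/* version-dependent fields go through 'raw', after checking bdb->version */
		return child->raw[i];
	}
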
+
 struct bdb_general_definitions {
 	/* DDC GPIO */
 	u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@
 	 * number = (block_size - sizeof(bdb_general_definitions))/
 	 *	     sizeof(child_device_config);
 	 */
-	struct child_device_config devices[0];
+	union child_device_config devices[0];
 } __attribute__((packed));
 
 struct bdb_lvds_options {
@@ -608,6 +638,40 @@
 #define	 DEVICE_TYPE_DP		0x68C6
 #define	 DEVICE_TYPE_eDP	0x78C6
 
+#define  DEVICE_TYPE_CLASS_EXTENSION	(1 << 15)
+#define  DEVICE_TYPE_POWER_MANAGEMENT	(1 << 14)
+#define  DEVICE_TYPE_HOTPLUG_SIGNALING	(1 << 13)
+#define  DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
+#define  DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11)
+#define  DEVICE_TYPE_MIPI_OUTPUT	(1 << 10)
+#define  DEVICE_TYPE_COMPOSITE_OUTPUT	(1 << 9)
+#define  DEVICE_TYPE_DUAL_CHANNEL	(1 << 8)
+#define  DEVICE_TYPE_HIGH_SPEED_LINK	(1 << 6)
+#define  DEVICE_TYPE_LVDS_SINGALING	(1 << 5)
+#define  DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)
+#define  DEVICE_TYPE_VIDEO_SIGNALING	(1 << 3)
+#define  DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
+#define  DEVICE_TYPE_DIGITAL_OUTPUT	(1 << 1)
+#define  DEVICE_TYPE_ANALOG_OUTPUT	(1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+	(DEVICE_TYPE_INTERNAL_CONNECTOR | \
+	 DEVICE_TYPE_NOT_HDMI_OUTPUT | \
+	 DEVICE_TYPE_MIPI_OUTPUT | \
+	 DEVICE_TYPE_COMPOSITE_OUTPUT | \
+	 DEVICE_TYPE_DUAL_CHANNEL | \
+	 DEVICE_TYPE_LVDS_SINGALING | \
+	 DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+	 DEVICE_TYPE_VIDEO_SIGNALING | \
+	 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+	 DEVICE_TYPE_DIGITAL_OUTPUT | \
+	 DEVICE_TYPE_ANALOG_OUTPUT)
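
A hedged sketch of how the mask above could be applied (the helper name is invented for illustration):

	static inline bool child_device_is_edp(u16 device_type)
	{
		/* drop the bits that vary between systems before comparing */
		return (device_type & DEVICE_TYPE_eDP_BITS) ==
		       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
	}
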
+
 /* define the DVO port for HDMI output type */
 #define		DVO_B		1
 #define		DVO_C		2
@@ -618,4 +682,57 @@
 #define		PORT_IDPC	8
 #define		PORT_IDPD	9
 
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA	0
+#define DVO_PORT_HDMIB	1
+#define DVO_PORT_HDMIC	2
+#define DVO_PORT_HDMID	3
+#define DVO_PORT_LVDS	4
+#define DVO_PORT_TV	5
+#define DVO_PORT_CRT	6
+#define DVO_PORT_DPB	7
+#define DVO_PORT_DPC	8
+#define DVO_PORT_DPD	9
+#define DVO_PORT_DPA	10
+
+/* MIPI DSI panel info */
+struct bdb_mipi {
+	u16 panel_id;
+	u16 bridge_revision;
+
+	/* General params */
+	u32 dithering:1;
+	u32 bpp_pixel_format:1;
+	u32 rsvd1:1;
+	u32 dphy_valid:1;
+	u32 resvd2:28;
+
+	u16 port_info;
+	u16 rsvd3:2;
+	u16 num_lanes:2;
+	u16 rsvd4:12;
+
+	/* DSI config */
+	u16 virt_ch_num:2;
+	u16 vtm:2;
+	u16 rsvd5:12;
+
+	u32 dsi_clock;
+	u32 bridge_ref_clk;
+	u16 rsvd_pwr;
+
+	/* Dphy Params */
+	u32 prepare_cnt:5;
+	u32 rsvd6:3;
+	u32 clk_zero_cnt:8;
+	u32 trail_cnt:5;
+	u32 rsvd7:3;
+	u32 exit_zero_cnt:6;
+	u32 rsvd8:2;
+
+	u32 hl_switch_cnt;
+	u32 lp_byte_clk;
+	u32 clk_lane_switch_cnt;
+} __attribute__((packed));
+
 #endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ea9022e..2e01bd3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,8 +83,7 @@
 	return true;
 }
 
-static void intel_crt_get_config(struct intel_encoder *encoder,
-				 struct intel_crtc_config *pipe_config)
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,35 @@
 	else
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
-	pipe_config->adjusted_mode.flags |= flags;
+	return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	int dotclock;
+
+	pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+			       struct intel_crtc_config *pipe_config)
+{
+	intel_ddi_get_config(encoder, pipe_config);
+
+	pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+					      DRM_MODE_FLAG_NHSYNC |
+					      DRM_MODE_FLAG_PVSYNC |
+					      DRM_MODE_FLAG_NVSYNC);
+	pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
 /* Note: The caller is required to filter out dpms modes not supported by the
@@ -247,7 +274,7 @@
 	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
 	u32 adpa;
 
-	if (HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev)->gen >= 5)
 		adpa = ADPA_HOTPLUG_BITS;
 	else
 		adpa = 0;
@@ -349,9 +376,6 @@
 
 	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
-	/* FIXME: debug force function and remove */
-	ret = true;
-
 	return ret;
 }
 
@@ -653,7 +677,6 @@
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -759,7 +782,7 @@
 	if (!crt)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(crt);
 		return;
@@ -799,7 +822,10 @@
 	crt->base.mode_set = intel_crt_mode_set;
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
-	crt->base.get_config = intel_crt_get_config;
+	if (IS_HASWELL(dev))
+		crt->base.get_config = hsw_crt_get_config;
+	else
+		crt->base.get_config = intel_crt_get_config;
 	if (I915_HAS_HOTPLUG(dev))
 		crt->base.hpd_pin = HPD_CRT;
 	if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 63de270..31f4fe2 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@
 	0x80C30FFF, 0x000B0000,
 	0x00FFFFFF, 0x00040006,
 	0x80D75FFF, 0x000B0000,
-	0x00FFFFFF, 0x00040006		/* HDMI parameters */
 };
 
 static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,25 @@
 	0x00C30FFF, 0x001E0000,
 	0x00FFFFFF, 0x00060006,
 	0x00D75FFF, 0x001E0000,
-	0x00FFFFFF, 0x00040006		/* HDMI parameters */
 };
 
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const u32 hsw_ddi_translations_hdmi[] = {
+				/* Idx	NT mV diff	T mV diff	db  */
+	0x00FFFFFF, 0x0006000E, /* 0:	400		400		0   */
+	0x00E79FFF, 0x000E000C, /* 1:	400		500		2   */
+	0x00D75FFF, 0x0005000A, /* 2:	400		600		3.5 */
+	0x00FFFFFF, 0x0005000A, /* 3:	600		600		0   */
+	0x00E79FFF, 0x001D0007, /* 4:	600		750		2   */
+	0x00D75FFF, 0x000C0004, /* 5:	600		900		3.5 */
+	0x00FFFFFF, 0x00040006, /* 6:	800		800		0   */
+	0x80E79FFF, 0x00030002, /* 7:	800		1000		2   */
+	0x00FFFFFF, 0x00140005, /* 8:	850		850		0   */
+	0x00FFFFFF, 0x000C0004, /* 9:	900		900		0   */
+	0x00FFFFFF, 0x001C0003, /* 10:	950		950		0   */
+	0x80FFFFFF, 0x00030002, /* 11:	1000		1000		0   */
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	int type = intel_encoder->type;
@@ -92,12 +106,18 @@
 	const u32 *ddi_translations = (port == PORT_E) ?
 		hsw_ddi_translations_fdi :
 		hsw_ddi_translations_dp;
+	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
 	for (i = 0, reg = DDI_BUF_TRANS(port);
 	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
 		I915_WRITE(reg, ddi_translations[i]);
 		reg += 4;
 	}
+	/* Entry 9 is for HDMI: */
+	for (i = 0; i < 2; i++) {
+		I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+		reg += 4;
+	}
 }
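
A worked example of the HDMI entry selection above, assuming the BSpec-recommended default level shift of 6 (800 mV, 0 dB) from init_vbt_defaults():

	/*
	 * hdmi_level = 6 selects table entry 6, i.e. the two dwords
	 * hsw_ddi_translations_hdmi[12] = 0x00FFFFFF and
	 * hsw_ddi_translations_hdmi[13] = 0x00040006, which are written to the
	 * two DDI_BUF_TRANS registers that follow the nine DP/FDI entries.
	 */
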
 
 /* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +316,6 @@
 			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
 			intel_write_eld(&encoder->base, adjusted_mode);
 		}
-
-		intel_dp_init_link_config(intel_dp);
-
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
@@ -767,9 +784,9 @@
 		BUG();
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
 		temp |= TRANS_DDI_PVSYNC;
-	if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
 		temp |= TRANS_DDI_PHSYNC;
 
 	if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1202,7 +1219,7 @@
 
 	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
 	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-	if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
 	I915_WRITE(DP_TP_CTL(port), val);
 	POSTING_READ(DP_TP_CTL(port));
@@ -1249,8 +1266,8 @@
 		intel_dp_check_link_status(intel_dp);
 }
 
-static void intel_ddi_get_config(struct intel_encoder *encoder,
-				 struct intel_crtc_config *pipe_config)
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_config *pipe_config)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1285,37 @@
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
+	switch (temp & TRANS_DDI_BPC_MASK) {
+	case TRANS_DDI_BPC_6:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case TRANS_DDI_BPC_8:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case TRANS_DDI_BPC_10:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case TRANS_DDI_BPC_12:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+
+	switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+	case TRANS_DDI_MODE_SELECT_DVI:
+	case TRANS_DDI_MODE_SELECT_FDI:
+		break;
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		pipe_config->has_dp_encoder = true;
+		intel_dp_get_m_n(intel_crtc, pipe_config);
+		break;
+	default:
+		break;
+	}
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
@@ -1297,6 +1345,41 @@
 	.destroy = intel_ddi_destroy,
 };
 
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+	if (!intel_dp_init_connector(intel_dig_port, connector)) {
+		kfree(connector);
+		return NULL;
+	}
+
+	return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+	struct intel_connector *connector;
+	enum port port = intel_dig_port->port;
+
+	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+	if (!connector)
+		return NULL;
+
+	intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+	intel_hdmi_init_connector(intel_dig_port, connector);
+
+	return connector;
+}
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1305,17 +1388,22 @@
 	struct drm_encoder *encoder;
 	struct intel_connector *hdmi_connector = NULL;
 	struct intel_connector *dp_connector = NULL;
+	bool init_hdmi, init_dp;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+	if (!init_dp && !init_hdmi) {
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+			      port_name(port));
+		init_hdmi = true;
+		init_dp = true;
+	}
+
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!dp_connector) {
-		kfree(intel_dig_port);
-		return;
-	}
-
 	intel_encoder = &intel_dig_port->base;
 	encoder = &intel_encoder->base;
 
@@ -1335,28 +1423,22 @@
 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
 					  (DDI_BUF_PORT_REVERSAL |
 					   DDI_A_4_LANES);
-	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = false;
 	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-	if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
+	if (init_dp)
+		dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+
+	/* In theory we don't need the encoder->type check, but leave it just in
+	 * case we have some really bad VBTs... */
+	if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+		hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
+
+	if (!dp_connector && !hdmi_connector) {
 		drm_encoder_cleanup(encoder);
 		kfree(intel_dig_port);
-		kfree(dp_connector);
-		return;
-	}
-
-	if (intel_encoder->type != INTEL_OUTPUT_EDP) {
-		hdmi_connector = kzalloc(sizeof(struct intel_connector),
-					 GFP_KERNEL);
-		if (!hdmi_connector) {
-			return;
-		}
-
-		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
-		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 581fb4b..e92f170 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,14 +41,13 @@
 #include <drm/drm_crtc_helper.h>
 #include <linux/dma_remapping.h>
 
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_config *pipe_config);
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config);
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			  int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@
 	intel_p2_t	    p2;
 };
 
-/* FDI */
-#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
-
 int
 intel_pch_rawclk(struct drm_device *dev)
 {
@@ -313,44 +309,44 @@
 		.p2_slow = 7, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_vlv_dac = {
-	.dot = { .min = 25000, .max = 270000 },
+static const intel_limit_t intel_limits_vlv = {
+	 /*
+	  * These are the data rate limits (measured in fast clocks)
+	  * since those are the strictest limits we have. The fast
+	  * clock and actual rate limits are more relaxed, so checking
+	  * them would make no difference.
+	  */
+	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
 	.vco = { .min = 4000000, .max = 6000000 },
 	.n = { .min = 1, .max = 7 },
-	.m = { .min = 22, .max = 450 }, /* guess */
 	.m1 = { .min = 2, .max = 3 },
 	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 1, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
-};
-
-static const intel_limit_t intel_limits_vlv_hdmi = {
-	.dot = { .min = 25000, .max = 270000 },
-	.vco = { .min = 4000000, .max = 6000000 },
-	.n = { .min = 1, .max = 7 },
-	.m = { .min = 60, .max = 300 }, /* guess */
-	.m1 = { .min = 2, .max = 3 },
-	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
 	.p1 = { .min = 2, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
+	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
-static const intel_limit_t intel_limits_vlv_dp = {
-	.dot = { .min = 25000, .max = 270000 },
-	.vco = { .min = 4000000, .max = 6000000 },
-	.n = { .min = 1, .max = 7 },
-	.m = { .min = 22, .max = 450 },
-	.m1 = { .min = 2, .max = 3 },
-	.m2 = { .min = 11, .max = 156 },
-	.p = { .min = 10, .max = 30 },
-	.p1 = { .min = 1, .max = 3 },
-	.p2 = { .dot_limit = 270000,
-		.p2_slow = 2, .p2_fast = 20 },
-};
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
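
A worked pass through vlv_clock() with divider values chosen purely for illustration (they are assumptions, not values from any VBT or board):

	/*
	 * refclk = 100000 kHz, n = 1, m1 = 2, m2 = 27, p1 = 2, p2 = 2
	 *   m   = m1 * m2 = 54
	 *   p   = p1 * p2 = 4
	 *   vco = 100000 * 54 / 1 = 5400000 kHz  (within the 4-6 GHz vco limit)
	 *   dot = 5400000 / 4     = 1350000 kHz
	 * "dot" here is the 5x fast clock that the vlv limits are expressed in,
	 * so the corresponding pixel/link clock is 1350000 / 5 = 270000 kHz.
	 */
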
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->type == type)
+			return true;
+
+	return false;
+}
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
 						int refclk)
@@ -412,12 +408,7 @@
 		else
 			limit = &intel_limits_pineview_sdvo;
 	} else if (IS_VALLEYVIEW(dev)) {
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
-			limit = &intel_limits_vlv_dac;
-		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
-			limit = &intel_limits_vlv_hdmi;
-		else
-			limit = &intel_limits_vlv_dp;
+		limit = &intel_limits_vlv;
 	} else if (!IS_GEN2(dev)) {
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 			limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / clock->n;
-	clock->dot = clock->vco / clock->p;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
-	clock->vco = refclk * clock->m / (clock->n + 2);
-	clock->dot = clock->vco / clock->p;
-}
-
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
-
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->type == type)
-			return true;
-
-	return false;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -481,20 +457,26 @@
 			       const intel_limit_t *limit,
 			       const intel_clock_t *clock)
 {
+	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+		INTELPllInvalid("n out of range\n");
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid("p1 out of range\n");
-	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
-		INTELPllInvalid("p out of range\n");
 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
 		INTELPllInvalid("m2 out of range\n");
 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
 		INTELPllInvalid("m1 out of range\n");
-	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
-		INTELPllInvalid("m1 <= m2\n");
-	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
-		INTELPllInvalid("m out of range\n");
-	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-		INTELPllInvalid("n out of range\n");
+
+	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+		if (clock->m1 <= clock->m2)
+			INTELPllInvalid("m1 <= m2\n");
+
+	if (!IS_VALLEYVIEW(dev)) {
+		if (clock->p < limit->p.min || limit->p.max < clock->p)
+			INTELPllInvalid("p out of range\n");
+		if (clock->m < limit->m.min || limit->m.max < clock->m)
+			INTELPllInvalid("m out of range\n");
+	}
+
 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
 		INTELPllInvalid("vco out of range\n");
 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
-	u32 m, n, fastclk;
-	u32 updrate, minupdate, p;
-	unsigned long bestppm, ppm, absppm;
-	int dotclk, flag;
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+	unsigned int bestppm = 1000000;
+	/* min update 19.2 MHz */
+	int max_n = min(limit->n.max, refclk / 19200);
+	bool found = false;
 
-	flag = 0;
-	dotclk = target * 1000;
-	bestppm = 1000000;
-	ppm = absppm = 0;
-	fastclk = dotclk / (2*100);
-	updrate = 0;
-	minupdate = 19200;
-	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
-	bestm1 = bestm2 = bestp1 = bestp2 = 0;
+	target *= 5; /* fast clock */
+
+	memset(best_clock, 0, sizeof(*best_clock));
 
 	/* based on hardware requirement, prefer smaller n to precision */
-	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
-		updrate = refclk / n;
-		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
-			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
-				if (p2 > 10)
-					p2 = p2 - 1;
-				p = p1 * p2;
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+				clock.p = clock.p1 * clock.p2;
 				/* based on hardware requirement, prefer bigger m1,m2 values */
-				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
-					m2 = (((2*(fastclk * p * n / m1 )) +
-					       refclk) / (2*refclk));
-					m = m1 * m2;
-					vco = updrate * m;
-					if (vco >= limit->vco.min && vco < limit->vco.max) {
-						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
-						absppm = (ppm > 0) ? ppm : (-ppm);
-						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
-							bestppm = 0;
-							flag = 1;
-						}
-						if (absppm < bestppm - 10) {
-							bestppm = absppm;
-							flag = 1;
-						}
-						if (flag) {
-							bestn = n;
-							bestm1 = m1;
-							bestm2 = m2;
-							bestp1 = p1;
-							bestp2 = p2;
-							flag = 0;
-						}
+				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+					unsigned int ppm, diff;
+
+					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+								     refclk * clock.m1);
+
+					vlv_clock(refclk, &clock);
+
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+
+					diff = abs(clock.dot - target);
+					ppm = div_u64(1000000ULL * diff, target);
+
+					if (ppm < 100 && clock.p > best_clock->p) {
+						bestppm = 0;
+						*best_clock = clock;
+						found = true;
+					}
+
+					if (bestppm >= 10 && ppm < bestppm - 10) {
+						bestppm = ppm;
+						*best_clock = clock;
+						found = true;
 					}
 				}
 			}
 		}
 	}
-	best_clock->n = bestn;
-	best_clock->m1 = bestm1;
-	best_clock->m2 = bestm2;
-	best_clock->p1 = bestp1;
-	best_clock->p2 = bestp2;
 
-	return true;
+	return found;
+}
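
A quick check of the 19.2 MHz minimum-update-rate cap computed at the top of vlv_find_best_dpll(), using the 100 MHz VLV reference clock for illustration:

	/*
	 * max_n = min(limit->n.max, refclk / 19200)
	 *       = min(7, 100000 / 19200) = min(7, 5) = 5
	 * so refclk / n never drops below 100000 / 5 = 20000 kHz (20 MHz);
	 * n = 6 would give roughly 16.7 MHz, under the 19.2 MHz minimum.
	 */
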
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 *
+	 * We can ditch the adjusted_mode.crtc_clock check as soon
+	 * as Haswell has gained clock readout/fastboot support.
+	 *
+	 * We can ditch the crtc->fb check as soon as we can
+	 * properly reconstruct framebuffers.
+	 */
+	return intel_crtc->active && crtc->fb &&
+		intel_crtc->config.adjusted_mode.crtc_clock;
 }
 
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -812,6 +800,25 @@
 		DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg = PIPEDSL(pipe);
+	u32 line1, line2;
+	u32 line_mask;
+
+	if (IS_GEN2(dev))
+		line_mask = DSL_LINEMASK_GEN2;
+	else
+		line_mask = DSL_LINEMASK_GEN3;
+
+	line1 = I915_READ(reg) & line_mask;
+	mdelay(5);
+	line2 = I915_READ(reg) & line_mask;
+
+	return line1 == line2;
+}
+
 /*
  * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
@@ -843,22 +850,8 @@
 			     100))
 			WARN(1, "pipe_off wait timed out\n");
 	} else {
-		u32 last_line, line_mask;
-		int reg = PIPEDSL(pipe);
-		unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
-		if (IS_GEN2(dev))
-			line_mask = DSL_LINEMASK_GEN2;
-		else
-			line_mask = DSL_LINEMASK_GEN3;
-
 		/* Wait for the display line to settle */
-		do {
-			last_line = I915_READ(reg) & line_mask;
-			mdelay(5);
-		} while (((I915_READ(reg) & line_mask) != last_line) &&
-			 time_after(timeout, jiffies));
-		if (time_after(jiffies, timeout))
+		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
 			WARN(1, "pipe_off wait timed out\n");
 	}
 }
@@ -929,6 +922,24 @@
 	     state_string(state), state_string(cur_state));
 }
 
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	cur_state = val & DSI_PLL_VCO_EN;
+	WARN(cur_state != state,
+	     "DSI PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
 struct intel_shared_dpll *
 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 {
@@ -1069,6 +1080,26 @@
 	     pipe_name(pipe));
 }
 
+static void assert_cursor(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	struct drm_device *dev = dev_priv->dev;
+	bool cur_state;
+
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+	else if (IS_845G(dev) || IS_I865G(dev))
+		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+	else
+		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+	WARN(cur_state != state,
+	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
 void assert_pipe(struct drm_i915_private *dev_priv,
 		 enum pipe pipe, bool state)
 {
@@ -1323,6 +1354,26 @@
 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
+static void intel_init_dpio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_VALLEYVIEW(dev))
+		return;
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *   b.	The other bits such as sfr settings / modesel may all be set
+	 *      to 0.
+	 *
+	 * This should only be done on init and resume from S3 with both
+	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
 static void vlv_enable_pll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@
 	POSTING_READ(DPLL(pipe));
 }
 
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 val = 0;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	/* Leave integrated clock source enabled */
+	if (pipe == PIPE_B)
+		val = DPLL_INTEGRATED_CRI_CLK_VLV;
+	I915_WRITE(DPLL(pipe), val);
+	POSTING_READ(DPLL(pipe));
+}
+
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
 {
 	u32 port_mask;
@@ -1661,7 +1726,7 @@
  * returning.
  */
 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-			      bool pch_port)
+			      bool pch_port, bool dsi)
 {
 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
 								      pipe);
@@ -1670,6 +1735,7 @@
 	u32 val;
 
 	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
 	if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@
 	 * need the check.
 	 */
 	if (!HAS_PCH_SPLIT(dev_priv->dev))
-		assert_pll_enabled(dev_priv, pipe);
+		if (dsi)
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
 	else {
 		if (pch_port) {
 			/* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@
 	 * or we might hang the display.
 	 */
 	assert_planes_disabled(dev_priv, pipe);
+	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
 
 	/* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@
  * Plane regs are double buffered, going from enabled->disabled needs a
  * trigger in order to latch.  The display address reg provides this.
  */
-void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-				      enum plane plane)
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane)
 {
-	if (dev_priv->info->gen >= 4)
-		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
-	else
-		I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+	I915_WRITE(reg, I915_READ(reg));
+	POSTING_READ(reg);
 }
 
 /**
- * intel_enable_plane - enable a display plane on a given pipe
+ * intel_enable_primary_plane - enable the primary plane on a given pipe
  * @dev_priv: i915 private structure
  * @plane: plane to enable
  * @pipe: pipe being fed
  *
  * Enable @plane on @pipe, making sure that @pipe is running first.
  */
-static void intel_enable_plane(struct drm_i915_private *dev_priv,
-			       enum plane plane, enum pipe pipe)
+static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
+				       enum plane plane, enum pipe pipe)
 {
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 	int reg;
 	u32 val;
 
 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
 	assert_pipe_enabled(dev_priv, pipe);
 
+	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+
+	intel_crtc->primary_enabled = true;
+
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
 	if (val & DISPLAY_PLANE_ENABLE)
 		return;
 
 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev_priv, plane);
+	intel_flush_primary_plane(dev_priv, plane);
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
 /**
- * intel_disable_plane - disable a display plane
+ * intel_disable_primary_plane - disable the primary plane
  * @dev_priv: i915 private structure
  * @plane: plane to disable
  * @pipe: pipe consuming the data
  *
  * Disable @plane; should be an independent operation.
  */
-static void intel_disable_plane(struct drm_i915_private *dev_priv,
-				enum plane plane, enum pipe pipe)
+static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
+					enum plane plane, enum pipe pipe)
 {
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 	int reg;
 	u32 val;
 
+	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+
+	intel_crtc->primary_enabled = false;
+
 	reg = DSPCNTR(plane);
 	val = I915_READ(reg);
 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
 		return;
 
 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-	intel_flush_display_plane(dev_priv, plane);
+	intel_flush_primary_plane(dev_priv, plane);
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
@@ -1839,10 +1921,7 @@
 		alignment = 0;
 		break;
 	case I915_TILING_Y:
-		/* Despite that we check this in framebuffer_init userspace can
-		 * screw us over and change the tiling after the fact. Only
-		 * pinned buffers can't change their tiling. */
-		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+		WARN(1, "Y tiled bo slipped through, driver bug!\n");
 		return -EINVAL;
 	default:
 		BUG();
@@ -2244,11 +2323,26 @@
 		return ret;
 	}
 
-	/* Update pipe size and adjust fitter if needed */
+	/*
+	 * Update pipe size and adjust fitter if needed: the reason for this is
+	 * that in compute_mode_changes we check the native mode (not the pfit
+	 * mode) to see if we can flip rather than do a full mode set. In the
+	 * fastboot case, we'll flip, but if we don't update the pipesrc and
+	 * pfit state, we'll end up with a big fb scanned out into the wrong
+	 * sized surface.
+	 *
+	 * To fix this properly, we need to hoist the checks up into
+	 * compute_mode_changes (or above), check the actual pfit state and
+	 * whether the platform allows pfit disable with pipe active, and only
+	 * then update the pipesrc and pfit state, even on the flip path.
+	 */
 	if (i915_fastboot) {
+		const struct drm_display_mode *adjusted_mode =
+			&intel_crtc->config.adjusted_mode;
+
 		I915_WRITE(PIPESRC(intel_crtc->pipe),
-			   ((crtc->mode.hdisplay - 1) << 16) |
-			   (crtc->mode.vdisplay - 1));
+			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+			   (adjusted_mode->crtc_vdisplay - 1));
 		if (!intel_crtc->config.pch_pfit.enabled &&
 		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
 		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2327,9 +2421,10 @@
 			   FDI_FE_ERRC_ENABLE);
 }
 
-static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
+static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
 {
-	return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
+	return crtc->base.enabled && crtc->active &&
+		crtc->config.has_pch_encoder;
 }
 
 static void ivb_modeset_global_resources(struct drm_device *dev)
@@ -2872,6 +2967,7 @@
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
 	u32 temp;
 
@@ -2889,14 +2985,14 @@
 			SBI_ICLK);
 
 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
-	if (crtc->mode.clock == 20000) {
+	if (clock == 20000) {
 		auxdiv = 1;
 		divsel = 0x41;
 		phaseinc = 0x20;
 	} else {
 		/* The iCLK virtual clock root frequency is in MHz,
-		 * but the crtc->mode.clock in in KHz. To get the divisors,
-		 * it is necessary to divide one by another, so we
+		 * but the adjusted_mode->crtc_clock is in KHz. To get the
+		 * divisors, it is necessary to divide one by another, so we
 		 * convert the virtual clock precision to KHz here for higher
 		 * precision.
 		 */
@@ -2904,7 +3000,7 @@
 		u32 iclk_pi_range = 64;
 		u32 desired_divisor, msb_divisor_value, pi_value;
 
-		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+		desired_divisor = (iclk_virtual_root_freq / clock);
 		msb_divisor_value = desired_divisor / iclk_pi_range;
 		pi_value = desired_divisor % iclk_pi_range;
 
@@ -2920,7 +3016,7 @@
 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
-			crtc->mode.clock,
+			clock,
 			auxdiv,
 			divsel,
 			phasedir,
@@ -2979,6 +3075,48 @@
 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
 }
 
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t temp;
+
+	temp = I915_READ(SOUTH_CHICKEN1);
+	if (temp & FDI_BC_BIFURCATION_SELECT)
+		return;
+
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+	temp |= FDI_BC_BIFURCATION_SELECT;
+	DRM_DEBUG_KMS("enabling fdi C rx\n");
+	I915_WRITE(SOUTH_CHICKEN1, temp);
+	POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	switch (intel_crtc->pipe) {
+	case PIPE_A:
+		break;
+	case PIPE_B:
+		if (intel_crtc->config.fdi_lanes > 2)
+			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+		else
+			cpt_enable_fdi_bc_bifurcation(dev);
+
+		break;
+	case PIPE_C:
+		cpt_enable_fdi_bc_bifurcation(dev);
+
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * Enable PCH resources required for PCH ports:
  *   - PCH PLLs
@@ -2997,6 +3135,9 @@
 
 	assert_pch_transcoder_disabled(dev_priv, pipe);
 
+	if (IS_IVYBRIDGE(dev))
+		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+
 	/* Write the TU size bits before fdi link training, so that error
 	 * detection works. */
 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
@@ -3240,6 +3381,92 @@
 			intel_plane_disable(&intel_plane->base);
 }
 
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	/* We can only enable IPS after we enable a plane and wait for a vblank.
+	 * We guarantee that the plane is enabled by calling intel_enable_ips
+	 * only after intel_enable_plane. And intel_enable_plane already waits
+	 * for a vblank, so all we need to do here is to enable the IPS bit. */
+	assert_plane_enabled(dev_priv, crtc->plane);
+	I915_WRITE(IPS_CTL, IPS_ENABLE);
+
+	/* The bit only becomes 1 in the next vblank, so this wait here is
+	 * essentially intel_wait_for_vblank. If we don't have this and don't
+	 * wait for vblanks until the end of crtc_enable, then the HW state
+	 * readout code will complain that the expected IPS_CTL value is not the
+	 * one we read. */
+	if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+		DRM_ERROR("Timed out waiting for IPS enable\n");
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!crtc->config.ips_enabled)
+		return;
+
+	assert_plane_enabled(dev_priv, crtc->plane);
+	I915_WRITE(IPS_CTL, 0);
+	POSTING_READ(IPS_CTL);
+
+	/* We need to wait for a vblank before we can disable the plane. */
+	intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int palreg = PALETTE(pipe);
+	int i;
+	bool reenable_ips = false;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled || !intel_crtc->active)
+		return;
+
+	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+			assert_dsi_pll_enabled(dev_priv);
+		else
+			assert_pll_enabled(dev_priv, pipe);
+	}
+
+	/* use legacy palette for Ironlake */
+	if (HAS_PCH_SPLIT(dev))
+		palreg = LGC_PALETTE(pipe);
+
+	/* Workaround : Do not read or write the pipe palette/gamma data while
+	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+	 */
+	if (intel_crtc->config.ips_enabled &&
+	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+	     GAMMA_MODE_MODE_SPLIT)) {
+		hsw_disable_ips(intel_crtc);
+		reenable_ips = true;
+	}
+
+	for (i = 0; i < 256; i++) {
+		I915_WRITE(palreg + 4 * i,
+			   (intel_crtc->lut_r[i] << 16) |
+			   (intel_crtc->lut_g[i] << 8) |
+			   intel_crtc->lut_b[i]);
+	}
+
+	if (reenable_ips)
+		hsw_enable_ips(intel_crtc);
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3259,8 +3486,6 @@
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
 	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
-	intel_update_watermarks(dev);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
@@ -3283,9 +3508,10 @@
 	 */
 	intel_crtc_load_lut(crtc);
 
+	intel_update_watermarks(crtc);
 	intel_enable_pipe(dev_priv, pipe,
-			  intel_crtc->config.has_pch_encoder);
-	intel_enable_plane(dev_priv, plane, pipe);
+			  intel_crtc->config.has_pch_encoder, false);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 
@@ -3319,34 +3545,74 @@
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void hsw_enable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
 
-	if (!crtc->config.ips_enabled)
-		return;
+	intel_enable_primary_plane(dev_priv, plane, pipe);
+	intel_enable_planes(crtc);
+	intel_crtc_update_cursor(crtc, true);
 
-	/* We can only enable IPS after we enable a plane and wait for a vblank.
-	 * We guarantee that the plane is enabled by calling intel_enable_ips
-	 * only after intel_enable_plane. And intel_enable_plane already waits
-	 * for a vblank, so all we need to do here is to enable the IPS bit. */
-	assert_plane_enabled(dev_priv, crtc->plane);
-	I915_WRITE(IPS_CTL, IPS_ENABLE);
+	hsw_enable_ips(intel_crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
-static void hsw_disable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+
+	/* FBC must be disabled before disabling the plane on HSW. */
+	if (dev_priv->fbc.plane == plane)
+		intel_disable_fbc(dev);
+
+	hsw_disable_ips(intel_crtc);
+
+	intel_crtc_update_cursor(crtc, false);
+	intel_disable_planes(crtc);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
 
-	if (!crtc->config.ips_enabled)
+	/* We want to get the other_active_crtc only if there's only 1 other
+	 * active crtc. */
+	list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+		if (!crtc_it->active || crtc_it == crtc)
+			continue;
+
+		if (other_active_crtc)
+			return;
+
+		other_active_crtc = crtc_it;
+	}
+	if (!other_active_crtc)
 		return;
 
-	assert_plane_enabled(dev_priv, crtc->plane);
-	I915_WRITE(IPS_CTL, 0);
-
-	/* We need to wait for a vblank before we can disable the plane. */
-	intel_wait_for_vblank(dev, crtc->pipe);
+	intel_wait_for_vblank(dev, other_active_crtc->pipe);
+	intel_wait_for_vblank(dev, other_active_crtc->pipe);
 }
 
 static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3356,7 +3622,6 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3369,8 +3634,6 @@
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
-	intel_update_watermarks(dev);
-
 	if (intel_crtc->config.has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
@@ -3391,23 +3654,22 @@
 	intel_ddi_set_pipe_settings(crtc);
 	intel_ddi_enable_transcoder_func(crtc);
 
+	intel_update_watermarks(crtc);
 	intel_enable_pipe(dev_priv, pipe,
-			  intel_crtc->config.has_pch_encoder);
-	intel_enable_plane(dev_priv, plane, pipe);
-	intel_enable_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
-
-	hsw_enable_ips(intel_crtc);
+			  intel_crtc->config.has_pch_encoder, false);
 
 	if (intel_crtc->config.has_pch_encoder)
 		lpt_pch_enable(crtc);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_update_fbc(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	for_each_encoder_on_crtc(dev, crtc, encoder)
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		encoder->enable(encoder);
+		intel_opregion_notify_encoder(encoder, true);
+	}
+
+	/* If we change the relative order between pipe/planes enabling, we need
+	 * to change the workaround. */
+	haswell_mode_set_planes_workaround(intel_crtc);
+	haswell_crtc_enable_planes(crtc);
 
 	/*
 	 * There seems to be a race in PCH platform hw (at least on some
@@ -3460,7 +3722,7 @@
 
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
 
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3501,7 +3763,7 @@
 	}
 
 	intel_crtc->active = false;
-	intel_update_watermarks(dev);
+	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
@@ -3515,27 +3777,17 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (!intel_crtc->active)
 		return;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
+	haswell_crtc_disable_planes(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
-
-	intel_crtc_wait_for_pending_flips(crtc);
-	drm_vblank_off(dev, pipe);
-
-	/* FBC must be disabled before disabling the plane on HSW. */
-	if (dev_priv->fbc.plane == plane)
-		intel_disable_fbc(dev);
-
-	hsw_disable_ips(intel_crtc);
-
-	intel_crtc_update_cursor(crtc, false);
-	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	}
 
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3558,7 +3810,7 @@
 	}
 
 	intel_crtc->active = false;
-	intel_update_watermarks(dev);
+	intel_update_watermarks(crtc);
 
 	mutex_lock(&dev->struct_mutex);
 	intel_update_fbc(dev);
@@ -3650,6 +3902,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
+	bool is_dsi;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3657,13 +3910,15 @@
 		return;
 
 	intel_crtc->active = true;
-	intel_update_watermarks(dev);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	vlv_enable_pll(intel_crtc);
+	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+	if (!is_dsi)
+		vlv_enable_pll(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
@@ -3673,8 +3928,9 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-	intel_enable_plane(dev_priv, plane, pipe);
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 
@@ -3699,7 +3955,6 @@
 		return;
 
 	intel_crtc->active = true;
-	intel_update_watermarks(dev);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
@@ -3711,8 +3966,9 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_enable_pipe(dev_priv, pipe, false);
-	intel_enable_plane(dev_priv, plane, pipe);
+	intel_update_watermarks(crtc);
+	intel_enable_pipe(dev_priv, pipe, false, false);
+	intel_enable_primary_plane(dev_priv, plane, pipe);
 	intel_enable_planes(crtc);
 	/* The fixup needs to happen before cursor is enabled */
 	if (IS_G4X(dev))
@@ -3768,7 +4024,7 @@
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_primary_plane(dev_priv, plane, pipe);
 
 	intel_disable_pipe(dev_priv, pipe);
 
@@ -3778,11 +4034,15 @@
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	i9xx_disable_pll(dev_priv, pipe);
+	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+		vlv_disable_pll(dev_priv, pipe);
+	else if (!IS_VALLEYVIEW(dev))
+		i9xx_disable_pll(dev_priv, pipe);
 
 	intel_crtc->active = false;
+	intel_update_watermarks(crtc);
+
 	intel_update_fbc(dev);
-	intel_update_watermarks(dev);
 }
 
 static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3856,6 +4116,7 @@
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
 	if (crtc->fb) {
@@ -4045,8 +4306,7 @@
 	 */
 	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
-	fdi_dotclock = adjusted_mode->clock;
-	fdi_dotclock /= pipe_config->pixel_multiplier;
+	fdi_dotclock = adjusted_mode->crtc_clock;
 
 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
 					   pipe_config->pipe_bpp);
@@ -4088,13 +4348,39 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		/* FDI link clock is fixed at 2.7G */
-		if (pipe_config->requested_mode.clock * 3
-		    > IRONLAKE_FDI_FREQ * 4)
+	/* FIXME should check pixel clock limits on all platforms */
+	if (INTEL_INFO(dev)->gen < 4) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		int clock_limit =
+			dev_priv->display.get_display_clock_speed(dev);
+
+		/*
+		 * Enable pixel doubling when the dot clock
+		 * is > 90% of the (display) core speed.
+		 *
+		 * GDG double wide on either pipe,
+		 * otherwise pipe A only.
+		 */
+		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+			clock_limit *= 2;
+			pipe_config->double_wide = true;
+		}
+
+		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
 			return -EINVAL;
 	}
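
A numeric illustration of the 90% rule above; the core clock figure is a made-up example, not a value returned by get_display_clock_speed():

	/*
	 * clock_limit = 200000 kHz  ->  90% threshold = 180000 kHz
	 * a 250000 kHz mode on pipe A exceeds that, so double_wide is set and
	 * clock_limit becomes 400000 kHz; since 250000 < 360000 (the new 90%
	 * limit), the mode is accepted instead of failing with -EINVAL.
	 */
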
 
+	/*
+	 * Pipe horizontal size must be even in:
+	 * - DVO ganged mode
+	 * - LVDS dual channel mode
+	 * - Double wide pipe
+	 */
+	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+		pipe_config->pipe_src_w &= ~1;
+
 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
 	 */
@@ -4258,28 +4544,6 @@
 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int vlv_get_refclk(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int refclk = 27000; /* for DP & HDMI */
-
-	return 100000; /* only one validated so far */
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
-		refclk = 96000;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (intel_panel_use_ssc(dev_priv))
-			refclk = 100000;
-		else
-			refclk = 96000;
-	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
-		refclk = 100000;
-	}
-
-	return refclk;
-}
-
 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4287,7 +4551,7 @@
 	int refclk;
 
 	if (IS_VALLEYVIEW(dev)) {
-		refclk = vlv_get_refclk(crtc);
+		refclk = 100000;
 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4345,7 +4609,8 @@
 	}
 }
 
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
+		pipe)
 {
 	u32 reg_val;
 
@@ -4353,24 +4618,24 @@
 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
 	 * and set it to a reasonable value instead.
 	 */
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 	reg_val &= 0xffffff00;
 	reg_val |= 0x00000030;
-	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 	reg_val &= 0x8cffffff;
 	reg_val = 0x8c000000;
-	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
 	reg_val &= 0xffffff00;
-	vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-	reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
 	reg_val &= 0x00ffffff;
 	reg_val |= 0xb0000000;
-	vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 }
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4436,18 +4701,18 @@
 
 	/* PLL B needs special handling */
 	if (pipe)
-		vlv_pllb_recal_opamp(dev_priv);
+		vlv_pllb_recal_opamp(dev_priv, pipe);
 
 	/* Set up Tx target for periodic Rcomp update */
-	vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
 
 	/* Disable target IRef on PLL */
-	reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+	reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
 	reg_val &= 0x00ffffff;
-	vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
 
 	/* Disable fast lock */
-	vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+	vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
 
 	/* Set idtafcrecal before PLL is enabled */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4461,55 +4726,55 @@
 	 * Note: don't use the DAC post divider as it seems unstable.
 	 */
 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
 	mdiv |= DPIO_ENABLE_CALIBRATION;
-	vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
 	/* Set HBR and RBR LPF coefficients */
 	if (crtc->config.port_clock == 162000 ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 				 0x009f0003);
 	else
-		vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+		vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
 				 0x00d0000f);
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
 		/* Use SSC source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df40000);
 		else
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df70000);
 	} else { /* HDMI or VGA */
 		/* Use bend source */
 		if (!pipe)
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df70000);
 		else
-			vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+			vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
 					 0x0df40000);
 	}
 
-	coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+	coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
 	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
 		coreclk |= 0x01000000;
-	vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+	vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
 
-	vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
 
 	/* Enable DPIO clock input */
 	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
 		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-	if (pipe)
+	/* We should never disable this, set it here for state tracking */
+	if (pipe == PIPE_B)
 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
 	dpll |= DPLL_VCO_ENABLE;
 	crtc->config.dpll_hw_state.dpll = dpll;
 
@@ -4647,7 +4912,6 @@
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	struct drm_display_mode *adjusted_mode =
 		&intel_crtc->config.adjusted_mode;
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
 	/* We need to be careful not to change the adjusted mode, for otherwise
@@ -4700,7 +4964,8 @@
 	 * always be the user's requested size.
 	 */
 	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
+		   (intel_crtc->config.pipe_src_h - 1));
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4738,8 +5003,11 @@
 	}
 
 	tmp = I915_READ(PIPESRC(crtc->pipe));
-	pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
-	pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
 }
 
 static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4759,7 +5027,7 @@
 
 	crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
-	crtc->mode.clock = pipe_config->adjusted_mode.clock;
+	crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
 	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
 }
 
@@ -4775,17 +5043,8 @@
 	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
 		pipeconf |= PIPECONF_ENABLE;
 
-	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
-		/* Enable pixel doubling when the dot clock is > 90% of the (display)
-		 * core speed.
-		 *
-		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-		 * pipe == 0 check?
-		 */
-		if (intel_crtc->config.requested_mode.clock >
-		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-			pipeconf |= PIPECONF_DOUBLE_WIDE;
-	}
+	if (intel_crtc->config.double_wide)
+		pipeconf |= PIPECONF_DOUBLE_WIDE;
 
 	/* only g4x and later have fancy bpc/dither controls */
 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4839,14 +5098,13 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
 	u32 dspcntr;
 	bool ok, has_reduced_clock = false;
-	bool is_lvds = false;
+	bool is_lvds = false, is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	int ret;
@@ -4856,42 +5114,49 @@
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
 			break;
+		case INTEL_OUTPUT_DSI:
+			is_dsi = true;
+			break;
 		}
 
 		num_connectors++;
 	}
 
-	refclk = i9xx_get_refclk(crtc, num_connectors);
+	if (is_dsi)
+		goto skip_dpll;
 
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc, refclk);
-	ok = dev_priv->display.find_dpll(limit, crtc,
-					 intel_crtc->config.port_clock,
-					 refclk, NULL, &clock);
-	if (!ok && !intel_crtc->config.clock_set) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc,
-						    dev_priv->lvds_downclock,
-						    refclk, &clock,
-						    &reduced_clock);
-	}
-	/* Compat-code for transition, will disappear. */
 	if (!intel_crtc->config.clock_set) {
+		refclk = i9xx_get_refclk(crtc, num_connectors);
+
+		/*
+		 * Returns a set of divisors for the desired target clock with
+		 * the given refclk, or FALSE.  The returned values represent
+		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
+		 * 2) / p1 / p2.
+		 */
+		limit = intel_limit(crtc, refclk);
+		ok = dev_priv->display.find_dpll(limit, crtc,
+						 intel_crtc->config.port_clock,
+						 refclk, NULL, &clock);
+		if (!ok) {
+			DRM_ERROR("Couldn't find PLL settings for mode!\n");
+			return -EINVAL;
+		}
+
+		if (is_lvds && dev_priv->lvds_downclock_avail) {
+			/*
+			 * Ensure we match the reduced clock's P to the target
+			 * clock.  If the clocks don't match, we can't switch
+			 * the display clock by using the FP0/FP1. In such case
+			 * we will disable the LVDS downclock feature.
+			 */
+			has_reduced_clock =
+				dev_priv->display.find_dpll(limit, crtc,
+							    dev_priv->lvds_downclock,
+							    refclk, &clock,
+							    &reduced_clock);
+		}
+		/* Compat-code for transition, will disappear. */
 		intel_crtc->config.dpll.n = clock.n;
 		intel_crtc->config.dpll.m1 = clock.m1;
 		intel_crtc->config.dpll.m2 = clock.m2;
@@ -4899,17 +5164,19 @@
 		intel_crtc->config.dpll.p2 = clock.p2;
 	}
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev)) {
 		i8xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
 				num_connectors);
-	else if (IS_VALLEYVIEW(dev))
+	} else if (IS_VALLEYVIEW(dev)) {
 		vlv_update_pll(intel_crtc);
-	else
+	} else {
 		i9xx_update_pll(intel_crtc,
 				has_reduced_clock ? &reduced_clock : NULL,
                                 num_connectors);
+	}
 
+skip_dpll:
 	/* Set up the display plane register */
 	dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -4926,8 +5193,8 @@
 	 * which should always be the user's requested size.
 	 */
 	I915_WRITE(DSPSIZE(plane),
-		   ((mode->vdisplay - 1) << 16) |
-		   (mode->hdisplay - 1));
+		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
+		   (intel_crtc->config.pipe_src_w - 1));
 	I915_WRITE(DSPPOS(plane), 0);
 
 	i9xx_set_pipeconf(intel_crtc);
@@ -4937,8 +5204,6 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
@@ -4969,6 +5234,32 @@
 			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+			       struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = pipe_config->cpu_transcoder;
+	intel_clock_t clock;
+	u32 mdiv;
+	int refclk = 100000;
+
+	mutex_lock(&dev_priv->dpio_lock);
+	mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+	clock.m2 = mdiv & DPIO_M2DIV_MASK;
+	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+	vlv_clock(refclk, &clock);
+
+	/* clock.dot is the fast clock */
+	pipe_config->port_clock = clock.dot / 5;
+}
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_config *pipe_config)
 {
@@ -4983,6 +5274,25 @@
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		switch (tmp & PIPECONF_BPC_MASK) {
+		case PIPECONF_6BPC:
+			pipe_config->pipe_bpp = 18;
+			break;
+		case PIPECONF_8BPC:
+			pipe_config->pipe_bpp = 24;
+			break;
+		case PIPECONF_10BPC:
+			pipe_config->pipe_bpp = 30;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (INTEL_INFO(dev)->gen < 4)
+		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
 	intel_get_pipe_timings(crtc, pipe_config);
 
 	i9xx_get_pfit_config(crtc, pipe_config);
@@ -5015,6 +5325,11 @@
 						     DPLL_PORTB_READY_MASK);
 	}
 
+	if (IS_VALLEYVIEW(dev))
+		vlv_crtc_clock_get(crtc, pipe_config);
+	else
+		i9xx_crtc_clock_get(crtc, pipe_config);
+
 	return true;
 }
 
@@ -5576,48 +5891,6 @@
 	return true;
 }
 
-static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t temp;
-
-	temp = I915_READ(SOUTH_CHICKEN1);
-	if (temp & FDI_BC_BIFURCATION_SELECT)
-		return;
-
-	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
-	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
-
-	temp |= FDI_BC_BIFURCATION_SELECT;
-	DRM_DEBUG_KMS("enabling fdi C rx\n");
-	I915_WRITE(SOUTH_CHICKEN1, temp);
-	POSTING_READ(SOUTH_CHICKEN1);
-}
-
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
-{
-	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	switch (intel_crtc->pipe) {
-	case PIPE_A:
-		break;
-	case PIPE_B:
-		if (intel_crtc->config.fdi_lanes > 2)
-			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
-		else
-			cpt_enable_fdi_bc_bifurcation(dev);
-
-		break;
-	case PIPE_C:
-		cpt_enable_fdi_bc_bifurcation(dev);
-
-		break;
-	default:
-		BUG();
-	}
-}
-
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
 {
 	/*
@@ -5799,11 +6072,6 @@
 	else
 		intel_crtc->lowfreq_avail = false;
 
-	if (intel_crtc->config.has_pch_encoder) {
-		pll = intel_crtc_to_shared_dpll(intel_crtc);
-
-	}
-
 	intel_set_pipe_timings(intel_crtc);
 
 	if (intel_crtc->config.has_pch_encoder) {
@@ -5811,9 +6079,6 @@
 					     &intel_crtc->config.fdi_m_n);
 	}
 
-	if (IS_IVYBRIDGE(dev))
-		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
-
 	ironlake_set_pipeconf(crtc);
 
 	/* Set up the display plane register */
@@ -5822,25 +6087,67 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+		& ~TU_SIZE_MASK;
+	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+					 enum transcoder transcoder,
+					 struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = crtc->pipe;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	} else {
+		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+			& ~TU_SIZE_MASK;
+		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	}
+}
+
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_config *pipe_config)
+{
+	if (crtc->config.has_pch_encoder)
+		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+	else
+		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+					     &pipe_config->dp_m_n);
+}
+
 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
 					struct intel_crtc_config *pipe_config)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum transcoder transcoder = pipe_config->cpu_transcoder;
-
-	pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
-	pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
-	pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
-					& ~TU_SIZE_MASK;
-	pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-	pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
-				   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+				     &pipe_config->fdi_m_n);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5881,6 +6188,23 @@
 	if (!(tmp & PIPECONF_ENABLE))
 		return false;
 
+	switch (tmp & PIPECONF_BPC_MASK) {
+	case PIPECONF_6BPC:
+		pipe_config->pipe_bpp = 18;
+		break;
+	case PIPECONF_8BPC:
+		pipe_config->pipe_bpp = 24;
+		break;
+	case PIPECONF_10BPC:
+		pipe_config->pipe_bpp = 30;
+		break;
+	case PIPECONF_12BPC:
+		pipe_config->pipe_bpp = 36;
+		break;
+	default:
+		break;
+	}
+
 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
 		struct intel_shared_dpll *pll;
 
@@ -5912,6 +6236,8 @@
 		pipe_config->pixel_multiplier =
 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+		ironlake_pch_clock_get(crtc, pipe_config);
 	} else {
 		pipe_config->pixel_multiplier = 1;
 	}
@@ -5968,8 +6294,8 @@
  * register. Callers should take care of disabling all the display engine
  * functions, doing the mode unset, fixing interrupts, etc.
  */
-void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-		       bool switch_to_fclk, bool allow_power_down)
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+			      bool switch_to_fclk, bool allow_power_down)
 {
 	uint32_t val;
 
@@ -5997,7 +6323,10 @@
 
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_DISABLE;
-	I915_WRITE(D_COMP, val);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+		DRM_ERROR("Failed to disable D_COMP\n");
+	mutex_unlock(&dev_priv->rps.hw_lock);
 	POSTING_READ(D_COMP);
 	ndelay(100);
 
@@ -6016,7 +6345,7 @@
  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  * source.
  */
-void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
 	uint32_t val;
 
@@ -6039,7 +6368,10 @@
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
-	I915_WRITE(D_COMP, val);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+		DRM_ERROR("Failed to enable D_COMP\n");
+	mutex_unlock(&dev_priv->rps.hw_lock);
 	POSTING_READ(D_COMP);
 
 	val = I915_READ(LCPLL_CTL);
@@ -6222,22 +6554,79 @@
 	}
 }
 
-static void haswell_modeset_global_resources(struct drm_device *dev)
+#define for_each_power_domain(domain, mask)				\
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
+		if ((1 << (domain)) & (mask))
+
+static unsigned long get_pipe_power_domains(struct drm_device *dev,
+					    enum pipe pipe, bool pfit_enabled)
 {
-	bool enable = false;
+	unsigned long mask;
+	enum transcoder transcoder;
+
+	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+	mask = BIT(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	if (pfit_enabled)
+		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+	return mask;
+}
+
+void intel_display_set_init_power(struct drm_device *dev, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->power_domains.init_power_on == enable)
+		return;
+
+	if (enable)
+		intel_display_power_get(dev, POWER_DOMAIN_INIT);
+	else
+		intel_display_power_put(dev, POWER_DOMAIN_INIT);
+
+	dev_priv->power_domains.init_power_on = enable;
+}
+
+static void modeset_update_power_wells(struct drm_device *dev)
+{
+	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
 	struct intel_crtc *crtc;
 
+	/*
+	 * First get all needed power domains, then put all unneeded, to avoid
+	 * any unnecessary toggling of the power wells.
+	 */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		enum intel_display_power_domain domain;
+
 		if (!crtc->base.enabled)
 			continue;
 
-		if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
-		    crtc->config.cpu_transcoder != TRANSCODER_EDP)
-			enable = true;
+		pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
+						crtc->pipe,
+						crtc->config.pch_pfit.enabled);
+
+		for_each_power_domain(domain, pipe_domains[crtc->pipe])
+			intel_display_power_get(dev, domain);
 	}
 
-	intel_set_power_well(dev, enable);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		enum intel_display_power_domain domain;
 
+		for_each_power_domain(domain, crtc->enabled_power_domains)
+			intel_display_power_put(dev, domain);
+
+		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+	}
+
+	intel_display_set_init_power(dev, false);
+}
+
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+	modeset_update_power_wells(dev);
 	hsw_update_package_c8(dev);
 }
 
@@ -6276,8 +6665,6 @@
 
 	ret = intel_pipe_set_base(crtc, x, y, fb);
 
-	intel_update_watermarks(dev);
-
 	return ret;
 }
 
@@ -6385,6 +6772,44 @@
 	return 0;
 }
 
+static struct {
+	int clock;
+	u32 config;
+} hdmi_audio_clock[] = {
+	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+		if (mode->clock == hdmi_audio_clock[i].clock)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+		i = 1;
+	}
+
+	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+		      hdmi_audio_clock[i].clock,
+		      hdmi_audio_clock[i].config);
+
+	return hdmi_audio_clock[i].config;
+}
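
For illustration, the lookup above behaves as follows (a minimal sketch with hypothetical mode clocks): an exact table hit returns its AUD_CONFIG value, and anything else falls back to the bspec default entry.

	struct drm_display_mode m1 = { .clock = 148500 };  /* matches the 148.5 MHz entry */
	struct drm_display_mode m2 = { .clock = 100000 };  /* no entry in hdmi_audio_clock[] */

	u32 c1 = audio_config_hdmi_pixel_clock(&m1); /* AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 */
	u32 c2 = audio_config_hdmi_pixel_clock(&m2); /* index 1: the 25.2 MHz bspec default */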
+
 static bool intel_eld_uptodate(struct drm_connector *connector,
 			       int reg_eldv, uint32_t bits_eldv,
 			       int reg_elda, uint32_t bits_elda,
@@ -6415,7 +6840,8 @@
 }
 
 static void g4x_write_eld(struct drm_connector *connector,
-			  struct drm_crtc *crtc)
+			  struct drm_crtc *crtc,
+			  struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6455,7 +6881,8 @@
 }
 
 static void haswell_write_eld(struct drm_connector *connector,
-				     struct drm_crtc *crtc)
+			      struct drm_crtc *crtc,
+			      struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6508,8 +6935,9 @@
 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-	} else
-		I915_WRITE(aud_config, 0);
+	} else {
+		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+	}
 
 	if (intel_eld_uptodate(connector,
 			       aud_cntrl_st2, eldv,
@@ -6542,7 +6970,8 @@
 }
 
 static void ironlake_write_eld(struct drm_connector *connector,
-				     struct drm_crtc *crtc)
+			       struct drm_crtc *crtc,
+			       struct drm_display_mode *mode)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	uint8_t *eld = connector->eld;
@@ -6560,6 +6989,11 @@
 		aud_config = IBX_AUD_CFG(pipe);
 		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(connector->dev)) {
+		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
 	} else {
 		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
 		aud_config = CPT_AUD_CFG(pipe);
@@ -6569,8 +7003,19 @@
 
 	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
-	i = I915_READ(aud_cntl_st);
-	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	if (IS_VALLEYVIEW(connector->dev))  {
+		struct intel_encoder *intel_encoder;
+		struct intel_digital_port *intel_dig_port;
+
+		intel_encoder = intel_attached_encoder(connector);
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		i = intel_dig_port->port;
+	} else {
+		i = I915_READ(aud_cntl_st);
+		i = (i >> 29) & DIP_PORT_SEL_MASK;
+		/* DIP_Port_Select, 0x1 = PortB */
+	}
+
 	if (!i) {
 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 		/* operate blindly on all ports */
@@ -6586,8 +7031,9 @@
 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-	} else
-		I915_WRITE(aud_config, 0);
+	} else {
+		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+	}
 
 	if (intel_eld_uptodate(connector,
 			       aud_cntrl_st2, eldv,
@@ -6637,50 +7083,7 @@
 	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
 	if (dev_priv->display.write_eld)
-		dev_priv->display.write_eld(connector, crtc);
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	int palreg = PALETTE(pipe);
-	int i;
-	bool reenable_ips = false;
-
-	/* The clocks have to be on to load the palette. */
-	if (!crtc->enabled || !intel_crtc->active)
-		return;
-
-	if (!HAS_PCH_SPLIT(dev_priv->dev))
-		assert_pll_enabled(dev_priv, pipe);
-
-	/* use legacy palette for Ironlake */
-	if (HAS_PCH_SPLIT(dev))
-		palreg = LGC_PALETTE(pipe);
-
-	/* Workaround : Do not read or write the pipe palette/gamma data while
-	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-	 */
-	if (intel_crtc->config.ips_enabled &&
-	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-	     GAMMA_MODE_MODE_SPLIT)) {
-		hsw_disable_ips(intel_crtc);
-		reenable_ips = true;
-	}
-
-	for (i = 0; i < 256; i++) {
-		I915_WRITE(palreg + 4 * i,
-			   (intel_crtc->lut_r[i] << 16) |
-			   (intel_crtc->lut_g[i] << 8) |
-			   intel_crtc->lut_b[i]);
-	}
-
-	if (reenable_ips)
-		hsw_enable_ips(intel_crtc);
+		dev_priv->display.write_eld(connector, crtc, mode);
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6778,23 +7181,20 @@
 	int pipe = intel_crtc->pipe;
 	int x = intel_crtc->cursor_x;
 	int y = intel_crtc->cursor_y;
-	u32 base, pos;
+	u32 base = 0, pos = 0;
 	bool visible;
 
-	pos = 0;
-
-	if (on && crtc->enabled && crtc->fb) {
+	if (on)
 		base = intel_crtc->cursor_addr;
-		if (x > (int) crtc->fb->width)
-			base = 0;
 
-		if (y > (int) crtc->fb->height)
-			base = 0;
-	} else
+	if (x >= intel_crtc->config.pipe_src_w)
+		base = 0;
+
+	if (y >= intel_crtc->config.pipe_src_h)
 		base = 0;
 
 	if (x < 0) {
-		if (x + intel_crtc->cursor_width < 0)
+		if (x + intel_crtc->cursor_width <= 0)
 			base = 0;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6803,7 +7203,7 @@
 	pos |= x << CURSOR_X_SHIFT;
 
 	if (y < 0) {
-		if (y + intel_crtc->cursor_height < 0)
+		if (y + intel_crtc->cursor_height <= 0)
 			base = 0;
 
 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6946,8 +7346,8 @@
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	intel_crtc->cursor_x = x;
-	intel_crtc->cursor_y = y;
+	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
+	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
 
 	if (intel_crtc->active)
 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
@@ -6955,27 +7355,6 @@
 	return 0;
 }
 
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-				 u16 blue, int regno)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	intel_crtc->lut_r[regno] = red >> 8;
-	intel_crtc->lut_g[regno] = green >> 8;
-	intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-			     u16 *blue, int regno)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	*red = intel_crtc->lut_r[regno] << 8;
-	*green = intel_crtc->lut_g[regno] << 8;
-	*blue = intel_crtc->lut_b[regno] << 8;
-}
-
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 				 u16 *blue, uint32_t start, uint32_t size)
 {
@@ -7011,14 +7390,21 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto err;
+
 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
-	if (ret) {
-		drm_gem_object_unreference_unlocked(&obj->base);
-		kfree(intel_fb);
-		return ERR_PTR(ret);
-	}
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		goto err;
 
 	return &intel_fb->base;
+err:
+	drm_gem_object_unreference_unlocked(&obj->base);
+	kfree(intel_fb);
+
+	return ERR_PTR(ret);
 }
 
 static u32
@@ -7061,6 +7447,7 @@
 mode_fits_in_fbdev(struct drm_device *dev,
 		   struct drm_display_mode *mode)
 {
+#ifdef CONFIG_DRM_I915_FBDEV
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_framebuffer *fb;
@@ -7081,6 +7468,9 @@
 		return NULL;
 
 	return fb;
+#else
+	return NULL;
+#endif
 }
 
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7224,6 +7614,22 @@
 	mutex_unlock(&crtc->mutex);
 }
 
+static int i9xx_pll_refclk(struct drm_device *dev,
+			   const struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+		return dev_priv->vbt.lvds_ssc_freq * 1000;
+	else if (HAS_PCH_SPLIT(dev))
+		return 120000;
+	else if (!IS_GEN2(dev))
+		return 96000;
+	else
+		return 48000;
+}
+
 /* Returns the clock of the currently programmed mode of the given pipe. */
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 				struct intel_crtc_config *pipe_config)
@@ -7231,14 +7637,15 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = pipe_config->cpu_transcoder;
-	u32 dpll = I915_READ(DPLL(pipe));
+	u32 dpll = pipe_config->dpll_hw_state.dpll;
 	u32 fp;
 	intel_clock_t clock;
+	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-		fp = I915_READ(FP0(pipe));
+		fp = pipe_config->dpll_hw_state.fp0;
 	else
-		fp = I915_READ(FP1(pipe));
+		fp = pipe_config->dpll_hw_state.fp1;
 
 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
 	if (IS_PINEVIEW(dev)) {
@@ -7269,14 +7676,13 @@
 		default:
 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
-			pipe_config->adjusted_mode.clock = 0;
 			return;
 		}
 
 		if (IS_PINEVIEW(dev))
-			pineview_clock(96000, &clock);
+			pineview_clock(refclk, &clock);
 		else
-			i9xx_clock(96000, &clock);
+			i9xx_clock(refclk, &clock);
 	} else {
 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -7284,13 +7690,6 @@
 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
 			clock.p2 = 14;
-
-			if ((dpll & PLL_REF_INPUT_MASK) ==
-			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-				/* XXX: might not be 66MHz */
-				i9xx_clock(66000, &clock);
-			} else
-				i9xx_clock(48000, &clock);
 		} else {
 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
 				clock.p1 = 2;
@@ -7302,59 +7701,55 @@
 				clock.p2 = 4;
 			else
 				clock.p2 = 2;
-
-			i9xx_clock(48000, &clock);
 		}
+
+		i9xx_clock(refclk, &clock);
 	}
 
-	pipe_config->adjusted_mode.clock = clock.dot;
+	/*
+	 * This value includes pixel_multiplier. We will use
+	 * port_clock to compute adjusted_mode.crtc_clock in the
+	 * encoder's get_config() function.
+	 */
+	pipe_config->port_clock = clock.dot;
 }
 
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config)
+int intel_dotclock_calculate(int link_freq,
+			     const struct intel_link_m_n *m_n)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-	int link_freq, repeat;
-	u64 clock;
-	u32 link_m, link_n;
-
-	repeat = pipe_config->pixel_multiplier;
-
 	/*
 	 * The calculation for the data clock is:
-	 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
 	 * But we want to avoid losing precision if possible, so:
-	 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
 	 *
 	 * and the link clock is simpler:
-	 * link_clock = (m * link_clock * repeat) / n
+	 * link_clock = (m * link_clock) / n
 	 */
 
+	if (!m_n->link_n)
+		return 0;
+
+	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
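
A worked example of the helper above, with hypothetical M/N values (link_freq in kHz, as passed by the callers):

	/*
	 * Hypothetical: link_freq = 270000 kHz (the 270 MHz link clock of a
	 * 2.7 GHz FDI/DP link), link_m = 22, link_n = 75:
	 *
	 *   dotclock = 22 * 270000 / 75 = 79200 kHz
	 *
	 * i.e. a 79.2 MHz pixel clock recovered purely from the M/N ratio.
	 */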
+
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+				   struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+
+	/* read out port_clock from the DPLL */
+	i9xx_crtc_clock_get(crtc, pipe_config);
+
 	/*
-	 * We need to get the FDI or DP link clock here to derive
-	 * the M/N dividers.
-	 *
-	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
-	 * For DP, it's either 1.62GHz or 2.7GHz.
-	 * We do our calculations in 10*MHz since we don't need much precison.
+	 * This value does not include pixel_multiplier.
+	 * We will check that port_clock and adjusted_mode.crtc_clock
+	 * agree once we know their relationship in the encoder's
+	 * get_config() function.
 	 */
-	if (pipe_config->has_pch_encoder)
-		link_freq = intel_fdi_link_freq(dev) * 10000;
-	else
-		link_freq = pipe_config->port_clock;
-
-	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
-	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
-
-	if (!link_m || !link_n)
-		return;
-
-	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
-	do_div(clock, link_n);
-
-	pipe_config->adjusted_mode.clock = clock;
+	pipe_config->adjusted_mode.crtc_clock =
+		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+					 &pipe_config->fdi_m_n);
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -7370,6 +7765,7 @@
 	int hsync = I915_READ(HSYNC(cpu_transcoder));
 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
 	int vsync = I915_READ(VSYNC(cpu_transcoder));
+	enum pipe pipe = intel_crtc->pipe;
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
@@ -7382,11 +7778,14 @@
 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
 	 * to use a real value here instead.
 	 */
-	pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+	pipe_config.cpu_transcoder = (enum transcoder) pipe;
 	pipe_config.pixel_multiplier = 1;
+	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
 	i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
-	mode->clock = pipe_config.adjusted_mode.clock;
+	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
 	mode->hdisplay = (htot & 0xffff) + 1;
 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
 	mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7492,6 +7891,9 @@
 
 		intel_decrease_pllclock(crtc);
 	}
+
+	if (dev_priv->info->gen >= 6)
+		gen6_rps_idle(dev->dev_private);
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7680,7 +8082,7 @@
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7722,7 +8124,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7771,7 +8173,7 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7816,7 +8218,7 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7895,7 +8297,7 @@
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 	return 0;
 
 err_unpin:
@@ -7940,7 +8342,7 @@
 	     fb->pitches[0] != crtc->fb->pitches[0]))
 		return -EINVAL;
 
-	work = kzalloc(sizeof *work, GFP_KERNEL);
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL)
 		return -ENOMEM;
 
@@ -8175,6 +8577,17 @@
 	return bpp;
 }
 
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+			"type: 0x%x flags: 0x%x\n",
+		mode->crtc_clock,
+		mode->crtc_hdisplay, mode->crtc_hsync_start,
+		mode->crtc_hsync_end, mode->crtc_htotal,
+		mode->crtc_vdisplay, mode->crtc_vsync_start,
+		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
 				   struct intel_crtc_config *pipe_config,
 				   const char *context)
@@ -8191,10 +8604,19 @@
 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
 		      pipe_config->fdi_m_n.tu);
+	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+		      pipe_config->has_dp_encoder,
+		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+		      pipe_config->dp_m_n.tu);
 	DRM_DEBUG_KMS("requested mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
 	DRM_DEBUG_KMS("adjusted mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
 		      pipe_config->gmch_pfit.control,
 		      pipe_config->gmch_pfit.pgm_ratios,
@@ -8204,6 +8626,7 @@
 		      pipe_config->pch_pfit.size,
 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 }
 
 static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8247,6 +8670,7 @@
 
 	drm_mode_copy(&pipe_config->adjusted_mode, mode);
 	drm_mode_copy(&pipe_config->requested_mode, mode);
+
 	pipe_config->cpu_transcoder =
 		(enum transcoder) to_intel_crtc(crtc)->pipe;
 	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8273,13 +8697,25 @@
 	if (plane_bpp < 0)
 		goto fail;
 
+	/*
+	 * Determine the real pipe dimensions. Note that stereo modes can
+	 * increase the actual pipe size due to the frame doubling and
+	 * insertion of additional space for blanks between the frames. This
+	 * is stored in the crtc timings. We use the requested mode to do this
+	 * computation to clearly distinguish it from the adjusted mode, which
+	 * can be changed by the connectors in the below retry loop.
+	 */
+	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
 encoder_retry:
 	/* Ensure the port clock defaults are reset when retrying. */
 	pipe_config->port_clock = 0;
 	pipe_config->pixel_multiplier = 1;
 
 	/* Fill in default crtc timings, allow encoders to overwrite them. */
-	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
 
 	/* Pass our mode to the connectors and the CRTC to give them a chance to
 	 * adjust it according to limitations or connector properties, and also
@@ -8300,7 +8736,8 @@
 	/* Set default port clock if not overwritten by the encoder. Needs to be
 	 * done afterwards in case the encoder adjusts the mode. */
 	if (!pipe_config->port_clock)
-		pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+			* pipe_config->pixel_multiplier;
 
 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
 	if (ret < 0) {
@@ -8487,13 +8924,9 @@
 
 }
 
-static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
-				    struct intel_crtc_config *new)
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
-	int clock1, clock2, diff;
-
-	clock1 = cur->adjusted_mode.clock;
-	clock2 = new->adjusted_mode.clock;
+	int diff;
 
 	if (clock1 == clock2)
 		return true;
@@ -8547,6 +8980,15 @@
 		return false; \
 	}
 
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+		DRM_ERROR("mismatch in " #name " " \
+			  "(expected %i, found %i)\n", \
+			  current_config->name, \
+			  pipe_config->name); \
+		return false; \
+	}
+
 #define PIPE_CONF_QUIRK(quirk)	\
 	((current_config->quirks | pipe_config->quirks) & (quirk))
 
@@ -8560,6 +9002,13 @@
 	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
 	PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
+	PIPE_CONF_CHECK_I(has_dp_encoder);
+	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+	PIPE_CONF_CHECK_I(dp_m_n.link_m);
+	PIPE_CONF_CHECK_I(dp_m_n.link_n);
+	PIPE_CONF_CHECK_I(dp_m_n.tu);
+
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
 	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8590,8 +9039,8 @@
 				      DRM_MODE_FLAG_NVSYNC);
 	}
 
-	PIPE_CONF_CHECK_I(requested_mode.hdisplay);
-	PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+	PIPE_CONF_CHECK_I(pipe_src_w);
+	PIPE_CONF_CHECK_I(pipe_src_h);
 
 	PIPE_CONF_CHECK_I(gmch_pfit.control);
 	/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8606,26 +9055,28 @@
 
 	PIPE_CONF_CHECK_I(ips_enabled);
 
+	PIPE_CONF_CHECK_I(double_wide);
+
 	PIPE_CONF_CHECK_I(shared_dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+		PIPE_CONF_CHECK_I(pipe_bpp);
+
+	if (!IS_HASWELL(dev)) {
+		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+	}
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
 
-	if (!IS_HASWELL(dev)) {
-		if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
-			DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
-				  current_config->adjusted_mode.clock,
-				  pipe_config->adjusted_mode.clock);
-			return false;
-		}
-	}
-
 	return true;
 }
 
@@ -8757,9 +9208,6 @@
 				encoder->get_config(encoder, &pipe_config);
 		}
 
-		if (dev_priv->display.get_clock)
-			dev_priv->display.get_clock(crtc, &pipe_config);
-
 		WARN(crtc->active != active,
 		     "crtc active state doesn't match with hw state "
 		     "(expected %i, found %i)\n", crtc->active, active);
@@ -8834,6 +9282,18 @@
 	check_shared_dpll_state(dev);
 }
 
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+				     int dotclock)
+{
+	/*
+	 * FDI already provided one idea for the dotclock.
+	 * Yell if the encoder disagrees.
+	 */
+	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+	     pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
 static int __intel_set_mode(struct drm_crtc *crtc,
 			    struct drm_display_mode *mode,
 			    int x, int y, struct drm_framebuffer *fb)
@@ -8846,7 +9306,7 @@
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
 	int ret = 0;
 
-	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+	saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
 	if (!saved_mode)
 		return -ENOMEM;
 	saved_hwmode = saved_mode + 1;
@@ -9385,7 +9845,7 @@
 	struct intel_crtc *intel_crtc;
 	int i;
 
-	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
 	if (intel_crtc == NULL)
 		return;
 
@@ -9414,6 +9874,18 @@
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 }
 
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
+{
+	struct drm_encoder *encoder = connector->base.encoder;
+
+	WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
+
+	if (!encoder)
+		return INVALID_PIPE;
+
+	return to_intel_crtc(encoder->crtc)->pipe;
+}
+
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file)
 {
@@ -9429,7 +9901,7 @@
 
 	if (!drmmode_obj) {
 		DRM_ERROR("no such CRTC id\n");
-		return -EINVAL;
+		return -ENOENT;
 	}
 
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
@@ -9536,7 +10008,13 @@
 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
-		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+					PORT_B);
+			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+		}
+
 		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
@@ -9545,12 +10023,7 @@
 					      PORT_C);
 		}
 
-		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
-			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
-					PORT_B);
-			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
-				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
-		}
+		intel_dsi_init(dev);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -9606,6 +10079,7 @@
 void intel_framebuffer_fini(struct intel_framebuffer *fb)
 {
 	drm_framebuffer_cleanup(&fb->base);
+	WARN_ON(!fb->obj->framebuffer_references--);
 	drm_gem_object_unreference_unlocked(&fb->obj->base);
 }
 
@@ -9637,9 +10111,12 @@
 			   struct drm_mode_fb_cmd2 *mode_cmd,
 			   struct drm_i915_gem_object *obj)
 {
+	int aligned_height, tile_height;
 	int pitch_limit;
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	if (obj->tiling_mode == I915_TILING_Y) {
 		DRM_DEBUG("hardware does not support tiling Y\n");
 		return -EINVAL;
@@ -9728,8 +10205,16 @@
 	if (mode_cmd->offsets[0] != 0)
 		return -EINVAL;
 
+	tile_height = IS_GEN2(dev) ? 16 : 8;
+	aligned_height = ALIGN(mode_cmd->height,
+			       obj->tiling_mode ? tile_height : 1);
+	/* FIXME drm helper for size checks (especially planar formats)? */
+	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+		return -EINVAL;
+
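
To make the new size check concrete (hypothetical framebuffer geometry):

	/*
	 * A 1920x1080 X-tiled fb with a 4096-byte stride needs at least
	 * ALIGN(1080, 8) * 4096 = 1080 * 4096 bytes on gen3+; on gen2 the
	 * tile height is 16, so the minimum rises to 1088 * 4096 bytes.
	 */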
 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
 	intel_fb->obj = obj;
+	intel_fb->obj->framebuffer_references++;
 
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
@@ -9755,9 +10240,15 @@
 	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
 static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.fb_create = intel_user_framebuffer_create,
-	.output_poll_changed = intel_fb_output_poll_changed,
+	.output_poll_changed = intel_fbdev_output_poll_changed,
 };
 
 /* Set up chip specific display functions */
@@ -9783,7 +10274,6 @@
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
-		dev_priv->display.get_clock = ironlake_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9791,7 +10281,6 @@
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9799,7 +10288,6 @@
 		dev_priv->display.update_plane = i9xx_update_plane;
 	} else {
 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-		dev_priv->display.get_clock = i9xx_crtc_clock_get;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9857,7 +10345,8 @@
 		}
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-	}
+	} else if (IS_VALLEYVIEW(dev))
+		dev_priv->display.write_eld = ironlake_write_eld;
 
 	/* Default just returns -ENODEV to indicate unsupported */
 	dev_priv->display.queue_flip = intel_default_queue_flip;
@@ -9975,8 +10464,7 @@
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
-	/* 830/845 need to leave pipe A & dpll A up */
-	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	/* 830 needs to leave pipe A & dpll A up */
 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
 	/* Lenovo U160 cannot use SSC on LVDS */
@@ -9985,20 +10473,11 @@
 	/* Sony Vaio Y cannot use SSC on LVDS */
 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-	/* Acer Aspire 5734Z must invert backlight brightness */
-	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-	/* Acer/eMachines G725 */
-	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-	/* Acer/eMachines e725 */
-	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-	/* Acer/Packard Bell NCL20 */
-	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-	/* Acer Aspire 4736Z */
-	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+	/*
+	 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
+	 * seem to use inverted backlight PWM.
+	 */
+	{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
 
 	/* Dell XPS13 HD Sandy Bridge */
 	{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -10047,12 +10526,19 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-	intel_init_power_well(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	intel_prepare_ddi(dev);
 
 	intel_init_clock_gating(dev);
 
+	/* Enable the CRI clock source so we can get at the display */
+	if (IS_VALLEYVIEW(dev))
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+			   DPLL_INTEGRATED_CRI_CLK_VLV);
+
+	intel_init_dpio(dev);
+
 	mutex_lock(&dev->struct_mutex);
 	intel_enable_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
@@ -10320,7 +10806,7 @@
 	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
 		return;
 
-	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
 	}
@@ -10343,6 +10829,7 @@
 								 &crtc->config);
 
 		crtc->base.enabled = crtc->active;
+		crtc->primary_enabled = crtc->active;
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -10383,20 +10870,11 @@
 		}
 
 		encoder->connectors_active = false;
-		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
 			      encoder->base.base.id,
 			      drm_get_encoder_name(&encoder->base),
 			      encoder->base.crtc ? "enabled" : "disabled",
-			      pipe);
-	}
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-			    base.head) {
-		if (!crtc->active)
-			continue;
-		if (dev_priv->display.get_clock)
-			dev_priv->display.get_clock(crtc,
-						    &crtc->config);
+			      pipe_name(pipe));
 	}
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10423,7 +10901,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
-	struct drm_plane *plane;
 	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
 	int i;
@@ -10470,7 +10947,12 @@
 		pll->on = false;
 	}
 
+	if (IS_HASWELL(dev))
+		ilk_wm_get_hw_state(dev);
+
 	if (force_restore) {
+		i915_redisable_vga(dev);
+
 		/*
 		 * We need to use raw interfaces for restoring state to avoid
 		 * checking (bogus) intermediate states.
@@ -10482,10 +10964,6 @@
 			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
 					 crtc->fb);
 		}
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-			intel_plane_restore(plane);
-
-		i915_redisable_vga(dev);
 	} else {
 		intel_modeset_update_staged_output_state(dev);
 	}
@@ -10508,6 +10986,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
+	struct drm_connector *connector;
 
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10548,6 +11027,10 @@
 	/* destroy backlight, if any, before the connectors */
 	intel_panel_destroy_backlight(dev);
 
+	/* destroy the sysfs files before encoders/connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_sysfs_connector_remove(connector);
+
 	drm_mode_config_cleanup(dev);
 
 	intel_cleanup_overlay(dev);
@@ -10643,7 +11126,7 @@
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return NULL;
 
-	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
 	if (error == NULL)
 		return NULL;
 
@@ -10651,6 +11134,9 @@
 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
 	for_each_pipe(i) {
+		if (!intel_display_power_enabled(dev, POWER_DOMAIN_PIPE(i)))
+			continue;
+
 		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
 			error->cursor[i].control = I915_READ(CURCNTR(i));
 			error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10684,6 +11170,10 @@
 	for (i = 0; i < error->num_transcoders; i++) {
 		enum transcoder cpu_transcoder = transcoders[i];
 
+		if (!intel_display_power_enabled(dev,
+				POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
+			continue;
+
 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
 
 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
@@ -10695,12 +11185,6 @@
 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
-	/* In the code above we read the registers without checking if the power
-	 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
-	 * prevent the next I915_WRITE from detecting it and printing an error
-	 * message. */
-	intel_uncore_clear_errors(dev);
-
 	return error;
 }
 
@@ -10745,7 +11229,7 @@
 	}
 
 	for (i = 0; i < error->num_transcoders; i++) {
-		err_printf(m, "  CPU transcoder: %c\n",
+		err_printf(m, "CPU transcoder: %c\n",
 			   transcoder_name(error->transcoder[i].cpu_transcoder));
 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 2c555f9..045d464 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -38,6 +38,32 @@
 
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
+struct dp_link_dpll {
+	int link_bw;
+	struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+	{ DP_LINK_BW_1_62,
+		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+	{ DP_LINK_BW_2_7,
+		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
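
As a cross-check against vlv_crtc_clock_get() in intel_display.c, the 2.7 GHz VLV entry works out as follows (a sketch assuming vlv_clock() uses the m = m1 * m2, p = p1 * p2, vco = refclk * m / n, dot = vco / p form):

	/*
	 * DP_LINK_BW_2_7: m1 = 2, m2 = 27, n = 1, p1 = 2, p2 = 2, refclk = 100 MHz:
	 *
	 *   vco        = 100000 * (2 * 27) / 1 = 5400000 kHz
	 *   dot        = 5400000 / (2 * 2)     = 1350000 kHz  (the "fast clock")
	 *   port_clock = dot / 5               =  270000 kHz  -> 2.7 GHz link rate
	 */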
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -211,24 +237,77 @@
 	}
 }
 
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp,
+				    struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp,
+					      struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	enum pipe pipe;
+
+	/* modeset should have pipe */
+	if (crtc)
+		return to_intel_crtc(crtc)->pipe;
+
+	/* init time, try to find a pipe with this port selected */
+	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+			PANEL_PORT_SELECT_MASK;
+		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+			return pipe;
+		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+			return pipe;
+	}
+
+	/* shrug */
+	return PIPE_A;
+}
+
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_CONTROL;
+	else
+		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_PCH_SPLIT(dev))
+		return PCH_PP_STATUS;
+	else
+		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_ctrl_reg;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
 }
 
 static void
@@ -236,19 +315,15 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp_stat_reg, pp_ctrl_reg;
 
 	if (!is_edp(intel_dp))
 		return;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
 		WARN(1, "eDP powered off while attempting aux channel communication.\n");
 		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-				I915_READ(pp_stat_reg),
-				I915_READ(pp_ctrl_reg));
+			      I915_READ(_pp_stat_reg(intel_dp)),
+			      I915_READ(_pp_ctrl_reg(intel_dp)));
 	}
 }
 
@@ -361,6 +436,12 @@
 		goto out;
 	}
 
+	/* Only 5 data registers! */
+	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+		ret = -E2BIG;
+		goto out;
+	}
+
 	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
 		/* Must try at least 3 times according to DP spec */
 		for (try = 0; try < 5; try++) {
@@ -451,9 +532,10 @@
 	int msg_bytes;
 	uint8_t	ack;
 
+	if (WARN_ON(send_bytes > 16))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
-	if (send_bytes > 16)
-		return -1;
 	msg[0] = AUX_NATIVE_WRITE << 4;
 	msg[1] = address >> 8;
 	msg[2] = address & 0xff;
@@ -494,6 +576,9 @@
 	uint8_t ack;
 	int ret;
 
+	if (WARN_ON(recv_bytes > 19))
+		return -E2BIG;
+
 	intel_dp_check_edp(intel_dp);
 	msg[0] = AUX_NATIVE_READ << 4;
 	msg[1] = address >> 8;
@@ -538,6 +623,7 @@
 	int reply_bytes;
 	int ret;
 
+	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_check_edp(intel_dp);
 	/* Set up the command byte */
 	if (mode & MODE_I2C_READ)
@@ -569,13 +655,18 @@
 		break;
 	}
 
-	for (retry = 0; retry < 5; retry++) {
+	/*
+	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+	 * required to retry at least seven times upon receiving AUX_DEFER
+	 * before giving up the AUX transaction.
+	 */
+	for (retry = 0; retry < 7; retry++) {
 		ret = intel_dp_aux_ch(intel_dp,
 				      msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
-			return ret;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
@@ -586,7 +677,8 @@
 			break;
 		case AUX_NATIVE_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_ch native nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_NATIVE_REPLY_DEFER:
 			/*
 			 * For now, just give more slack to branch devices. We
@@ -604,7 +696,8 @@
 		default:
 			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
 				  reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
@@ -612,22 +705,29 @@
 			if (mode == MODE_I2C_READ) {
 				*read_byte = reply[1];
 			}
-			return reply_bytes - 1;
+			ret = reply_bytes - 1;
+			goto out;
 		case AUX_I2C_REPLY_NACK:
 			DRM_DEBUG_KMS("aux_i2c nack\n");
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		case AUX_I2C_REPLY_DEFER:
 			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
 		default:
 			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
+			ret = -EREMOTEIO;
+			goto out;
 		}
 	}
 
 	DRM_ERROR("too many retries, giving up\n");
-	return -EREMOTEIO;
+	ret = -EREMOTEIO;
+
+out:
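+	/* Drop the VDD reference taken at the top of this function. */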
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+	return ret;
 }
 
 static int
@@ -647,11 +747,9 @@
 	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
 	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
 	intel_dp->adapter.algo_data = &intel_dp->algo;
-	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+	intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
-	ironlake_edp_panel_vdd_on(intel_dp);
 	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
-	ironlake_edp_panel_vdd_off(intel_dp, false);
 	return ret;
 }
 
@@ -660,41 +758,30 @@
 		   struct intel_crtc_config *pipe_config, int link_bw)
 {
 	struct drm_device *dev = encoder->base.dev;
+	const struct dp_link_dpll *divisor = NULL;
+	int i, count = 0;
 
 	if (IS_G4X(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.m1 = 23;
-			pipe_config->dpll.m2 = 8;
-		} else {
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 2;
-		}
-		pipe_config->clock_set = true;
+		divisor = gen4_dpll;
+		count = ARRAY_SIZE(gen4_dpll);
 	} else if (IS_HASWELL(dev)) {
 		/* Haswell has special-purpose DP DDI clocks. */
 	} else if (HAS_PCH_SPLIT(dev)) {
-		if (link_bw == DP_LINK_BW_1_62) {
-			pipe_config->dpll.n = 1;
-			pipe_config->dpll.p1 = 2;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 12;
-			pipe_config->dpll.m2 = 9;
-		} else {
-			pipe_config->dpll.n = 2;
-			pipe_config->dpll.p1 = 1;
-			pipe_config->dpll.p2 = 10;
-			pipe_config->dpll.m1 = 14;
-			pipe_config->dpll.m2 = 8;
-		}
-		pipe_config->clock_set = true;
+		divisor = pch_dpll;
+		count = ARRAY_SIZE(pch_dpll);
 	} else if (IS_VALLEYVIEW(dev)) {
-		/* FIXME: Need to figure out optimized DP clocks for vlv. */
+		divisor = vlv_dpll;
+		count = ARRAY_SIZE(vlv_dpll);
+	}
+
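+	/* Pick the precomputed divisors matching the requested link rate. */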
+	if (divisor && count) {
+		for (i = 0; i < count; i++) {
+			if (link_bw == divisor[i].link_bw) {
+				pipe_config->dpll = divisor[i].dpll;
+				pipe_config->clock_set = true;
+				break;
+			}
+		}
 	}
 }
 
@@ -737,19 +824,22 @@
 
 	DRM_DEBUG_KMS("DP link computation with max lane count %i "
 		      "max bw %02x pixel clock %iKHz\n",
-		      max_lane_count, bws[max_clock], adjusted_mode->clock);
+		      max_lane_count, bws[max_clock],
+		      adjusted_mode->crtc_clock);
 
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
-	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+	    dev_priv->vbt.edp_bpp < bpp) {
 		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 			      dev_priv->vbt.edp_bpp);
-		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+		bpp = dev_priv->vbt.edp_bpp;
 	}
 
 	for (; bpp >= 6*3; bpp -= 2*3) {
-		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+						   bpp);
 
 		for (clock = 0; clock <= max_clock; clock++) {
 			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +884,8 @@
 		      mode_rate, link_avail);
 
 	intel_link_compute_m_n(bpp, lane_count,
-			       adjusted_mode->clock, pipe_config->port_clock,
+			       adjusted_mode->crtc_clock,
+			       pipe_config->port_clock,
 			       &pipe_config->dp_m_n);
 
 	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +893,6 @@
 	return true;
 }
 
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
-	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	intel_dp->link_configuration[0] = intel_dp->link_bw;
-	intel_dp->link_configuration[1] = intel_dp->lane_count;
-	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-	/*
-	 * Check for DPCD version > 1.1 and enhanced framing support
-	 */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	}
-}
-
 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +965,6 @@
 		intel_write_eld(&encoder->base, adjusted_mode);
 	}
 
-	intel_dp_init_link_config(intel_dp);
-
 	/* Split out the IBX/CPU vs CPT settings */
 
 	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +974,7 @@
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +988,7 @@
 			intel_dp->DP |= DP_SYNC_VS_HIGH;
 		intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
-		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
 			intel_dp->DP |= DP_ENHANCED_FRAMING;
 
 		if (crtc->pipe == 1)
@@ -944,8 +1018,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_stat_reg, pp_ctrl_reg;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
 			mask, value,
@@ -987,11 +1061,8 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 control;
-	u32 pp_ctrl_reg;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-	control = I915_READ(pp_ctrl_reg);
-
+	control = I915_READ(_pp_ctrl_reg(intel_dp));
 	control &= ~PANEL_UNLOCK_MASK;
 	control |= PANEL_UNLOCK_REGS;
 	return control;
@@ -1006,17 +1077,16 @@
 
 	if (!is_edp(intel_dp))
 		return;
-	DRM_DEBUG_KMS("Turn eDP VDD on\n");
 
 	WARN(intel_dp->want_panel_vdd,
 	     "eDP VDD already requested on\n");
 
 	intel_dp->want_panel_vdd = true;
 
-	if (ironlake_edp_have_panel_vdd(intel_dp)) {
-		DRM_DEBUG_KMS("eDP VDD already on\n");
+	if (ironlake_edp_have_panel_vdd(intel_dp))
 		return;
-	}
+
+	DRM_DEBUG_KMS("Turning eDP VDD on\n");
 
 	if (!ironlake_edp_have_panel_power(intel_dp))
 		ironlake_wait_panel_power_cycle(intel_dp);
@@ -1024,8 +1094,8 @@
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_FORCE_VDD;
 
-	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_stat_reg = _pp_stat_reg(intel_dp);
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1050,11 +1120,13 @@
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+		DRM_DEBUG_KMS("Turning eDP VDD off\n");
+
 		pp = ironlake_get_pp_control(intel_dp);
 		pp &= ~EDP_FORCE_VDD;
 
-		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+		pp_stat_reg = _pp_stat_reg(intel_dp);
 
 		I915_WRITE(pp_ctrl_reg, pp);
 		POSTING_READ(pp_ctrl_reg);
@@ -1082,7 +1154,6 @@
 	if (!is_edp(intel_dp))
 		return;
 
-	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
 
 	intel_dp->want_panel_vdd = false;
@@ -1119,20 +1190,19 @@
 
 	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 	pp = ironlake_get_pp_control(intel_dp);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 
 	pp |= POWER_TARGET_ON;
 	if (!IS_GEN5(dev))
 		pp |= PANEL_POWER_RESET;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
@@ -1140,8 +1210,8 @@
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
 	}
 }
 
@@ -1164,7 +1234,7 @@
 	 * panels get very unhappy and cease to work. */
 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1179,7 +1249,6 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
 	u32 pp;
 	u32 pp_ctrl_reg;
 
@@ -1197,12 +1266,12 @@
 	pp = ironlake_get_pp_control(intel_dp);
 	pp |= EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
-	intel_panel_enable_backlight(dev, pipe);
+	intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1215,13 +1284,13 @@
 	if (!is_edp(intel_dp))
 		return;
 
-	intel_panel_disable_backlight(dev);
+	intel_panel_disable_backlight(intel_dp->attached_connector);
 
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(intel_dp);
 	pp &= ~EDP_BLC_ENABLE;
 
-	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1437,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	int dotclock;
 
 	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
 		tmp = I915_READ(intel_dp->output_reg);
@@ -1395,28 +1465,61 @@
 
 	pipe_config->adjusted_mode.flags |= flags;
 
-	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+	pipe_config->has_dp_encoder = true;
+
+	intel_dp_get_m_n(crtc, pipe_config);
+
+	if (port == PORT_A) {
 		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
 			pipe_config->port_clock = 162000;
 		else
 			pipe_config->port_clock = 270000;
 	}
+
+	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+					    &pipe_config->dp_m_n);
+
+	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+
+	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+		/*
+		 * This is a big fat ugly hack.
+		 *
+		 * Some machines in UEFI boot mode provide us a VBT that has 18
+		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+		 * unknown we fail to light up. Yet the same BIOS boots up with
+		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+		 * max, not what it tells us to use.
+		 *
+		 * Note: This will still be broken if the eDP panel is not lit
+		 * up by the BIOS, and thus we can't get the mode at module
+		 * load.
+		 */
+		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+	}
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
 {
-	return is_edp(intel_dp) &&
-		intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return dev_priv->psr.sink_support;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev))
+	if (!HAS_PSR(dev))
 		return false;
 
-	return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
 }
 
 static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1466,7 +1569,7 @@
 	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
 	/* Avoid continuous PSR exit by masking memup and hpd */
-	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
 		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 	intel_dp->psr_setup_done = true;
@@ -1491,9 +1594,9 @@
 					    DP_PSR_MAIN_LINK_ACTIVE);
 
 	/* Setup AUX registers */
-	I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
-	I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
-	I915_WRITE(EDP_PSR_AUX_CTL,
+	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+	I915_WRITE(EDP_PSR_AUX_CTL(dev),
 		   DP_AUX_CH_CTL_TIME_OUT_400us |
 		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1516,7 +1619,7 @@
 	} else
 		val |= EDP_PSR_LINK_DISABLE;
 
-	I915_WRITE(EDP_PSR_CTL, val |
+	I915_WRITE(EDP_PSR_CTL(dev), val |
 		   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
 		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
 		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1533,42 +1636,33 @@
 	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-	if (!IS_HASWELL(dev)) {
+	dev_priv->psr.source_ok = false;
+
+	if (!HAS_PSR(dev)) {
 		DRM_DEBUG_KMS("PSR not supported on this platform\n");
-		dev_priv->no_psr_reason = PSR_NO_SOURCE;
 		return false;
 	}
 
 	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
 	    (dig_port->port != PORT_A)) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-		dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
-		return false;
-	}
-
-	if (!is_edp_psr(intel_dp)) {
-		DRM_DEBUG_KMS("PSR not supported by this panel\n");
-		dev_priv->no_psr_reason = PSR_NO_SINK;
 		return false;
 	}
 
 	if (!i915_enable_psr) {
 		DRM_DEBUG_KMS("PSR disable by flag\n");
-		dev_priv->no_psr_reason = PSR_MODULE_PARAM;
 		return false;
 	}
 
 	crtc = dig_port->base.base.crtc;
 	if (crtc == NULL) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
 	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+	if (!intel_crtc_active(crtc)) {
 		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
 		return false;
 	}
 
@@ -1576,29 +1670,26 @@
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
 		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		dev_priv->no_psr_reason = PSR_NOT_TILED;
 		return false;
 	}
 
 	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
 		return false;
 	}
 
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-		dev_priv->no_psr_reason = PSR_S3D_ENABLED;
 		return false;
 	}
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-		dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
 		return false;
 	}
 
+	dev_priv->psr.source_ok = true;
 	return true;
 }
 
@@ -1637,10 +1728,11 @@
 	if (!intel_edp_is_psr_enabled(dev))
 		return;
 
-	I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+	I915_WRITE(EDP_PSR_CTL(dev),
+		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
 	/* Wait till PSR is idle */
-	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
 		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 		DRM_ERROR("Timed out waiting for PSR Idle State\n");
 }
@@ -1654,7 +1746,7 @@
 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			intel_dp = enc_to_intel_dp(&encoder->base);
 
-			if (!is_edp_psr(intel_dp))
+			if (!is_edp_psr(dev))
 				return;
 
 			if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1713,14 +1805,24 @@
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
 	intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	intel_enable_dp(encoder);
 	ironlake_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	ironlake_edp_backlight_on(intel_dp);
 }
 
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1738,53 +1840,59 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
+	struct edp_power_seq power_seq;
 	u32 val;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	/* init power sequencer on this pipe and port */
+	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+						      &power_seq);
+
 	intel_enable_dp(encoder);
 
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
-
-	if (!IS_VALLEYVIEW(dev))
-		return;
+	int pipe = intel_crtc->pipe;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 				 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1919,10 +2027,13 @@
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dport->base.base.crtc);
 	unsigned long demph_reg_value, preemph_reg_value,
 		uniqtranscale_reg_value;
 	uint8_t train_set = intel_dp->train_set[0];
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	case DP_TRAIN_PRE_EMPHASIS_0:
@@ -1998,21 +2109,22 @@
 	}
 
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 			 uniqtranscale_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
 	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	uint8_t v = 0;
 	uint8_t p = 0;
@@ -2207,14 +2319,15 @@
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-			uint32_t dp_reg_value,
+			uint32_t *DP,
 			uint8_t dp_train_pat)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
-	int ret;
+	uint8_t buf[sizeof(intel_dp->train_set) + 1];
+	int ret, len;
 
 	if (HAS_DDI(dev)) {
 		uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2243,62 +2356,93 @@
 		I915_WRITE(DP_TP_CTL(port), temp);
 
 	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			*DP |= DP_LINK_TRAIN_OFF_CPT;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
 			break;
 		}
 
 	} else {
-		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+		*DP &= ~DP_LINK_TRAIN_MASK;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			*DP |= DP_LINK_TRAIN_OFF;
 			break;
 		case DP_TRAINING_PATTERN_1:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			*DP |= DP_LINK_TRAIN_PAT_1;
 			break;
 		case DP_TRAINING_PATTERN_2:
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		case DP_TRAINING_PATTERN_3:
 			DRM_ERROR("DP training pattern 3 not supported\n");
-			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			*DP |= DP_LINK_TRAIN_PAT_2;
 			break;
 		}
 	}
 
-	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+	I915_WRITE(intel_dp->output_reg, *DP);
 	POSTING_READ(intel_dp->output_reg);
 
-	intel_dp_aux_native_write_1(intel_dp,
-				    DP_TRAINING_PATTERN_SET,
-				    dp_train_pat);
-
-	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	buf[0] = dp_train_pat;
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
 	    DP_TRAINING_PATTERN_DISABLE) {
-		ret = intel_dp_aux_native_write(intel_dp,
-						DP_TRAINING_LANE0_SET,
-						intel_dp->train_set,
-						intel_dp->lane_count);
-		if (ret != intel_dp->lane_count)
-			return false;
+		/* don't write DP_TRAINING_LANEx_SET on disable */
+		len = 1;
+	} else {
+		/* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+		len = intel_dp->lane_count + 1;
 	}
 
-	return true;
+	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
+					buf, len);
+
+	return ret == len;
+}
+
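+/* Clear the training set, program the base signal levels and start the
+ * requested training pattern. */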
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			uint8_t dp_train_pat)
+{
+	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+	intel_dp_set_signal_levels(intel_dp, DP);
+	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
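+/* Recompute vswing/pre-emphasis from the sink's link status and push the new
+ * levels to both the port register and DP_TRAINING_LANEx_SET. */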
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	intel_get_adjust_train(intel_dp, link_status);
+	intel_dp_set_signal_levels(intel_dp, DP);
+
+	I915_WRITE(intel_dp->output_reg, *DP);
+	POSTING_READ(intel_dp->output_reg);
+
+	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	return ret == intel_dp->lane_count;
 }
 
 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2342,32 +2486,37 @@
 	uint8_t voltage;
 	int voltage_tries, loop_tries;
 	uint32_t DP = intel_dp->DP;
+	uint8_t link_config[2];
 
 	if (HAS_DDI(dev))
 		intel_ddi_prepare_link_retrain(encoder);
 
 	/* Write the link configuration data */
-	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-				  intel_dp->link_configuration,
-				  DP_LINK_CONFIGURATION_SIZE);
+	link_config[0] = intel_dp->link_bw;
+	link_config[1] = intel_dp->lane_count;
+	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
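+	/* No downspread, select ANSI 8b/10b channel coding. */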
+	link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
 
 	DP |= DP_PORT_EN;
 
-	memset(intel_dp->train_set, 0, 4);
+	/* clock recovery */
+	if (!intel_dp_reset_link_train(intel_dp, &DP,
+				       DP_TRAINING_PATTERN_1 |
+				       DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to enable link training\n");
+		return;
+	}
+
 	voltage = 0xff;
 	voltage_tries = 0;
 	loop_tries = 0;
 	for (;;) {
-		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
-
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* Set training pattern 1 */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_1 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2387,10 +2536,12 @@
 		if (i == intel_dp->lane_count) {
 			++loop_tries;
 			if (loop_tries == 5) {
-				DRM_DEBUG_KMS("too many full retries, give up\n");
+				DRM_ERROR("too many full retries, give up\n");
 				break;
 			}
-			memset(intel_dp->train_set, 0, 4);
+			intel_dp_reset_link_train(intel_dp, &DP,
+						  DP_TRAINING_PATTERN_1 |
+						  DP_LINK_SCRAMBLING_DISABLE);
 			voltage_tries = 0;
 			continue;
 		}
@@ -2399,15 +2550,18 @@
 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
 			++voltage_tries;
 			if (voltage_tries == 5) {
-				DRM_DEBUG_KMS("too many voltage retries, give up\n");
+				DRM_ERROR("too many voltage retries, give up\n");
 				break;
 			}
 		} else
 			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 	}
 
 	intel_dp->DP = DP;
@@ -2421,11 +2575,18 @@
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
+	if (!intel_dp_set_link_train(intel_dp, &DP,
+				     DP_TRAINING_PATTERN_2 |
+				     DP_LINK_SCRAMBLING_DISABLE)) {
+		DRM_ERROR("failed to start channel equalization\n");
+		return;
+	}
+
 	tries = 0;
 	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
-		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
@@ -2433,21 +2594,18 @@
 			break;
 		}
 
-		intel_dp_set_signal_levels(intel_dp, &DP);
-
-		/* channel eq pattern */
-		if (!intel_dp_set_link_train(intel_dp, DP,
-					     DP_TRAINING_PATTERN_2 |
-					     DP_LINK_SCRAMBLING_DISABLE))
-			break;
-
 		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-		if (!intel_dp_get_link_status(intel_dp, link_status))
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
 			break;
+		}
 
 		/* Make sure clock is still ok */
 		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			cr_tries++;
 			continue;
 		}
@@ -2461,13 +2619,19 @@
 		if (tries > 5) {
 			intel_dp_link_down(intel_dp);
 			intel_dp_start_link_train(intel_dp);
+			intel_dp_set_link_train(intel_dp, &DP,
+						DP_TRAINING_PATTERN_2 |
+						DP_LINK_SCRAMBLING_DISABLE);
 			tries = 0;
 			cr_tries++;
 			continue;
 		}
 
-		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		/* Update training set as requested by target */
+		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+			DRM_ERROR("failed to update link training\n");
+			break;
+		}
 		++tries;
 	}
 
@@ -2482,7 +2646,7 @@
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
 {
-	intel_dp_set_link_train(intel_dp, intel_dp->DP,
+	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
 				DP_TRAINING_PATTERN_DISABLE);
 }
 
@@ -2569,6 +2733,10 @@
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
 	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2584,11 +2752,16 @@
 
 	/* Check if the panel supports PSR */
 	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-	intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
-				       intel_dp->psr_dpcd,
-				       sizeof(intel_dp->psr_dpcd));
-	if (is_edp_psr(intel_dp))
-		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+	if (is_edp(intel_dp)) {
+		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+					       intel_dp->psr_dpcd,
+					       sizeof(intel_dp->psr_dpcd));
+		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+			dev_priv->psr.sink_support = true;
+			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+		}
+	}
+
 	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
 	      DP_DWN_STRM_PORT_PRESENT))
 		return true; /* native DP sink */
@@ -2708,7 +2881,6 @@
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
 	uint8_t *dpcd = intel_dp->dpcd;
-	bool hpd;
 	uint8_t type;
 
 	if (!intel_dp_get_dpcd(intel_dp))
@@ -2719,8 +2891,8 @@
 		return connector_status_connected;
 
 	/* If we're HPD-aware, SINK_COUNT changes dynamically */
-	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
-	if (hpd) {
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
 		uint8_t reg;
 		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
 						    &reg, 1))
@@ -2734,9 +2906,18 @@
 		return connector_status_connected;
 
 	/* Well we tried, say unknown for unreliable port types */
-	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
-		return connector_status_unknown;
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+		if (type == DP_DS_PORT_TYPE_VGA ||
+		    type == DP_DS_PORT_TYPE_NON_EDID)
+			return connector_status_unknown;
+	} else {
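+		/* DPCD < 1.1: only the coarse port type in DOWNSTREAMPORT_PRESENT. */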
+		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			DP_DWN_STRM_PORT_TYPE_MASK;
+		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
+			return connector_status_unknown;
+	}
 
 	/* Anything else is out of spec, warn and ignore */
 	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2810,19 +2991,11 @@
 
 	/* use cached edid if we have one */
 	if (intel_connector->edid) {
-		struct edid *edid;
-		int size;
-
 		/* invalid edid */
 		if (IS_ERR(intel_connector->edid))
 			return NULL;
 
-		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-		edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
-		if (!edid)
-			return NULL;
-
-		return edid;
+		return drm_edid_duplicate(intel_connector->edid);
 	}
 
 	return drm_get_edid(connector, adapter);
@@ -3030,7 +3203,6 @@
 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_fini(&intel_connector->panel);
 
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -3101,7 +3273,7 @@
 bool intel_dpd_is_edp(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i;
 
 	if (!dev_priv->vbt.child_dev_num)
@@ -3110,8 +3282,9 @@
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->dvo_port == PORT_IDPD &&
-		    p_child->device_type == DEVICE_TYPE_eDP)
+		if (p_child->common.dvo_port == PORT_IDPD &&
+		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
 	}
 	return false;
@@ -3144,24 +3317,26 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec, final;
 	u32 pp_on, pp_off, pp_div, pp;
-	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
-		pp_control_reg = PCH_PP_CONTROL;
+		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_control_reg = PIPEA_PP_CONTROL;
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
 	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_control_reg, pp);
+	I915_WRITE(pp_ctrl_reg, pp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
@@ -3249,9 +3424,11 @@
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
 	} else {
-		pp_on_reg = PIPEA_PP_ON_DELAYS;
-		pp_off_reg = PIPEA_PP_OFF_DELAYS;
-		pp_div_reg = PIPEA_PP_DIVISOR;
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 
 	/* And finally store the new values in the power sequencer. */
@@ -3268,12 +3445,15 @@
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
 	if (IS_VALLEYVIEW(dev)) {
-		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+		if (dp_to_dig_port(intel_dp)->port == PORT_B)
+			port_sel = PANEL_PORT_SELECT_DPB_VLV;
+		else
+			port_sel = PANEL_PORT_SELECT_DPC_VLV;
 	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
 		if (dp_to_dig_port(intel_dp)->port == PORT_A)
-			port_sel = PANEL_POWER_PORT_DP_A;
+			port_sel = PANEL_PORT_SELECT_DPA;
 		else
-			port_sel = PANEL_POWER_PORT_DP_D;
+			port_sel = PANEL_PORT_SELECT_DPD;
 	}
 
 	pp_on |= port_sel;
@@ -3326,7 +3506,6 @@
 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
 						      &power_seq);
 
-	ironlake_edp_panel_vdd_on(intel_dp);
 	edid = drm_get_edid(connector, &intel_dp->adapter);
 	if (edid) {
 		if (drm_add_edid_modes(connector, edid)) {
@@ -3358,8 +3537,6 @@
 			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
 	}
 
-	ironlake_edp_panel_vdd_off(intel_dp, false);
-
 	intel_panel_init(&intel_connector->panel, fixed_mode);
 	intel_panel_setup_backlight(connector);
 
@@ -3516,11 +3693,11 @@
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dig_port);
 		return;
@@ -3539,12 +3716,12 @@
 	intel_encoder->get_hw_state = intel_dp_get_hw_state;
 	intel_encoder->get_config = intel_dp_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
 		intel_encoder->pre_enable = vlv_pre_enable_dp;
 		intel_encoder->enable = vlv_enable_dp;
 	} else {
-		intel_encoder->pre_enable = intel_pre_enable_dp;
-		intel_encoder->enable = intel_enable_dp;
+		intel_encoder->pre_enable = g4x_pre_enable_dp;
+		intel_encoder->enable = g4x_enable_dp;
 	}
 
 	intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9b7b68f..1e49aa8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
 /* the i915, i945 have a single sDVO i2c bus - which is different */
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtcs in the mode set */
-#define INTELFB_CONN_LIMIT 4
 
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
@@ -93,13 +92,17 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
+#define INTEL_DSI_COMMAND_MODE	0
+#define INTEL_DSI_VIDEO_MODE	1
+
 struct intel_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
 	unsigned long quirks;
 
+	/* User requested mode, only valid as a starting point to
+	 * compute adjusted_mode, except in the case of (S)DVO where
+	 * it's also for the output timings of the (S)DVO chip.
+	 * adjusted_mode will then correspond to the (S)DVO chip's
+	 * preferred input timings. */
 	struct drm_display_mode requested_mode;
+	/* Actual pipe timings, i.e. what we program into the pipe timing
+	 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
 	struct drm_display_mode adjusted_mode;
+
+	/* Pipe source size (ie. panel fitter input size)
+	 * All planes will be positioned inside this space,
+	 * and get clipped at the edges. */
+	int pipe_src_w, pipe_src_h;
+
 	/* Whether to set up the PCH/FDI. Note that we never allow sharing
 	 * between pch encoders and cpu encoders. */
 	bool has_pch_encoder;
@@ -262,7 +278,8 @@
 
 	/*
 	 * Frequency the dpll for the port should run at. Differs from the
-	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * already multiplied by pixel_multiplier.
 	 */
 	int port_clock;
 
@@ -288,6 +305,14 @@
 	struct intel_link_m_n fdi_m_n;
 
 	bool ips_enabled;
+
+	bool double_wide;
+};
+
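+/* Per-pipe watermark state, one entry per hardware watermark level. */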
+struct intel_pipe_wm {
+	struct intel_wm_level wm[5];
+	uint32_t linetime;
+	bool fbc_wm_enabled;
 };
 
 struct intel_crtc {
@@ -301,8 +326,9 @@
 	 * some outputs connected to this crtc.
 	 */
 	bool active;
+	unsigned long enabled_power_domains;
 	bool eld_vld;
-	bool primary_disabled; /* is the crtc obscured by a plane? */
+	bool primary_enabled; /* is the primary plane (partially) visible? */
 	bool lowfreq_avail;
 	struct intel_overlay *overlay;
 	struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
 	bool pch_fifo_underrun_disabled;
+
+	/* per-pipe watermark state */
+	struct {
+		/* watermarks currently being used  */
+		struct intel_pipe_wm active;
+	} wm;
 };
 
 struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@
 };
 
 #define DP_MAX_DOWNSTREAM_PORTS		0x10
-#define DP_LINK_CONFIGURATION_SIZE	9
 
 struct intel_dp {
 	uint32_t output_reg;
 	uint32_t aux_ch_ctl_reg;
 	uint32_t DP;
-	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
 	uint32_t color_range;
@@ -495,80 +525,6 @@
 	bool enable_stall_check;
 };
 
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
-				struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
-			    int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
-				      struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-				      struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
-			    bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
-			  enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
-				    struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
-				    struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-				      enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
-			    struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
-				   struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
-				    struct intel_crtc_config *pipe_config,
-				    int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
-				     struct intel_crtc_config *pipe_config,
-				     int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
-				      u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
-					 enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
 struct intel_set_config {
 	struct drm_encoder **save_connector_encoders;
 	struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@
 	bool mode_changed;
 };
 
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
 
-
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
 {
 	return to_intel_connector(connector)->encoder;
 }
@@ -616,73 +568,95 @@
 	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+					   enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+					   enum transcoder pch_transcoder,
+					   bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+			  struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+			struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
-					   struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-						    struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc);
+enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-			     enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
-	struct drm_framebuffer *release_fb;
-	bool load_detect_temp;
-	int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
-				       struct drm_display_mode *mode,
-				       struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
-					   struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-				    u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-				    u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_i915_gem_object *obj,
-				      struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
-				  struct intel_framebuffer *ifb,
-				  struct drm_mode_fb_cmd2 *mode_cmd,
-				  struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
-				   struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			       struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+			   struct intel_framebuffer *ifb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
@@ -696,102 +670,199 @@
 		       enum pipe pipe, bool state);
 #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
 #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-			bool state);
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+		     struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+					     unsigned int tiling_mode,
+					     unsigned int bpp,
+					     unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8_work(struct work_struct *__work);
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+		      struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+				int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void i915_disable_vga_mem(struct drm_device *dev);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+void intel_display_set_init_power(struct drm_device *dev, bool enable);
 
-extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_suspend_hw(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
-			    struct drm_display_mode *mode);
-extern void intel_prepare_ddi(struct drm_device *dev);
-extern void hsw_fdi_link_train(struct drm_crtc *crtc);
-extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_plane *plane,
-					   struct drm_crtc *crtc,
-					   uint32_t sprite_width, int pixel_size,
-					   bool enabled, bool scaled);
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			     struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+			     struct intel_crtc_config *pipe_config);
+bool intel_dpd_is_edp(struct drm_device *dev);
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
 
-extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
-						    unsigned int tiling_mode,
-						    unsigned int bpp,
-						    unsigned int pitch);
 
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-				     struct drm_file *file_priv);
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
 
-/* Power-related functions, located in intel_pm.c */
-extern void intel_init_pm(struct drm_device *dev);
-/* FBC */
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_update_fbc(struct drm_device *dev);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
 
-/* Power well */
-extern int i915_init_power_well(struct drm_device *dev);
-extern void i915_remove_power_well(struct drm_device *dev);
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
 
-extern bool intel_display_power_enabled(struct drm_device *dev,
-					enum intel_display_power_domain domain);
-extern void intel_init_power_well(struct drm_device *dev);
-extern void intel_set_power_well(struct drm_device *dev, bool enable);
-extern void intel_enable_gt_powersave(struct drm_device *dev);
-extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+			       struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+				 struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+			    struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+			     struct intel_crtc_config *pipe_config,
+			     int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+			      struct intel_crtc_config *pipe_config,
+			      int fitting_mode);
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+			       u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct intel_connector *connector);
+void intel_panel_disable_backlight(struct intel_connector *connector);
+void intel_panel_destroy_backlight(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+				    struct drm_crtc *crtc,
+				    uint32_t sprite_width, int pixel_size,
+				    bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int intel_power_domains_init(struct drm_device *dev);
+void intel_power_domains_remove(struct drm_device *dev);
+bool intel_display_power_enabled(struct drm_device *dev,
+				 enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain);
+void intel_power_domains_init_hw(struct drm_device *dev);
+void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
 
-extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-				   enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-					      enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
-extern void intel_display_handle_reset(struct drm_device *dev);
-extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-						  enum pipe pipe,
-						  bool enable);
-extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-						 enum transcoder pch_transcoder,
-						 bool enable);
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 
-extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_update(struct drm_device *dev);
-extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-			      bool switch_to_fclk, bool allow_power_down);
-extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
-extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
-			       uint32_t mask);
-extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
-			       uint32_t mask);
-extern void hsw_enable_pc8_work(struct work_struct *__work);
-extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
-extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
-extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+
+
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 0000000..d257b09
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
+
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+			   u32 mask)
+{
+	u32 tmp = vlv_cck_read(dev_priv, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Enable bandgap fix in GOP driver */
+	vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+	msleep(20);
+
+	/* Turn Display Trunk on */
+	vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Need huge delay, otherwise clock is not stable */
+	msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *config)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+	struct drm_display_mode *mode = &config->requested_mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (fixed_mode)
+		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	if (intel_dsi->dev.dev_ops->mode_fixup)
+		return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+							  mode, adjusted_mode);
+
+	return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if ((temp & DEVICE_READY) == 0) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+	} else if (temp & ULPS_STATE_MASK) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+		/*
+		 * We need to ensure a minimum of 1 ms has elapsed before
+		 * clearing the ULPS exit state.
+		 */
+		msleep(2);
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+
+	if (is_cmd_mode(intel_dsi))
+		I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+	if (is_vid_mode(intel_dsi)) {
+		msleep(20); /* XXX */
+		dpi_send_cmd(intel_dsi, TURN_ON);
+		msleep(100);
+
+		/* assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+	}
+
+	intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+	if (is_vid_mode(intel_dsi)) {
+		dpi_send_cmd(intel_dsi, SHUTDOWN);
+		msleep(10);
+
+		/* de-assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+		msleep(2);
+	}
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if (temp & DEVICE_READY) {
+		temp &= ~DEVICE_READY;
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 port, func;
+	enum pipe p;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: this only works for one DSI output */
+	for (p = PIPE_A; p <= PIPE_B; p++) {
+		port = I915_READ(MIPI_PORT_CTRL(p));
+		func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+		if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+			if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+				*pipe = p;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+		return MODE_NO_DBLESCAN;
+	}
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+	switch (divider) {
+	case ESCAPE_CLOCK_DIVIDER_1:
+	default:
+		return 20 * us;
+	case ESCAPE_CLOCK_DIVIDER_2:
+		return 10 * us;
+	case ESCAPE_CLOCK_DIVIDER_4:
+		return 5 * us;
+	}
+}
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
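+
+/*
+ * Illustrative example (not from this patch): with hactive = 640, bpp = 24
+ * and lane_count = 2, txbyteclkhs(640, 24, 2) =
+ * DIV_ROUND_UP(DIV_ROUND_UP(640 * 24, 8), 2) = DIV_ROUND_UP(1920, 2) = 960
+ * byte clocks; with ESCAPE_CLOCK_DIVIDER_1 (20 MHz escape clock),
+ * txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) = 20 * 100 = 2000 cycles, as used
+ * for MIPI_INIT_COUNT below.
+ */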
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	unsigned int lane_count = intel_dsi->lane_count;
+
+	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+	hactive = mode->hdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	/* horizontal values are in terms of high speed byte clock */
+	hactive = txbyteclkhs(hactive, bpp, lane_count);
+	hfp = txbyteclkhs(hfp, bpp, lane_count);
+	hsync = txbyteclkhs(hsync, bpp, lane_count);
+	hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+	I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+	I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+	/* meaningful for video mode non-burst sync pulse mode only, can be zero
+	 * for non-burst sync events and burst modes */
+	I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+	I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+	/* vertical values are in terms of lines */
+	I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+	I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+	I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	u32 val, tmp;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+	/* Update the DSI PLL */
+	vlv_enable_dsi_pll(intel_encoder);
+
+	/* XXX: Location of the call */
+	band_gap_wa(dev_priv);
+
+	/* escape clock divider, 20MHz, shared for A and C. device ready must be
+	 * off when doing this! txclkesc? */
+	tmp = I915_READ(MIPI_CTRL(0));
+	tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+	I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+	/* read request priority is per pipe */
+	tmp = I915_READ(MIPI_CTRL(pipe));
+	tmp &= ~READ_REQUEST_PRIORITY_MASK;
+	I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+	/* XXX: why here, why like this? handling in irq handler?! */
+	I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+	I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+	I915_WRITE(MIPI_DPHY_PARAM(pipe),
+		   0x3c << EXIT_ZERO_COUNT_SHIFT |
+		   0x1f << TRAIL_COUNT_SHIFT |
+		   0xc5 << CLK_ZERO_COUNT_SHIFT |
+		   0x1f << PREPARE_COUNT_SHIFT);
+
+	I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+		   adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+		   adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+	set_dsi_timings(encoder, adjusted_mode);
+
+	val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+	if (is_cmd_mode(intel_dsi)) {
+		val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+		val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+	} else {
+		val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+		/* XXX: cross-check bpp vs. pixel format? */
+		val |= intel_dsi->pixel_format;
+	}
+	I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+	/* Timeouts for recovery: one frame, if I understand correctly. If the
+	 * counter expires, send EOT and go to stop state. */
+
+	/*
+	 * In burst mode, use a value greater than one DPI line time in byte
+	 * clock (txbyteclkhs); programming 1 more than that value is
+	 * recommended for the timeout.
+	 *
+	 * In non-burst mode, use a value greater than one DPI frame time in
+	 * byte clock (txbyteclkhs); programming 1 more than that value is
+	 * recommended for the timeout.
+	 *
+	 * In DBI only mode, use a value greater than one DBI frame time in
+	 * byte clock (txbyteclkhs); programming 1 more than that value is
+	 * recommended for the timeout.
+	 */
+
+	if (is_vid_mode(intel_dsi) &&
+	    intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->htotal, bpp,
+				       intel_dsi->lane_count) + 1);
+	} else {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->vtotal *
+				       adjusted_mode->htotal,
+				       bpp, intel_dsi->lane_count) + 1);
+	}
+	I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+	I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+	I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+
+	/* dphy stuff */
+
+	/* in terms of low power clock */
+	I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+	/* recovery disables */
+	I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+	/* in terms of txbyteclkhs. actual high to low switch +
+	 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+	 *
+	 * XXX: write MIPI_STOP_STATE_STALL?
+	 */
+	I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+	/* XXX: low power clock equivalence in terms of byte clock. the number
+	 * of byte clocks occupied in one low power clock. based on txbyteclkhs
+	 * and txclkesc. txclkesc time / txbyteclk time * (105 +
+	 * MIPI_STOP_STATE_STALL) / 105.???
+	 */
+	I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+	/* The bandwidth needed to transmit 16 long packets of 252 bytes each,
+	 * meant for the DCS write memory command, is programmed in this
+	 * register in terms of byte clocks. The time taken to transmit 16 long
+	 * packets in a DSI stream varies with the DSI transfer rate and the
+	 * number of lanes configured. */
+	I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+	I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+		   0xa << LP_HS_SSW_CNT_SHIFT |
+		   0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+	if (is_vid_mode(intel_dsi))
+		I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+			   intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+	DRM_DEBUG_KMS("\n");
+	return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	DRM_DEBUG_KMS("\n");
+	intel_panel_fini(&intel_connector->panel);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_dsi_detect,
+	.destroy = intel_dsi_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *fixed_mode = NULL;
+	const struct intel_dsi_device *dsi;
+	unsigned int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return false;
+
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return false;
+	}
+
+	intel_encoder = &intel_dsi->base;
+	encoder = &intel_encoder->base;
+	intel_dsi->attached_connector = intel_connector;
+
+	connector = &intel_connector->base;
+
+	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+	/* XXX: very likely not all of these are needed */
+	intel_encoder->hot_plug = intel_dsi_hot_plug;
+	intel_encoder->compute_config = intel_dsi_compute_config;
+	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+	intel_encoder->pre_enable = intel_dsi_pre_enable;
+	intel_encoder->enable = intel_dsi_enable;
+	intel_encoder->mode_set = intel_dsi_mode_set;
+	intel_encoder->disable = intel_dsi_disable;
+	intel_encoder->post_disable = intel_dsi_post_disable;
+	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+	intel_encoder->get_config = intel_dsi_get_config;
+
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+		dsi = &intel_dsi_devices[i];
+		intel_dsi->dev = *dsi;
+
+		if (dsi->dev_ops->init(&intel_dsi->dev))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(intel_dsi_devices)) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	intel_encoder->type = INTEL_OUTPUT_DSI;
+	intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+	intel_encoder->cloneable = false;
+	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+
+	drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+	drm_sysfs_connector_add(connector);
+
+	fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+	if (!fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		goto err;
+	}
+
+	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+
+	return true;
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 0000000..c7765f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+	unsigned int panel_id;
+	const char *name;
+	int type;
+	const struct intel_dsi_dev_ops *dev_ops;
+	void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+	bool (*init)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*enable)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*disable)(struct intel_dsi_device *dsi);
+
+	int (*mode_valid)(struct intel_dsi_device *dsi,
+			  struct drm_display_mode *mode);
+
+	bool (*mode_fixup)(struct intel_dsi_device *dsi,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+
+	void (*mode_set)(struct intel_dsi_device *dsi,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+
+	enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+	bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+	struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+	void (*destroy) (struct intel_dsi_device *dsi);
+};
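+
+/*
+ * Illustrative sketch (hypothetical names, not part of this patch): a panel
+ * sub-driver would implement these hooks and be listed in intel_dsi_devices[]
+ * in intel_dsi.c, e.g.:
+ *
+ *	static const struct intel_dsi_dev_ops my_panel_ops = {
+ *		.init = my_panel_init,
+ *		.enable = my_panel_enable,
+ *		.disable = my_panel_disable,
+ *		.mode_valid = my_panel_mode_valid,
+ *		.detect = my_panel_detect,
+ *		.get_modes = my_panel_get_modes,
+ *	};
+ *
+ * with a matching struct intel_dsi_device entry providing panel_id, name,
+ * type (video or command mode) and dev_ops = &my_panel_ops.
+ */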
+
+struct intel_dsi {
+	struct intel_encoder base;
+
+	struct intel_dsi_device dev;
+
+	struct intel_connector *attached_connector;
+
+	/* if true, use HS mode, otherwise LP */
+	bool hs;
+
+	/* virtual channel */
+	int channel;
+
+	/* number of DSI lanes */
+	unsigned int lane_count;
+
+	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+	u32 pixel_format;
+
+	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+	u32 video_mode_format;
+
+	/* eot for MIPI_EOT_DISABLE register */
+	u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 0000000..7c40f98
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 val;
+
+	val = I915_READ(MIPI_INTR_STAT(pipe));
+
+#define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : ""
+	DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+		      "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+		      "\n", pipe, val,
+		      STAT_BIT(val, TEARING_EFFECT),
+		      STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+		      STAT_BIT(val, GEN_READ_DATA_AVAIL),
+		      STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+		      STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+		      STAT_BIT(val, RX_PROT_VIOLATION),
+		      STAT_BIT(val, RX_INVALID_TX_LENGTH),
+		      STAT_BIT(val, ACK_WITH_NO_ERROR),
+		      STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+		      STAT_BIT(val, LP_RX_TIMEOUT),
+		      STAT_BIT(val, HS_TX_TIMEOUT),
+		      STAT_BIT(val, DPI_FIFO_UNDERRUN),
+		      STAT_BIT(val, LOW_CONTENTION),
+		      STAT_BIT(val, HIGH_CONTENTION),
+		      STAT_BIT(val, TXDSI_VC_ID_INVALID),
+		      STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+		      STAT_BIT(val, TXCHECKSUM_ERROR),
+		      STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+		      STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+		      STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+		      STAT_BIT(val, RXDSI_VC_ID_INVALID),
+		      STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+		      STAT_BIT(val, RXCHECKSUM_ERROR),
+		      STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+		      STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+		      STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+		      STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+		      STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+		      STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+		      STAT_BIT(val, RXEOT_SYNC_ERROR),
+		      STAT_BIT(val, RXSOT_SYNC_ERROR),
+		      STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+	DSI_DCS,
+	DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 temp;
+	u32 mask = DBI_FIFO_EMPTY;
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+	temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+	temp &= DBI_HS_LP_MODE_MASK;
+	I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+
+	intel_dsi->hs = enable;
+}
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+			     u8 data_type, u16 data)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 ctrl_reg;
+	u32 ctrl;
+	u32 mask;
+
+	DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+		      channel, data_type, data);
+
+	if (intel_dsi->hs) {
+		ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+		mask = HS_CTRL_FIFO_FULL;
+	} else {
+		ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+		mask = LP_CTRL_FIFO_FULL;
+	}
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+		print_stat(intel_dsi);
+	}
+
+	/*
+	 * Note: This function is also used for long packets, with length passed
+	 * as data, since SHORT_PACKET_PARAM_SHIFT ==
+	 * LONG_PACKET_WORD_COUNT_SHIFT.
+	 */
+	ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+		channel << VIRTUAL_CHANNEL_SHIFT |
+		data_type << DATA_TYPE_SHIFT;
+
+	I915_WRITE(ctrl_reg, ctrl);
+
+	return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+			    u8 data_type, const u8 *data, int len)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 data_reg;
+	int i, j, n;
+	u32 mask;
+
+	DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+		      channel, data_type, len);
+
+	if (intel_dsi->hs) {
+		data_reg = MIPI_HS_GEN_DATA(pipe);
+		mask = HS_DATA_FIFO_FULL;
+	} else {
+		data_reg = MIPI_LP_GEN_DATA(pipe);
+		mask = LP_DATA_FIFO_FULL;
+	}
+
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+		DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+	for (i = 0; i < len; i += n) {
+		u32 val = 0;
+		n = min_t(int, len - i, 4);
+
+		for (j = 0; j < n; j++)
+			val |= *data++ << 8 * j;
+
+		I915_WRITE(data_reg, val);
+		/* XXX: check for data fifo full, once that is set, write 4
+		 * dwords, then wait for not set, then continue. */
+	}
+
+	return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+			       int channel, const u8 *data, int len,
+			       enum dsi_type type)
+{
+	int ret;
+
+	if (len == 0) {
+		BUG_ON(type == DSI_GENERIC);
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+					0);
+	} else if (len == 1) {
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					type == DSI_GENERIC ?
+					MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+					MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+	} else if (len == 2) {
+		ret = dsi_vc_send_short(intel_dsi, channel,
+					type == DSI_GENERIC ?
+					MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+					MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+					(data[1] << 8) | data[0]);
+	} else {
+		ret = dsi_vc_send_long(intel_dsi, channel,
+				       type == DSI_GENERIC ?
+				       MIPI_DSI_GENERIC_LONG_WRITE :
+				       MIPI_DSI_DCS_LONG_WRITE, data, len);
+	}
+
+	return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+		     const u8 *data, int len)
+{
+	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+			 const u8 *data, int len)
+{
+	return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+					int channel, u8 dcs_cmd)
+{
+	return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+				 dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+					    int channel, u8 *reqdata,
+					    int reqlen)
+{
+	u16 data;
+	u8 data_type;
+
+	switch (reqlen) {
+	case 0:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+		data = 0;
+		break;
+	case 1:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+		data = reqdata[0];
+		break;
+	case 2:
+		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+		data = (reqdata[1] << 8) | reqdata[0];
+		break;
+	default:
+		BUG();
+	}
+
+	return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+				u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	int i, len = 0;
+	u32 data_reg, val;
+
+	if (intel_dsi->hs) {
+		data_reg = MIPI_HS_GEN_DATA(pipe);
+	} else {
+		data_reg = MIPI_LP_GEN_DATA(pipe);
+	}
+
+	while (len < buflen) {
+		val = I915_READ(data_reg);
+		for (i = 0; i < 4 && len < buflen; i++, len++)
+			buf[len] = val >> 8 * i;
+	}
+
+	return len;
+}
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+		    u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+	int ret;
+
+	/*
+	 * XXX: should issue multiple read requests and reads if request is
+	 * longer than MIPI_MAX_RETURN_PKT_SIZE
+	 */
+
+	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+	ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+	if (ret)
+		return ret;
+
+	mask = GEN_READ_DATA_AVAIL;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for read data.\n");
+
+	ret = dsi_read_data_return(intel_dsi, buf, buflen);
+	if (ret < 0)
+		return ret;
+
+	if (ret != buflen)
+		return -EIO;
+
+	return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+			u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+	int ret;
+
+	/*
+	 * XXX: should issue multiple read requests and reads if request is
+	 * longer than MIPI_MAX_RETURN_PKT_SIZE
+	 */
+
+	I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+	ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+					       reqlen);
+	if (ret)
+		return ret;
+
+	mask = GEN_READ_DATA_AVAIL;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for read data.\n");
+
+	ret = dsi_read_data_return(intel_dsi, buf, buflen);
+	if (ret < 0)
+		return ret;
+
+	if (ret != buflen)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+{
+	struct drm_encoder *encoder = &intel_dsi->base.base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	u32 mask;
+
+	/* XXX: pipe, hs */
+	if (intel_dsi->hs)
+		cmd &= ~DPI_LP_MODE;
+	else
+		cmd |= DPI_LP_MODE;
+
+	/* DPI virtual channel?! */
+
+	mask = DPI_FIFO_EMPTY;
+	if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+		DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
+
+	/* clear bit */
+	I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+	/* XXX: old code skips write if control unchanged */
+	if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+		DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+	I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+	mask = SPL_PKT_SENT_INTERRUPT;
+	if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644
index 0000000..54c8a23
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_DSI_H
+#define _INTEL_DSI_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+		     const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+			 const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+		    u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+			u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+				     int channel, u8 dcs_cmd)
+{
+	return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+				     int channel, u8 dcs_cmd, u8 param)
+{
+	u8 buf[2] = { dcs_cmd, param };
+	return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+					 int channel)
+{
+	return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+					 int channel, u8 param)
+{
+	return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+					 int channel, u8 param1, u8 param2)
+{
+	u8 buf[2] = { param1, param2 };
+	return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+					int channel, u8 *buf, int buflen)
+{
+	return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+					int channel, u8 param, u8 *buf,
+					int buflen)
+{
+	return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+					int channel, u8 param1, u8 param2,
+					u8 *buf, int buflen)
+{
+	u8 req[2] = { param1, param2 };
+
+	return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
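+
+/*
+ * Illustrative usage (hypothetical panel hook, not part of this patch): the
+ * DCS write helpers above could implement a standard panel power-up
+ * sequence, e.g.:
+ *
+ *	static void my_panel_enable(struct intel_dsi_device *dsi)
+ *	{
+ *		struct intel_dsi *intel_dsi =
+ *			container_of(dsi, struct intel_dsi, dev);
+ *
+ *		dsi_vc_dcs_write_0(intel_dsi, intel_dsi->channel,
+ *				   MIPI_DCS_EXIT_SLEEP_MODE);
+ *		dsi_vc_dcs_write_0(intel_dsi, intel_dsi->channel,
+ *				   MIPI_DCS_SET_DISPLAY_ON);
+ *	}
+ *
+ * with a panel-specific delay (e.g. msleep(120)) between the two writes.
+ * MIPI_DCS_EXIT_SLEEP_MODE and MIPI_DCS_SET_DISPLAY_ON come from
+ * <video/mipi_display.h>, which is already included above.
+ */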
+
+
+#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644
index 0000000..44279b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Shobhit Kumar <shobhit.kumar@intel.com>
+ *	Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE		4
+#define DSI_HSE_PACKET_SIZE		4
+#define DSI_HSA_PACKET_EXTRA_SIZE	6
+#define DSI_HBP_PACKET_EXTRA_SIZE	6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE	6
+#define DSI_HFP_PACKET_EXTRA_SIZE	6
+#define DSI_EOTP_PACKET_SIZE		4
+
+struct dsi_mnp {
+	u32 dsi_pll_ctrl;
+	u32 dsi_pll_div;
+};
+
+static const u32 lfsr_converts[] = {
+	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35							/* 91 - 92 */
+};
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+			  int pixel_format, int video_mode_format,
+			  int lane_count, bool eotp)
+{
+	u32 bpp;
+	u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+	u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+	u32 bytes_per_line, bytes_per_frame;
+	u32 num_frames;
+	u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+	u32 dsi_bit_clock_hz;
+	u32 dsi_clk;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	hactive = mode->hdisplay;
+	vactive = mode->vdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+	hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+	hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+	hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+	bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+		DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+		hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+		hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+		hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+	/*
+	 * XXX: Need to accurately calculate LP to HS transition timeout and add
+	 * it to bytes_per_line/bytes_per_frame.
+	 */
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+	bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+		vactive * bytes_per_line + vfp * bytes_per_line;
+
+	if (eotp &&
+	    (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+	     video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+		bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+	num_frames = drm_mode_vrefresh(mode);
+	bytes_per_x_frames = num_frames * bytes_per_frame;
+
+	bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+	/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+	dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+	dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+
+	if (eotp && video_mode_format == VIDEO_MODE_BURST)
+		dsi_clk *= 2;
+
+	return dsi_clk;
+}
+
+#ifdef MNP_FROM_TABLE
+
+struct dsi_clock_table {
+	u32 freq;
+	u8 m;
+	u8 p;
+};
+
+static const struct dsi_clock_table dsi_clk_tbl[] = {
+	{300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
+	{343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
+	{383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
+	{401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
+	{405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
+	{409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
+	{413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
+	{417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
+	{430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
+	{470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
+	{510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
+	{550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
+	{590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
+	{630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
+	{670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
+	{710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
+	{750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
+	{790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
+	{1000, 80, 2},		/* dsi clock frequency in MHz */
+};
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+	unsigned int i;
+	u8 m;
+	u8 n;
+	u8 p;
+	u32 m_seed;
+
+	if (dsi_clk < 300 || dsi_clk > 1000)
+		return -ECHRNG;
+
+	for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl) - 1; i++) {
+		if (dsi_clk_tbl[i].freq > dsi_clk)
+			break;
+	}
+
+	m = dsi_clk_tbl[i].m;
+	p = dsi_clk_tbl[i].p;
+	m_seed = lfsr_converts[m - 62];
+	n = 1;
+	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
+	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+		m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+	return 0;
+}
+
+#else
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+	u32 m, n, p;
+	u32 ref_clk;
+	u32 error;
+	u32 tmp_error;
+	u32 target_dsi_clk;
+	u32 calc_dsi_clk;
+	u32 calc_m;
+	u32 calc_p;
+	u32 m_seed;
+
+	if (dsi_clk < 300 || dsi_clk > 1150) {
+		DRM_ERROR("DSI CLK Out of Range\n");
+		return -ECHRNG;
+	}
+
+	ref_clk = 25000;
+	target_dsi_clk = dsi_clk * 1000;
+	error = 0xFFFFFFFF;
+	calc_m = 0;
+	calc_p = 0;
+
+	for (m = 62; m <= 92; m++) {
+		for (p = 2; p <= 6; p++) {
+
+			calc_dsi_clk = (m * ref_clk) / p;
+			if (calc_dsi_clk >= target_dsi_clk) {
+				tmp_error = calc_dsi_clk - target_dsi_clk;
+				if (tmp_error < error) {
+					error = tmp_error;
+					calc_m = m;
+					calc_p = p;
+				}
+			}
+		}
+	}
+
+	m_seed = lfsr_converts[calc_m - 62];
+	n = 1;
+	dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+	dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+		m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+	return 0;
+}
+
+#endif
+
+/*
+ * XXX: The muxing and gating is hard coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int ret;
+	struct dsi_mnp dsi_mnp;
+	u32 dsi_clk;
+
+	dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
+				 intel_dsi->video_mode_format,
+				 intel_dsi->lane_count, !intel_dsi->eot_disable);
+
+	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+	if (ret) {
+		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+		return;
+	}
+
+	dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+	DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+		      dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	vlv_configure_dsi_pll(encoder);
+
+	/* wait at least 0.5 us after ungating before enabling VCO */
+	usleep_range(1, 10);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp |= DSI_PLL_VCO_EN;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+		DRM_ERROR("DSI PLL lock failed\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_KMS("\n");
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+	tmp &= ~DSI_PLL_VCO_EN;
+	tmp |= DSI_PLL_LDO_GATE;
+	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
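
The non-table variant of dsi_calc_mnp() above brute-forces the divider pair: with a fixed 25 MHz reference it walks m over 62..92 and p over 2..6, keeps the combination whose m * ref / p lands closest to, but not below, the target DSI clock, and then maps m through the lfsr_converts[] seed table before packing the registers. The following is a minimal user-space sketch of just that search, leaving out the LFSR seed lookup and the register packing; the 500 MHz target in main() is only an example input, not a value from the patch.

#include <stdio.h>

/* Stand-alone sketch of the divider search in dsi_calc_mnp():
 * ref_clk is fixed at 25000 kHz, m spans 62..92, p spans 2..6, and the
 * combination at or above the target with the smallest error wins.
 */
static int calc_mp(unsigned int dsi_clk_mhz, unsigned int *m_out, unsigned int *p_out)
{
	const unsigned int ref_clk = 25000;		/* kHz */
	unsigned int target = dsi_clk_mhz * 1000;	/* kHz */
	unsigned int best_err = ~0u, m, p;

	if (dsi_clk_mhz < 300 || dsi_clk_mhz > 1150)
		return -1;

	for (m = 62; m <= 92; m++) {
		for (p = 2; p <= 6; p++) {
			unsigned int calc = (m * ref_clk) / p;

			if (calc >= target && calc - target < best_err) {
				best_err = calc - target;
				*m_out = m;
				*p_out = p;
			}
		}
	}
	return 0;
}

int main(void)
{
	unsigned int m = 0, p = 0;

	if (calc_mp(500, &m, &p) == 0)	/* 500 MHz is just an example target */
		printf("m=%u p=%u -> %u kHz\n", m, p, m * 25000 / p);
	return 0;
}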
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 7fa7df5..3c77365 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -153,6 +153,8 @@
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
+	pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -171,11 +173,16 @@
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	u32 dvo_reg = intel_dvo->dev.dvo_reg;
 	u32 temp = I915_READ(dvo_reg);
 
 	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
 	I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+					 &crtc->config.requested_mode,
+					 &crtc->config.adjusted_mode);
+
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
@@ -184,6 +191,7 @@
 {
 	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	struct drm_crtc *crtc;
+	struct intel_crtc_config *config;
 
 	/* dvo supports only 2 dpms states. */
 	if (mode != DRM_MODE_DPMS_ON)
@@ -204,10 +212,16 @@
 	/* We call connector dpms manually below in case pipe dpms doesn't
 	 * change due to cloning. */
 	if (mode == DRM_MODE_DPMS_ON) {
+		config = &to_intel_crtc(crtc)->config;
+
 		intel_dvo->base.connectors_active = true;
 
 		intel_crtc_update_dpms(crtc);
 
+		intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+						 &config->requested_mode,
+						 &config->adjusted_mode);
+
 		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 	} else {
 		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
@@ -267,11 +281,6 @@
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 	}
 
-	if (intel_dvo->dev.dev_ops->mode_fixup)
-		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
-							  &pipe_config->requested_mode,
-							  adjusted_mode);
-
 	return true;
 }
 
@@ -299,10 +308,6 @@
 		break;
 	}
 
-	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
-					 &crtc->config.requested_mode,
-					 adjusted_mode);
-
 	/* Save the data order, since I don't know what it should be set to. */
 	dvo_val = I915_READ(dvo_reg) &
 		  (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
@@ -370,7 +375,6 @@
 
 static void intel_dvo_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -451,11 +455,11 @@
 	int i;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
 
-	intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+	intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
 	if (!intel_dvo)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dvo);
 		return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c
similarity index 89%
rename from drivers/gpu/drm/i915/intel_fb.c
rename to drivers/gpu/drm/i915/intel_fbdev.c
index bc21000..895fcb4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -78,8 +78,8 @@
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
 
-	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
-						      8), 64);
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
 							  sizes->surface_depth);
 
@@ -184,6 +184,27 @@
 	return ret;
 }
 
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 	.gamma_set = intel_crtc_fb_gamma_set,
 	.gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+	ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
 	if (!ifbdev)
 		return -ENOMEM;
 
@@ -225,7 +246,7 @@
 
 	ret = drm_fb_helper_init(dev, &ifbdev->helper,
 				 INTEL_INFO(dev)->num_pipes,
-				 INTELFB_CONN_LIMIT);
+				 4);
 	if (ret) {
 		kfree(ifbdev);
 		return ret;
@@ -278,13 +299,13 @@
 
 MODULE_LICENSE("GPL and additional rights");
 
-void intel_fb_output_poll_changed(struct drm_device *dev)
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
 
-void intel_fb_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_device *dev)
 {
 	int ret;
 	struct drm_i915_private *dev_priv = dev->dev_private;
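
For reference, the pitch computation in intelfb_create() above is plain integer arithmetic: bytes per pixel rounded up from the bit depth, then the stride aligned up to 64 bytes. A small stand-alone sketch, using user-space equivalents of the kernel's DIV_ROUND_UP() and ALIGN() helpers and a hypothetical 1366-wide, 32 bpp surface:

#include <stdio.h>

/* User-space equivalents of the kernel helpers used above. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, bpp = 32;		/* example surface, not from the patch */
	unsigned int cpp = DIV_ROUND_UP(bpp, 8);	/* 4 bytes per pixel */
	unsigned int pitch = ALIGN(width * cpp, 64);	/* stride rounded up to 64 bytes */

	printf("cpp=%u pitch=%u\n", cpp, pitch);	/* cpp=4 pitch=5504 */
	return 0;
}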
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4148cc8..51a8336 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -713,6 +713,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
 	u32 tmp, flags = 0;
+	int dotclock;
 
 	tmp = I915_READ(intel_hdmi->hdmi_reg);
 
@@ -727,6 +728,16 @@
 		flags |= DRM_MODE_FLAG_NVSYNC;
 
 	pipe_config->adjusted_mode.flags |= flags;
+
+	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -862,7 +873,7 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-	int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+	int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
 	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
 	int desired_bpp;
 
@@ -904,7 +915,7 @@
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->clock > portclock_limit) {
+	if (adjusted_mode->crtc_clock > portclock_limit) {
 		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
 		return false;
 	}
@@ -1063,7 +1074,7 @@
 	return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@
 
 	/* Enable clock channels for this port */
 	mutex_lock(&dev_priv->dpio_lock);
-	val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+	val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
 	val = 0;
 	if (pipe)
 		val |= (1<<21);
 	else
 		val &= ~(1<<21);
 	val |= 0x001000c4;
-	vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+	vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
 
 	/* HDMI 1.0V-2dB */
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
 			 0x2b245f5f);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
 			 0x5578b83a);
-	vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
 			 0x0c782040);
-	vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
 			 0x2b247878);
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
 			 0x00002000);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
 			 DPIO_TX_OCALINIT_EN);
 
 	/* Program lane clock */
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
 			 0x00760018);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
 			 0x00400888);
 	mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1116,55 +1127,60 @@
 	vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	if (!IS_VALLEYVIEW(dev))
 		return;
 
 	/* Program Tx lane resets to default */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
 			 DPIO_PCS_TX_LANE2_RESET |
 			 DPIO_PCS_TX_LANE1_RESET);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
 			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
 			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
 			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
 			 DPIO_PCS_CLK_SOFT_RESET);
 
 	/* Fix up inter-pair skew failure */
-	vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-	vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-	vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 
-	vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
 			 0x00002000);
-	vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+	vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
 			 DPIO_TX_OCALINIT_EN);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
-static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(encoder->base.crtc);
 	int port = vlv_dport_to_channel(dport);
+	int pipe = intel_crtc->pipe;
 
 	/* Reset lanes to avoid HDMI flicker (VLV w/a) */
 	mutex_lock(&dev_priv->dpio_lock);
-	vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
-	vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
+	vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
 	mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1211,6 +1227,7 @@
 
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
+	connector->stereo_allowed = 1;
 
 	switch (port) {
 	case PORT_B:
@@ -1275,11 +1292,11 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 
-	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
 	if (!intel_dig_port)
 		return;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_dig_port);
 		return;
@@ -1296,10 +1313,10 @@
 	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 	intel_encoder->get_config = intel_hdmi_get_config;
 	if (IS_VALLEYVIEW(dev)) {
-		intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
-		intel_encoder->pre_enable = intel_hdmi_pre_enable;
+		intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+		intel_encoder->pre_enable = vlv_hdmi_pre_enable;
 		intel_encoder->enable = vlv_enable_hdmi;
-		intel_encoder->post_disable = intel_hdmi_post_disable;
+		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->enable = intel_enable_hdmi;
 	}
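
The new intel_hdmi_get_config() code recovers the dot clock from the port clock: a 12bpc pixel needs 36 bits while the TMDS link carries 24 bits per clock, so the link runs at 3/2 the pixel rate and the pixel clock is port_clock * 2 / 3; intel_hdmi_compute_config() applies the inverse factor when checking clock_12bpc against the port clock limit. A tiny sketch of that arithmetic with a hypothetical 148500 kHz (1080p) pixel clock:

#include <stdio.h>

int main(void)
{
	int crtc_clock = 148500;	/* example 1080p pixel clock in kHz */

	/* 12bpc sends 36 bits per pixel over a 24-bit-per-clock TMDS link,
	 * so the port clock must run at 3/2 the pixel clock... */
	int port_clock_12bpc = crtc_clock * 3 / 2;

	/* ...and get_config recovers the pixel clock by inverting that. */
	int dotclock = port_clock_12bpc * 2 / 3;

	printf("port=%d kHz, dotclock=%d kHz\n", port_clock_12bpc, dotclock);
	return 0;
}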
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7..2ca17b1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+enum disp_clk {
+	CDCLK,
+	CZCLK
+};
+
 struct gmbus_port {
 	const char *name;
 	int reg;
@@ -58,10 +63,69 @@
 	return container_of(i2c, struct intel_gmbus, adapter);
 }
 
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+			    enum disp_clk clk)
+{
+	u32 reg_val;
+	int clk_ratio;
+
+	reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
+	if (clk == CDCLK)
+		clk_ratio =
+			((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+	else
+		clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+	return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+	int vco_freq[] = { 800, 1600, 2000, 2400 };
+	int gmbus_freq = 0, cdclk_div, hpll_freq;
+
+	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+	/* Skip setting the gmbus freq if BIOS has already programmed it */
+	if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
+		return;
+
+	/* Obtain SKU information */
+	mutex_lock(&dev_priv->dpio_lock);
+	hpll_freq =
+		vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Get the CDCLK divide ratio */
+	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (cdclk_div)
+		gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+
+	if (WARN_ON(gmbus_freq == 0))
+		return;
+
+	I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
+
 void
 intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * In a BIOS-less system, program the correct gmbus frequency
+	 * before reading the EDID.
+	 */
+	if (IS_VALLEYVIEW(dev))
+		gmbus_set_freq(dev_priv);
+
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
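
gmbus_set_freq() above programs GMBUSFREQ_VLV with twice the HPLL VCO frequency (selected by the CCK fuse SKU bits) divided by the CDCLK ratio read from CZCLK_CDCLK_FREQ_RATIO. A small sketch of just that arithmetic; the fuse value and divide ratio below are made-up example inputs, not values from the patch:

#include <stdio.h>

int main(void)
{
	/* VCO frequencies in MHz, indexed by the CCK fuse HPLL field,
	 * exactly as in gmbus_set_freq() above. */
	int vco_freq[] = { 800, 1600, 2000, 2400 };

	int hpll_freq = 1;	/* hypothetical fuse reading: 1600 MHz VCO */
	int cdclk_div = 10;	/* hypothetical divide ratio (register field + 1) */
	int gmbus_freq = 0;

	if (cdclk_div)
		gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;

	printf("GMBUSFREQ_VLV <- %d\n", gmbus_freq);	/* 320 for these inputs */
	return 0;
}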
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 831a5c0..c3b4da7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -92,6 +92,7 @@
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 lvds_reg, tmp, flags = 0;
+	int dotclock;
 
 	if (HAS_PCH_SPLIT(dev))
 		lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@
 
 		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
 	}
+
+	dotclock = pipe_config->port_clock;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -198,7 +206,8 @@
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -217,13 +226,15 @@
 	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
-	intel_panel_enable_backlight(dev, intel_crtc->pipe);
+	intel_panel_enable_backlight(intel_connector);
 }
 
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -235,7 +246,7 @@
 		stat_reg = PP_STATUS;
 	}
 
-	intel_panel_disable_backlight(dev);
+	intel_panel_disable_backlight(intel_connector);
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
@@ -466,7 +477,6 @@
 
 	intel_panel_fini(&lvds_connector->base.panel);
 
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -700,6 +710,22 @@
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D410PT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D425KT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Intel D510MO",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
@@ -786,7 +812,8 @@
 		return true;
 
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-		struct child_device_config *child = dev_priv->vbt.child_dev + i;
+		union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+		struct old_child_dev_config *child = &uchild->old;
 
 		/* If the device type is not LFP, continue.
 		 * We have to check both the new identifiers as well as the
@@ -940,11 +967,11 @@
 		}
 	}
 
-	lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
 
-	lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+	lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
 	if (!lvds_connector) {
 		kfree(lvds_encoder);
 		return;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 119771f..91b68dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -36,8 +36,11 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
+#define PCI_ASLE		0xe4
+#define PCI_ASLS		0xfc
+#define PCI_SWSCI		0xe8
+#define PCI_SWSCI_SCISEL	(1 << 15)
+#define PCI_SWSCI_GSSCIE	(1 << 0)
 
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
@@ -107,25 +110,38 @@
 	u32 epfm;       /* enabled panel fitting modes */
 	u8 plut[74];    /* panel LUT and identifier */
 	u32 pfmb;       /* PWM freq and min brightness */
-	u8 rsvd[102];
+	u32 cddv;       /* color correction default values */
+	u32 pcft;       /* power conservation features */
+	u32 srot;       /* supported rotation angles */
+	u32 iuer;       /* IUER events */
+	u8 rsvd[86];
 } __attribute__((packed));
 
 /* Driver readiness indicator */
 #define ASLE_ARDY_READY		(1 << 0)
 #define ASLE_ARDY_NOT_READY	(0 << 0)
 
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM     (1 << 0)
-#define ASLE_SET_BACKLIGHT     (1 << 1)
-#define ASLE_SET_PFIT          (1 << 2)
-#define ASLE_SET_PWM_FREQ      (1 << 3)
-#define ASLE_REQ_MSK           0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED	(1<<10)
-#define ASLE_BACKLIGHT_FAILED	(1<<12)
-#define ASLE_PFIT_FAILED	(1<<14)
-#define ASLE_PWM_FREQ_FAILED	(1<<16)
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM		(1 << 0)
+#define ASLC_SET_BACKLIGHT		(1 << 1)
+#define ASLC_SET_PFIT			(1 << 2)
+#define ASLC_SET_PWM_FREQ		(1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES	(1 << 4)
+#define ASLC_BUTTON_ARRAY		(1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR	(1 << 6)
+#define ASLC_DOCKING_INDICATOR		(1 << 7)
+#define ASLC_ISCT_STATE_CHANGE		(1 << 8)
+#define ASLC_REQ_MSK			0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED		(1 << 10)
+#define ASLC_BACKLIGHT_FAILED		(1 << 12)
+#define ASLC_PFIT_FAILED		(1 << 14)
+#define ASLC_PWM_FREQ_FAILED		(1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED	(1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED	(1 << 20)
+#define ASLC_CONVERTIBLE_FAILED		(1 << 22)
+#define ASLC_DOCKING_FAILED		(1 << 24)
+#define ASLC_ISCT_STATE_FAILED		(1 << 26)
 
 /* Technology enabled indicator */
 #define ASLE_TCHE_ALS_EN	(1 << 0)
@@ -151,6 +167,60 @@
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+/* IUER */
+#define ASLE_IUER_DOCKING		(1 << 7)
+#define ASLE_IUER_CONVERTIBLE		(1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN	(1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN	(1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN		(1 << 2)
+#define ASLE_IUER_WINDOWS_BTN		(1 << 1)
+#define ASLE_IUER_POWER_BTN		(1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR		(1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT	1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK	(0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT	8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT	8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK	(0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT	5
+#define SWSCI_SCIC_EXIT_STATUS_MASK	(7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS	1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+	((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+	 (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA			4
+#define SWSCI_GBDA_SUPPORTED_CALLS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD		SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB			6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
 #define ACPI_OTHER_OUTPUT (0<<8)
 #define ACPI_VGA_OUTPUT (1<<8)
 #define ACPI_TV_OUTPUT (2<<8)
@@ -158,24 +228,224 @@
 #define ACPI_LVDS_OUTPUT (4<<8)
 
 #ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+	u32 main_function, sub_function, scic;
+	u16 pci_swsci;
+	u32 dslp;
+
+	if (!swsci)
+		return -ENODEV;
+
+	main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+		SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+	sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+		SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+	/* Check if we can call the function. See swsci_setup for details. */
+	if (main_function == SWSCI_SBCB) {
+		if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	} else if (main_function == SWSCI_GBDA) {
+		if ((dev_priv->opregion.swsci_gbda_sub_functions &
+		     (1 << sub_function)) == 0)
+			return -EINVAL;
+	}
+
+	/* Driver sleep timeout in ms. */
+	dslp = ioread32(&swsci->dslp);
+	if (!dslp) {
+		/* The spec says 2ms should be the default, but it's too small
+		 * for some machines. */
+		dslp = 50;
+	} else if (dslp > 500) {
+		/* Hey bios, trust must be earned. */
+		WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
+		dslp = 500;
+	}
+
+	/* The spec tells us to do this, but we are the only user... */
+	scic = ioread32(&swsci->scic);
+	if (scic & SWSCI_SCIC_INDICATOR) {
+		DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+		return -EBUSY;
+	}
+
+	scic = function | SWSCI_SCIC_INDICATOR;
+
+	iowrite32(parm, &swsci->parm);
+	iowrite32(scic, &swsci->scic);
+
+	/* Ensure SCI event is selected and event trigger is cleared. */
+	pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+	if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+		pci_swsci |= PCI_SWSCI_SCISEL;
+		pci_swsci &= ~PCI_SWSCI_GSSCIE;
+		pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+	}
+
+	/* Use event trigger to tell bios to check the mail. */
+	pci_swsci |= PCI_SWSCI_GSSCIE;
+	pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+	/* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+	if (wait_for(C, dslp)) {
+		DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+		SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+	/* Note: scic == 0 is an error! */
+	if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+		DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+		return -EIO;
+	}
+
+	if (parm_out)
+		*parm_out = ioread32(&swsci->parm);
+
+	return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT			0
+#define DISPLAY_TYPE_TV				1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL	2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL	3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+				  bool enable)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 parm = 0;
+	u32 type = 0;
+	u32 port;
+
+	/* don't care about old stuff for now */
+	if (!HAS_DDI(dev))
+		return 0;
+
+	port = intel_ddi_get_encoder_port(intel_encoder);
+	if (port == PORT_E) {
+		port = 0;
+	} else {
+		parm |= 1 << port;
+		port++;
+	}
+
+	if (!enable)
+		parm |= 4 << 8;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_ANALOG:
+		type = DISPLAY_TYPE_CRT;
+		break;
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_HDMI:
+		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+		break;
+	case INTEL_OUTPUT_EDP:
+		type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+			  intel_encoder->type);
+		return -EINVAL;
+	}
+
+	parm |= type << (16 + port * 3);
+
+	return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+	pci_power_t pci_power_state;
+	u32 parm;
+} power_state_map[] = {
+	{ PCI_D0,	0x00 },
+	{ PCI_D1,	0x01 },
+	{ PCI_D2,	0x02 },
+	{ PCI_D3hot,	0x04 },
+	{ PCI_D3cold,	0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	int i;
+
+	if (!HAS_DDI(dev))
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+		if (state == power_state_map[i].pci_power_state)
+			return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+				     power_state_map[i].parm, NULL);
+	}
+
+	return -EINVAL;
+}
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector = NULL;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
 	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 ret = 0;
+	bool found = false;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
 	if (!(bclp & ASLE_BCLP_VALID))
-		return ASLE_BACKLIGHT_FAILED;
+		return ASLC_BACKLIGHT_FAILED;
 
 	bclp &= ASLE_BCLP_MSK;
 	if (bclp > 255)
-		return ASLE_BACKLIGHT_FAILED;
+		return ASLC_BACKLIGHT_FAILED;
 
-	intel_panel_set_backlight(dev, bclp, 255);
+	mutex_lock(&dev->mode_config.mutex);
+	/*
+	 * Could match the OpRegion connector here instead, but we'd also need
+	 * to verify the connector could handle a backlight call.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc) {
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		ret = ASLC_BACKLIGHT_FAILED;
+		goto out;
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			intel_connector = to_intel_connector(connector);
+
+	if (!intel_connector) {
+		ret = ASLC_BACKLIGHT_FAILED;
+		goto out;
+	}
+
+	DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+	intel_panel_set_backlight(intel_connector, bclp, 255);
 	iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
-	return 0;
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
 }
 
 static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
@@ -183,13 +453,13 @@
 	/* alsi is the current ALS reading in lux. 0 indicates below sensor
 	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
 	DRM_DEBUG_DRIVER("Illum is not supported\n");
-	return ASLE_ALS_ILLUM_FAILED;
+	return ASLC_ALS_ILLUM_FAILED;
 }
 
 static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
 {
 	DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-	return ASLE_PWM_FREQ_FAILED;
+	return ASLC_PWM_FREQ_FAILED;
 }
 
 static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +467,118 @@
 	/* Panel fitting is currently controlled by the X code, so this is a
 	   noop until modesetting support works fully */
 	DRM_DEBUG_DRIVER("Pfit is not supported\n");
-	return ASLE_PFIT_FAILED;
+	return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+	DRM_DEBUG_DRIVER("SROT is not supported\n");
+	return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+	if (!iuer)
+		DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+	if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+	if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+	if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+	if (iuer & ASLE_IUER_WINDOWS_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+	if (iuer & ASLE_IUER_POWER_BTN)
+		DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+	return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_CONVERTIBLE)
+		DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+	else
+		DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+	return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+	if (iuer & ASLE_IUER_DOCKING)
+		DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+	else
+		DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+	return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("ISCT is not supported\n");
+	return ASLC_ISCT_STATE_FAILED;
+}
+
+static void asle_work(struct work_struct *work)
+{
+	struct intel_opregion *opregion =
+		container_of(work, struct intel_opregion, asle_work);
+	struct drm_i915_private *dev_priv =
+		container_of(opregion, struct drm_i915_private, opregion);
+	struct drm_device *dev = dev_priv->dev;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 aslc_stat = 0;
+	u32 aslc_req;
+
+	if (!asle)
+		return;
+
+	aslc_req = ioread32(&asle->aslc);
+
+	if (!(aslc_req & ASLC_REQ_MSK)) {
+		DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+				 aslc_req);
+		return;
+	}
+
+	if (aslc_req & ASLC_SET_ALS_ILLUM)
+		aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+	if (aslc_req & ASLC_SET_BACKLIGHT)
+		aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+	if (aslc_req & ASLC_SET_PFIT)
+		aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+	if (aslc_req & ASLC_SET_PWM_FREQ)
+		aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+
+	if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+		aslc_stat |= asle_set_supported_rotation_angles(dev,
+							ioread32(&asle->srot));
+
+	if (aslc_req & ASLC_BUTTON_ARRAY)
+		aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+		aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_DOCKING_INDICATOR)
+		aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+	if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+		aslc_stat |= asle_isct_state(dev);
+
+	iowrite32(aslc_stat, &asle->aslc);
 }
 
 void intel_opregion_asle_intr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-	u32 asle_stat = 0;
-	u32 asle_req;
 
-	if (!asle)
-		return;
-
-	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
-
-	if (!asle_req) {
-		DRM_DEBUG_DRIVER("non asle set request??\n");
-		return;
-	}
-
-	if (asle_req & ASLE_SET_ALS_ILLUM)
-		asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
-
-	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
-
-	if (asle_req & ASLE_SET_PFIT)
-		asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
-
-	if (asle_req & ASLE_SET_PWM_FREQ)
-		asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
-
-	iowrite32(asle_stat, &asle->aslc);
+	if (dev_priv->opregion.asle)
+		schedule_work(&dev_priv->opregion.asle_work);
 }
 
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -432,6 +781,8 @@
 	if (opregion->asle)
 		iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
 
+	cancel_work_sync(&dev_priv->opregion.asle_work);
+
 	if (opregion->acpi) {
 		iowrite32(0, &opregion->acpi->drdy);
 
@@ -446,8 +797,68 @@
 	opregion->swsci = NULL;
 	opregion->asle = NULL;
 	opregion->vbt = NULL;
+	opregion->lid_state = NULL;
 }
-#endif
+
+static void swsci_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	bool requested_callbacks = false;
+	u32 tmp;
+
+	/* Sub-function code 0 is okay, let's allow them. */
+	opregion->swsci_gbda_sub_functions = 1;
+	opregion->swsci_sbcb_sub_functions = 1;
+
+	/* We use GBDA to ask for supported GBDA calls. */
+	if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		tmp <<= 1;
+		opregion->swsci_gbda_sub_functions |= tmp;
+	}
+
+	/*
+	 * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+	 * must not call interfaces that are not specifically requested by the
+	 * bios.
+	 */
+	if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+		/* here, the bits already match sub-function codes */
+		opregion->swsci_sbcb_sub_functions |= tmp;
+		requested_callbacks = true;
+	}
+
+	/*
+	 * But we use SBCB to ask for _supported_ SBCB calls. This does not mean
+	 * the callback is _requested_. But we still can't call interfaces that
+	 * are not requested.
+	 */
+	if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+		/* make the bits match the sub-function codes */
+		u32 low = tmp & 0x7ff;
+		u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+		tmp = (high << 4) | (low << 1) | 1;
+
+		/* best guess what to do with supported wrt requested */
+		if (requested_callbacks) {
+			u32 req = opregion->swsci_sbcb_sub_functions;
+			if ((req & tmp) != req)
+				DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+			/* XXX: for now, trust the requested callbacks */
+			/* opregion->swsci_sbcb_sub_functions &= tmp; */
+		} else {
+			opregion->swsci_sbcb_sub_functions |= tmp;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+			 opregion->swsci_gbda_sub_functions,
+			 opregion->swsci_sbcb_sub_functions);
+}
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif  /* CONFIG_ACPI */
 
 int intel_opregion_setup(struct drm_device *dev)
 {
@@ -465,6 +876,8 @@
 		return -ENOTSUPP;
 	}
 
+	INIT_WORK(&opregion->asle_work, asle_work);
+
 	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
@@ -490,6 +903,7 @@
 	if (mboxes & MBOX_SWSCI) {
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+		swsci_setup(dev);
 	}
 	if (mboxes & MBOX_ASLE) {
 		DRM_DEBUG_DRIVER("ASLE supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ddfd0ae..a98a990 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -821,14 +821,11 @@
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 					  struct intel_crtc *crtc)
 {
-	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
 	if (!crtc->active)
 		return -EINVAL;
 
 	/* can't use the overlay with double wide pipe */
-	if (INTEL_INFO(overlay->dev)->gen < 4 &&
-	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+	if (crtc->config.double_wide)
 		return -EINVAL;
 
 	return 0;
@@ -1056,7 +1053,7 @@
 		return ret;
 	}
 
-	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+	params = kmalloc(sizeof(*params), GFP_KERNEL);
 	if (!params)
 		return -ENOMEM;
 
@@ -1323,7 +1320,7 @@
 	if (!HAS_OVERLAY(dev))
 		return;
 
-	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return;
 
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 293564a..f161ac0 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -50,23 +50,22 @@
 			struct intel_crtc_config *pipe_config,
 			int fitting_mode)
 {
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 	int x, y, width, height;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	x = y = width = height = 0;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto done;
 
 	switch (fitting_mode) {
 	case DRM_MODE_SCALE_CENTER:
-		width = mode->hdisplay;
-		height = mode->vdisplay;
+		width = pipe_config->pipe_src_w;
+		height = pipe_config->pipe_src_h;
 		x = (adjusted_mode->hdisplay - width + 1)/2;
 		y = (adjusted_mode->vdisplay - height + 1)/2;
 		break;
@@ -74,17 +73,19 @@
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
 		{
-			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			u32 scaled_width = adjusted_mode->hdisplay
+				* pipe_config->pipe_src_h;
+			u32 scaled_height = pipe_config->pipe_src_w
+				* adjusted_mode->vdisplay;
 			if (scaled_width > scaled_height) { /* pillar */
-				width = scaled_height / mode->vdisplay;
+				width = scaled_height / pipe_config->pipe_src_h;
 				if (width & 1)
 					width++;
 				x = (adjusted_mode->hdisplay - width + 1) / 2;
 				y = 0;
 				height = adjusted_mode->vdisplay;
 			} else if (scaled_width < scaled_height) { /* letter */
-				height = scaled_width / mode->hdisplay;
+				height = scaled_width / pipe_config->pipe_src_w;
 				if (height & 1)
 				    height++;
 				y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@
 	return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+
+	/* 965+ is easy, it does everything in hw */
+	if (scaled_width > scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_PILLAR;
+	else if (scaled_width < scaled_height)
+		*pfit_control |= PFIT_ENABLE |
+			PFIT_SCALING_LETTER;
+	else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+		*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+			      u32 *pfit_control, u32 *pfit_pgm_ratios,
+			      u32 *border)
+{
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	u32 scaled_width = adjusted_mode->hdisplay *
+		pipe_config->pipe_src_h;
+	u32 scaled_height = pipe_config->pipe_src_w *
+		adjusted_mode->vdisplay;
+	u32 bits;
+
+	/*
+	 * For earlier chips we have to calculate the scaling
+	 * ratio by hand and program it into the
+	 * PFIT_PGM_RATIO register
+	 */
+	if (scaled_width > scaled_height) { /* pillar */
+		centre_horizontally(adjusted_mode,
+				    scaled_height /
+				    pipe_config->pipe_src_h);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+						    adjusted_mode->vdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else if (scaled_width < scaled_height) { /* letter */
+		centre_vertically(adjusted_mode,
+				  scaled_width /
+				  pipe_config->pipe_src_w);
+
+		*border = LVDS_BORDER_ENABLE;
+		if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+			bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+						    adjusted_mode->hdisplay);
+
+			*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+					     bits << PFIT_VERT_SCALE_SHIFT);
+			*pfit_control |= (PFIT_ENABLE |
+					  VERT_INTERP_BILINEAR |
+					  HORIZ_INTERP_BILINEAR);
+		}
+	} else {
+		/* Aspects match, Let hw scale both directions */
+		*pfit_control |= (PFIT_ENABLE |
+				  VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+				  VERT_INTERP_BILINEAR |
+				  HORIZ_INTERP_BILINEAR);
+	}
+}
+
 void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 			      struct intel_crtc_config *pipe_config,
 			      int fitting_mode)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-	struct drm_display_mode *mode, *adjusted_mode;
+	struct drm_display_mode *adjusted_mode;
 
-	mode = &pipe_config->requested_mode;
 	adjusted_mode = &pipe_config->adjusted_mode;
 
 	/* Native modes don't need fitting */
-	if (adjusted_mode->hdisplay == mode->hdisplay &&
-	    adjusted_mode->vdisplay == mode->vdisplay)
+	if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+	    adjusted_mode->vdisplay == pipe_config->pipe_src_h)
 		goto out;
 
 	switch (fitting_mode) {
@@ -193,81 +270,25 @@
 		 * For centered modes, we have to calculate border widths &
 		 * heights and modify the values programmed into the CRTC.
 		 */
-		centre_horizontally(adjusted_mode, mode->hdisplay);
-		centre_vertically(adjusted_mode, mode->vdisplay);
+		centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+		centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
 		border = LVDS_BORDER_ENABLE;
 		break;
 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
-		if (INTEL_INFO(dev)->gen >= 4) {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-
-			/* 965+ is easy, it does everything in hw */
-			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_PILLAR;
-			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_ENABLE |
-					PFIT_SCALING_LETTER;
-			else if (adjusted_mode->hdisplay != mode->hdisplay)
-				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-		} else {
-			u32 scaled_width = adjusted_mode->hdisplay *
-				mode->vdisplay;
-			u32 scaled_height = mode->hdisplay *
-				adjusted_mode->vdisplay;
-			/*
-			 * For earlier chips we have to calculate the scaling
-			 * ratio by hand and program it into the
-			 * PFIT_PGM_RATIO register
-			 */
-			if (scaled_width > scaled_height) { /* pillar */
-				centre_horizontally(adjusted_mode,
-						    scaled_height /
-						    mode->vdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->vdisplay != adjusted_mode->vdisplay) {
-					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else if (scaled_width < scaled_height) { /* letter */
-				centre_vertically(adjusted_mode,
-						  scaled_width /
-						  mode->hdisplay);
-
-				border = LVDS_BORDER_ENABLE;
-				if (mode->hdisplay != adjusted_mode->hdisplay) {
-					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-							    bits << PFIT_VERT_SCALE_SHIFT);
-					pfit_control |= (PFIT_ENABLE |
-							 VERT_INTERP_BILINEAR |
-							 HORIZ_INTERP_BILINEAR);
-				}
-			} else {
-				/* Aspects match, Let hw scale both directions */
-				pfit_control |= (PFIT_ENABLE |
-						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-						 VERT_INTERP_BILINEAR |
-						 HORIZ_INTERP_BILINEAR);
-			}
-		}
+		if (INTEL_INFO(dev)->gen >= 4)
+			i965_scale_aspect(pipe_config, &pfit_control);
+		else
+			i9xx_scale_aspect(pipe_config, &pfit_control,
+					  &pfit_pgm_ratios, &border);
 		break;
 	case DRM_MODE_SCALE_FULLSCREEN:
 		/*
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		if (mode->vdisplay != adjusted_mode->vdisplay ||
-		    mode->hdisplay != adjusted_mode->hdisplay) {
+		if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+		    pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
 			pfit_control |= PFIT_ENABLE;
 			if (INTEL_INFO(dev)->gen >= 4)
 				pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (IS_GEN4(dev))
 		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
 
 	if (IS_GEN2(dev))
@@ -320,7 +341,7 @@
 /* XXX: query mode clock or hardware clock and program max PWM appropriately
  * when it's 0.
  */
-static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
@@ -337,6 +358,21 @@
 			val = dev_priv->regfile.saveBLC_PWM_CTL2;
 			I915_WRITE(BLC_PWM_PCH_CTL2, val);
 		}
+	} else if (IS_VALLEYVIEW(dev)) {
+		val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL = val;
+			dev_priv->regfile.saveBLC_PWM_CTL2 =
+				I915_READ(VLV_BLC_PWM_CTL2(pipe));
+		} else if (val == 0) {
+			val = dev_priv->regfile.saveBLC_PWM_CTL;
+			I915_WRITE(VLV_BLC_PWM_CTL(pipe), val);
+			I915_WRITE(VLV_BLC_PWM_CTL2(pipe),
+				   dev_priv->regfile.saveBLC_PWM_CTL2);
+		}
+
+		if (!val)
+			val = 0x0f42ffff;
 	} else {
 		val = I915_READ(BLC_PWM_CTL);
 		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
@@ -356,11 +392,12 @@
 	return val;
 }
 
-static u32 intel_panel_get_max_backlight(struct drm_device *dev)
+static u32 intel_panel_get_max_backlight(struct drm_device *dev,
+					 enum pipe pipe)
 {
 	u32 max;
 
-	max = i915_read_blc_pwm_ctl(dev);
+	max = i915_read_blc_pwm_ctl(dev, pipe);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		max >>= 16;
@@ -386,7 +423,8 @@
 	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
 	"It will then be included in an upcoming module version.");
 module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
-static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+static u32 intel_panel_compute_brightness(struct drm_device *dev,
+					  enum pipe pipe, u32 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -395,7 +433,7 @@
 
 	if (i915_panel_invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		u32 max = intel_panel_get_max_backlight(dev);
+		u32 max = intel_panel_get_max_backlight(dev, pipe);
 		if (max)
 			return max - val;
 	}
@@ -403,18 +441,25 @@
 	return val;
 }
 
-static u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev,
+				     enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 	unsigned long flags;
+	int reg;
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
-		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+		if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL(pipe);
+		else
+			reg = BLC_PWM_CTL;
+
+		val = I915_READ(reg) & BACKLIGHT_DUTY_CYCLE_MASK;
 		if (INTEL_INFO(dev)->gen < 4)
 			val >>= 1;
 
@@ -426,7 +471,7 @@
 		}
 	}
 
-	val = intel_panel_compute_brightness(dev, val);
+	val = intel_panel_compute_brightness(dev, pipe, val);
 
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
@@ -441,19 +486,21 @@
 	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+					       enum pipe pipe, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
+	int reg;
 
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
-	level = intel_panel_compute_brightness(dev, level);
+	level = intel_panel_compute_brightness(dev, pipe, level);
 
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
 	if (is_backlight_combination_mode(dev)) {
-		u32 max = intel_panel_get_max_backlight(dev);
+		u32 max = intel_panel_get_max_backlight(dev, pipe);
 		u8 lbpc;
 
 		/* we're screwed, but keep behaviour backwards compatible */
@@ -465,23 +512,34 @@
 		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
 	}
 
-	tmp = I915_READ(BLC_PWM_CTL);
+	if (IS_VALLEYVIEW(dev))
+		reg = VLV_BLC_PWM_CTL(pipe);
+	else
+		reg = BLC_PWM_CTL;
+
+	tmp = I915_READ(reg);
 	if (INTEL_INFO(dev)->gen < 4)
 		level <<= 1;
 	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(BLC_PWM_CTL, tmp | level);
+	I915_WRITE(reg, tmp | level);
 }
 
 /* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
+void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
+			       u32 max)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 freq;
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-	freq = intel_panel_get_max_backlight(dev);
+	freq = intel_panel_get_max_backlight(dev, pipe);
 	if (!freq) {
 		/* we are screwed, bail out */
 		goto out;
@@ -498,16 +556,21 @@
 		dev_priv->backlight.device->props.brightness = level;
 
 	if (dev_priv->backlight.enabled)
-		intel_panel_actually_set_backlight(dev, level);
+		intel_panel_actually_set_backlight(dev, pipe, level);
 out:
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_disable_backlight(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
 	/*
 	 * Do not disable backlight on the vgaswitcheroo path. When switching
 	 * away from i915, the other client may depend on i915 to handle the
@@ -522,12 +585,17 @@
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	dev_priv->backlight.enabled = false;
-	intel_panel_actually_set_backlight(dev, 0);
+	intel_panel_actually_set_backlight(dev, pipe, 0);
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
-		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+		if (HAS_PCH_SPLIT(dev))
+			reg = BLC_PWM_CPU_CTL2;
+		else if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL2(pipe);
+		else
+			reg = BLC_PWM_CTL2;
 
 		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
 
@@ -541,18 +609,25 @@
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
-void intel_panel_enable_backlight(struct drm_device *dev,
-				  enum pipe pipe)
+void intel_panel_enable_backlight(struct intel_connector *connector)
 {
+	struct drm_device *dev = connector->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	enum transcoder cpu_transcoder =
 		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
 	unsigned long flags;
 
+	if (pipe == INVALID_PIPE)
+		return;
+
+	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
 	if (dev_priv->backlight.level == 0) {
-		dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+		dev_priv->backlight.level = intel_panel_get_max_backlight(dev,
+									  pipe);
 		if (dev_priv->backlight.device)
 			dev_priv->backlight.device->props.brightness =
 				dev_priv->backlight.level;
@@ -561,8 +636,12 @@
 	if (INTEL_INFO(dev)->gen >= 4) {
 		uint32_t reg, tmp;
 
-		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
-
+		if (HAS_PCH_SPLIT(dev))
+			reg = BLC_PWM_CPU_CTL2;
+		else if (IS_VALLEYVIEW(dev))
+			reg = VLV_BLC_PWM_CTL2(pipe);
+		else
+			reg = BLC_PWM_CTL2;
 
 		tmp = I915_READ(reg);
 
@@ -602,16 +681,41 @@
 	 * registers are set.
 	 */
 	dev_priv->backlight.enabled = true;
-	intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+	intel_panel_actually_set_backlight(dev, pipe,
+					   dev_priv->backlight.level);
 
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		enum pipe pipe;
+
+		for_each_pipe(pipe) {
+			u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
+
+			/* Skip if the modulation freq is already set */
+			if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
+				continue;
+
+			cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
+			I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
+				   cur_val);
+		}
+	}
+}
+
 static void intel_panel_init_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->backlight.level = intel_panel_get_backlight(dev);
+	intel_panel_init_backlight_regs(dev);
+
+	dev_priv->backlight.level = intel_panel_get_backlight(dev, 0);
 	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
 }
 
@@ -637,19 +741,34 @@
 	}
 }
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_panel_update_status(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
-	intel_panel_set_backlight(dev, bd->props.brightness,
+	struct intel_connector *connector = bl_get_data(bd);
+	struct drm_device *dev = connector->base.dev;
+
+	mutex_lock(&dev->mode_config.mutex);
+	DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+		      bd->props.brightness, bd->props.max_brightness);
+	intel_panel_set_backlight(connector, bd->props.brightness,
 				  bd->props.max_brightness);
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 }
 
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
-	struct drm_device *dev = bl_get_data(bd);
-	return intel_panel_get_backlight(dev);
+	struct intel_connector *connector = bl_get_data(bd);
+	struct drm_device *dev = connector->base.dev;
+	enum pipe pipe;
+
+	mutex_lock(&dev->mode_config.mutex);
+	pipe = intel_get_pipe_from_connector(connector);
+	mutex_unlock(&dev->mode_config.mutex);
+	if (pipe == INVALID_PIPE)
+		return 0;
+
+	return intel_panel_get_backlight(connector->base.dev, pipe);
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
@@ -674,7 +793,7 @@
 	props.brightness = dev_priv->backlight.level;
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
-	props.max_brightness = intel_panel_get_max_backlight(dev);
+	props.max_brightness = intel_panel_get_max_backlight(dev, 0);
 	spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 
 	if (props.max_brightness == 0) {
@@ -683,7 +802,8 @@
 	}
 	dev_priv->backlight.device =
 		backlight_device_register("intel_backlight",
-					  &connector->kdev, dev,
+					  connector->kdev,
+					  to_intel_connector(connector),
 					  &intel_panel_bl_ops, &props);
 
 	if (IS_ERR(dev_priv->backlight.device)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 26c2ea3..09ac9e7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -32,6 +32,27 @@
 #include <linux/module.h>
 #include <drm/i915_powerwell.h>
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, using down to 0V while at this stage.  This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled; as soon as a new workload arises the GPU wakes up automatically as well.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ from
+ * each other in the latency required to enter and leave RC6 and in the
+ * voltage consumed by the GPU in different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE			(1<<0)
+#define INTEL_RC6p_ENABLE			(1<<1)
+#define INTEL_RC6pp_ENABLE			(1<<2)
+
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reducing power consumption.
@@ -43,14 +64,6 @@
  * i915.i915_enable_fbc parameter
  */
 
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
-	/* Be paranoid as we can arrive here with only partial
-	 * state retrieved from the hardware during setup.
-	 */
-	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -241,18 +254,6 @@
 		dpfc_ctl &= ~DPFC_CTL_EN;
 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
-		if (IS_IVYBRIDGE(dev))
-			/* WaFbcDisableDpfcClockGating:ivb */
-			I915_WRITE(ILK_DSPCLK_GATE_D,
-				   I915_READ(ILK_DSPCLK_GATE_D) &
-				   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
-
-		if (IS_HASWELL(dev))
-			/* WaFbcDisableDpfcClockGating:hsw */
-			I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-				   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
-				   ~HSW_DPFC_GATING_DISABLE);
-
 		DRM_DEBUG_KMS("disabled FBC\n");
 	}
 }
@@ -282,18 +283,10 @@
 	if (IS_IVYBRIDGE(dev)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
-		/* WaFbcDisableDpfcClockGating:ivb */
-		I915_WRITE(ILK_DSPCLK_GATE_D,
-			   I915_READ(ILK_DSPCLK_GATE_D) |
-			   ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
 	} else {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw */
 		I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
 			   HSW_BYPASS_FBC_QUEUE);
-		/* WaFbcDisableDpfcClockGating:hsw */
-		I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
-			   I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
-			   HSW_DPFC_GATING_DISABLE);
 	}
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
@@ -378,7 +371,7 @@
 
 	intel_cancel_fbc_work(dev_priv);
 
-	work = kzalloc(sizeof *work, GFP_KERNEL);
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
 		dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +451,8 @@
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
-	unsigned int max_hdisplay, max_vdisplay;
+	const struct drm_display_mode *adjusted_mode;
+	unsigned int max_width, max_height;
 
 	if (!I915_HAS_FBC(dev)) {
 		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +476,7 @@
 	 */
 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
 		if (intel_crtc_active(tmp_crtc) &&
-		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
+		    to_intel_crtc(tmp_crtc)->primary_enabled) {
 			if (crtc) {
 				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
 					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +496,7 @@
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
+	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915_enable_fbc < 0 &&
 	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +509,8 @@
 			DRM_DEBUG_KMS("fbc disabled per module param\n");
 		goto out_disable;
 	}
-	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
 		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
 			DRM_DEBUG_KMS("mode incompatible with compression, "
 				      "disabling\n");
@@ -523,14 +518,14 @@
 	}
 
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-		max_hdisplay = 4096;
-		max_vdisplay = 2048;
+		max_width = 4096;
+		max_height = 2048;
 	} else {
-		max_hdisplay = 2048;
-		max_vdisplay = 1536;
+		max_width = 2048;
+		max_height = 1536;
 	}
-	if ((crtc->mode.hdisplay > max_hdisplay) ||
-	    (crtc->mode.vdisplay > max_vdisplay)) {
+	if (intel_crtc->config.pipe_src_w > max_width ||
+	    intel_crtc->config.pipe_src_h > max_height) {
 		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
 			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
 		goto out_disable;
@@ -1087,8 +1082,9 @@
 	return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
@@ -1105,8 +1101,12 @@
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		int clock = crtc->mode.clock;
+		const struct drm_display_mode *adjusted_mode;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		int clock;
+
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1166,7 @@
 			    int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int htotal, hdisplay, clock, pixel_size;
 	int line_time_us, line_count;
 	int entries, tlb_miss;
@@ -1177,9 +1178,10 @@
 		return false;
 	}
 
-	htotal = crtc->mode.htotal;
-	hdisplay = crtc->mode.hdisplay;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1252,7 @@
 			     int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	int hdisplay, htotal, pixel_size, clock;
 	unsigned long line_time_us;
 	int line_count, line_size;
@@ -1262,9 +1265,10 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1303,7 +1307,7 @@
 	if (!intel_crtc_active(crtc))
 		return false;
 
-	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
 
 	entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1369,9 @@
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1429,9 @@
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1482,9 @@
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	int srwm = 1;
@@ -1488,9 +1495,11 @@
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		int clock = crtc->mode.clock;
-		int htotal = crtc->mode.htotal;
-		int hdisplay = crtc->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(crtc)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1541,8 +1550,9 @@
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
@@ -1562,11 +1572,13 @@
 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
 	crtc = intel_get_crtc_for_plane(dev, 0);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planea_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		enabled = crtc;
@@ -1576,11 +1588,13 @@
 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
 	crtc = intel_get_crtc_for_plane(dev, 1);
 	if (intel_crtc_active(crtc)) {
+		const struct drm_display_mode *adjusted_mode;
 		int cpp = crtc->fb->bits_per_pixel / 8;
 		if (IS_GEN2(dev))
 			cpp = 4;
 
-		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 					       wm_info, fifo_size, cpp,
 					       latency_ns);
 		if (enabled == NULL)
@@ -1607,9 +1621,11 @@
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		int clock = enabled->mode.clock;
-		int htotal = enabled->mode.htotal;
-		int hdisplay = enabled->mode.hdisplay;
+		const struct drm_display_mode *adjusted_mode =
+			&to_intel_crtc(enabled)->config.adjusted_mode;
+		int clock = adjusted_mode->crtc_clock;
+		int htotal = adjusted_mode->htotal;
+		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
 		int entries;
@@ -1658,10 +1674,12 @@
 	}
 }
 
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
 {
+	struct drm_device *dev = unused_crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
 	int planea_wm;
 
@@ -1669,7 +1687,9 @@
 	if (crtc == NULL)
 		return;
 
-	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+				       &i830_wm_info,
 				       dev_priv->display.get_fifo_size(dev, 0),
 				       4, latency_ns);
 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1761,7 @@
 				  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
 	struct drm_crtc *crtc;
+	const struct drm_display_mode *adjusted_mode;
 	unsigned long line_time_us;
 	int hdisplay, htotal, pixel_size, clock;
 	int line_count, line_size;
@@ -1753,9 +1774,10 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	hdisplay = crtc->mode.hdisplay;
-	htotal = crtc->mode.htotal;
-	clock = crtc->mode.clock;
+	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+	clock = adjusted_mode->crtc_clock;
+	htotal = adjusted_mode->htotal;
+	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	line_time_us = (htotal * 1000) / clock;
@@ -1785,8 +1807,9 @@
 				   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fbc_wm, plane_wm, cursor_wm;
 	unsigned int enabled;
@@ -1868,8 +1891,9 @@
 	 */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -1970,8 +1994,9 @@
 		   cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int latency = dev_priv->wm.pri_latency[0] * 100;	/* In unit 0.1us */
 	u32 val;
@@ -2098,7 +2123,7 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config.adjusted_mode.clock;
+	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
@@ -2107,8 +2132,8 @@
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
 		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-		pipe_w = intel_crtc->config.requested_mode.hdisplay;
-		pipe_h = intel_crtc->config.requested_mode.vdisplay;
+		pipe_w = intel_crtc->config.pipe_src_w;
+		pipe_h = intel_crtc->config.pipe_src_h;
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
@@ -2176,27 +2201,18 @@
 	uint16_t fbc;
 };
 
-struct hsw_wm_values {
-	uint32_t wm_pipe[3];
-	uint32_t wm_lp[3];
-	uint32_t wm_lp_spr[3];
-	uint32_t wm_linetime[3];
-	bool enable_fbc_wm;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
 	bool sprites_enabled;
 	bool sprites_scaled;
-	bool fbc_wm_enabled;
 };
 
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
@@ -2225,7 +2241,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	uint32_t method1, method2;
@@ -2248,7 +2264,7 @@
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t mem_value)
 {
 	if (!params->active || !params->cur.enabled)
@@ -2262,7 +2278,7 @@
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
 				   uint32_t pri_val)
 {
 	if (!params->active || !params->pri.enabled)
@@ -2356,11 +2372,11 @@
 	return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-		       int level,
-		       const struct intel_wm_config *config,
-		       enum intel_ddb_partitioning ddb_partitioning,
-		       struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+				    int level,
+				    const struct intel_wm_config *config,
+				    enum intel_ddb_partitioning ddb_partitioning,
+				    struct hsw_wm_maximums *max)
 {
 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2368,9 +2384,9 @@
 	max->fbc = ilk_fbc_wm_max();
 }
 
-static bool ilk_check_wm(int level,
-			 const struct hsw_wm_maximums *max,
-			 struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+				  const struct hsw_wm_maximums *max,
+				  struct intel_wm_level *result)
 {
 	bool ret;
 
@@ -2406,14 +2422,12 @@
 		result->enable = true;
 	}
 
-	DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
 	return ret;
 }
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
 				 int level,
-				 struct hsw_pipe_wm_parameters *p,
+				 const struct hsw_pipe_wm_parameters *p,
 				 struct intel_wm_level *result)
 {
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2448,6 @@
 	result->enable = true;
 }
 
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-			      int level, struct hsw_wm_maximums *max,
-			      struct hsw_pipe_wm_parameters *params,
-			      struct intel_wm_level *result)
-{
-	enum pipe pipe;
-	struct intel_wm_level res[3];
-
-	for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
-		ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
-	result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
-	result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
-	result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
-	result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
-	result->enable = true;
-
-	return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-				    enum pipe pipe,
-				    struct hsw_pipe_wm_parameters *params)
-{
-	uint32_t pri_val, cur_val, spr_val;
-	/* WM0 latency values stored in 0.1us units */
-	uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
-	pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-	spr_val = ilk_compute_spr_wm(params, spr_latency);
-	cur_val = ilk_compute_cur_wm(params, cur_latency);
-
-	WARN(pri_val > 127,
-	     "Primary WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(spr_val > 127,
-	     "Sprite WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-	WARN(cur_val > 63,
-	     "Cursor WM error, mode not supported for pipe %c\n",
-	     pipe_name(pipe));
-
-	return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-	       (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-	       cur_val;
-}
-
 static uint32_t
 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
@@ -2554,19 +2519,22 @@
 		wm[3] *= 2;
 }
 
+static int ilk_wm_max_level(const struct drm_device *dev)
+{
+	/* how many WM levels are we expecting */
+	if (IS_HASWELL(dev))
+		return 4;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return 3;
+	else
+		return 2;
+}
+
 static void intel_print_wm_latency(struct drm_device *dev,
 				   const char *name,
 				   const uint16_t wm[5])
 {
-	int level, max_level;
-
-	/* how many WM levels are we expecting */
-	if (IS_HASWELL(dev))
-		max_level = 4;
-	else if (INTEL_INFO(dev)->gen >= 6)
-		max_level = 3;
-	else
-		max_level = 2;
+	int level, max_level = ilk_wm_max_level(dev);
 
 	for (level = 0; level <= max_level; level++) {
 		unsigned int latency = wm[level];
@@ -2606,101 +2574,154 @@
 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_device *dev,
-				      struct hsw_pipe_wm_parameters *params,
-				      struct hsw_wm_maximums *lp_max_1_2,
-				      struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+				      struct hsw_pipe_wm_parameters *p,
+				      struct intel_wm_config *config)
 {
-	struct drm_crtc *crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum pipe pipe = intel_crtc->pipe;
 	struct drm_plane *plane;
-	enum pipe pipe;
-	struct intel_wm_config config = {};
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		struct hsw_pipe_wm_parameters *p;
-
-		pipe = intel_crtc->pipe;
-		p = &params[pipe];
-
-		p->active = intel_crtc_active(crtc);
-		if (!p->active)
-			continue;
-
-		config.num_pipes_active++;
-
+	p->active = intel_crtc_active(crtc);
+	if (p->active) {
 		p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
 		p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 		p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
 		p->cur.bytes_per_pixel = 4;
-		p->pri.horiz_pixels =
-			intel_crtc->config.requested_mode.hdisplay;
+		p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
 		p->cur.horiz_pixels = 64;
 		/* TODO: for now, assume primary and cursor planes are always enabled. */
 		p->pri.enabled = true;
 		p->cur.enabled = true;
 	}
 
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		config->num_pipes_active += intel_crtc_active(crtc);
+
 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
-		struct hsw_pipe_wm_parameters *p;
 
-		pipe = intel_plane->pipe;
-		p = &params[pipe];
+		if (intel_plane->pipe == pipe)
+			p->spr = intel_plane->wm;
 
-		p->spr = intel_plane->wm;
+		config->sprites_enabled |= intel_plane->wm.enabled;
+		config->sprites_scaled |= intel_plane->wm.scaled;
+	}
+}
 
-		config.sprites_enabled |= p->spr.enabled;
-		config.sprites_scaled |= p->spr.scaled;
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+				  const struct hsw_pipe_wm_parameters *params,
+				  struct intel_pipe_wm *pipe_wm)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int level, max_level = ilk_wm_max_level(dev);
+	/* LP0 watermark maximums depend on this pipe alone */
+	struct intel_wm_config config = {
+		.num_pipes_active = 1,
+		.sprites_enabled = params->spr.enabled,
+		.sprites_scaled = params->spr.scaled,
+	};
+	struct hsw_wm_maximums max;
+
+	/* LP0 watermarks always use 1/2 DDB partitioning */
+	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+	for (level = 0; level <= max_level; level++)
+		ilk_compute_wm_level(dev_priv, level, params,
+				     &pipe_wm->wm[level]);
+
+	pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+	/* At least LP0 must be valid */
+	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
+}
+
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+			       int level,
+			       struct intel_wm_level *ret_wm)
+{
+	const struct intel_crtc *intel_crtc;
+
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		const struct intel_wm_level *wm =
+			&intel_crtc->wm.active.wm[level];
+
+		if (!wm->enable)
+			return;
+
+		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
 	}
 
-	ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+	ret_wm->enable = true;
+}
 
-	/* 5/6 split only in single pipe config on IVB+ */
-	if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
-		ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
-	else
-		*lp_max_5_6 = *lp_max_1_2;
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+			 const struct hsw_wm_maximums *max,
+			 struct intel_pipe_wm *merged)
+{
+	int level, max_level = ilk_wm_max_level(dev);
+
+	merged->fbc_wm_enabled = true;
+
+	/* merge each WM1+ level */
+	for (level = 1; level <= max_level; level++) {
+		struct intel_wm_level *wm = &merged->wm[level];
+
+		ilk_merge_wm_level(dev, level, wm);
+
+		if (!ilk_validate_wm_level(level, max, wm))
+			break;
+
+		/*
+		 * The spec says it is preferred to disable
+		 * FBC WMs instead of disabling a WM level.
+		 */
+		if (wm->fbc_val > max->fbc) {
+			merged->fbc_wm_enabled = false;
+			wm->fbc_val = 0;
+		}
+	}
+}
+
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
 }
 
 static void hsw_compute_wm_results(struct drm_device *dev,
-				   struct hsw_pipe_wm_parameters *params,
-				   struct hsw_wm_maximums *lp_maximums,
+				   const struct intel_pipe_wm *merged,
+				   enum intel_ddb_partitioning partitioning,
 				   struct hsw_wm_values *results)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct intel_wm_level lp_results[4] = {};
-	enum pipe pipe;
-	int level, max_level, wm_lp;
+	struct intel_crtc *intel_crtc;
+	int level, wm_lp;
 
-	for (level = 1; level <= 4; level++)
-		if (!hsw_compute_lp_wm(dev_priv, level,
-				       lp_maximums, params,
-				       &lp_results[level - 1]))
-			break;
-	max_level = level - 1;
+	results->enable_fbc_wm = merged->fbc_wm_enabled;
+	results->partitioning = partitioning;
 
-	memset(results, 0, sizeof(*results));
-
-	/* The spec says it is preferred to disable FBC WMs instead of disabling
-	 * a WM level. */
-	results->enable_fbc_wm = true;
-	for (level = 1; level <= max_level; level++) {
-		if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
-			results->enable_fbc_wm = false;
-			lp_results[level - 1].fbc_val = 0;
-		}
-	}
-
+	/* LP1+ register values */
 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
 		const struct intel_wm_level *r;
 
-		level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
-		if (level > max_level)
+		level = ilk_wm_lp_to_level(wm_lp, merged);
+
+		r = &merged->wm[level];
+		if (!r->enable)
 			break;
 
-		r = &lp_results[level - 1];
 		results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
 							  r->fbc_val,
 							  r->pri_val,
@@ -2708,116 +2729,158 @@
 		results->wm_lp_spr[wm_lp - 1] = r->spr_val;
 	}
 
-	for_each_pipe(pipe)
-		results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
-							     &params[pipe]);
+	/* LP0 register values */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+		enum pipe pipe = intel_crtc->pipe;
+		const struct intel_wm_level *r =
+			&intel_crtc->wm.active.wm[0];
 
-	for_each_pipe(pipe) {
-		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-		results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+		if (WARN_ON(!r->enable))
+			continue;
+
+		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+		results->wm_pipe[pipe] =
+			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+			r->cur_val;
 	}
 }
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-						  struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+						  struct intel_pipe_wm *r1,
+						  struct intel_pipe_wm *r2)
 {
-	int i, val_r1 = 0, val_r2 = 0;
+	int level, max_level = ilk_wm_max_level(dev);
+	int level1 = 0, level2 = 0;
 
-	for (i = 0; i < 3; i++) {
-		if (r1->wm_lp[i] & WM3_LP_EN)
-			val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
-		if (r2->wm_lp[i] & WM3_LP_EN)
-			val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+	for (level = 1; level <= max_level; level++) {
+		if (r1->wm[level].enable)
+			level1 = level;
+		if (r2->wm[level].enable)
+			level2 = level;
 	}
 
-	if (val_r1 == val_r2) {
-		if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+	if (level1 == level2) {
+		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
 			return r2;
 		else
 			return r1;
-	} else if (val_r1 > val_r2) {
+	} else if (level1 > level2) {
 		return r1;
 	} else {
 		return r2;
 	}
 }
 
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+					 const struct hsw_wm_values *old,
+					 const struct hsw_wm_values *new)
+{
+	unsigned int dirty = 0;
+	enum pipe pipe;
+	int wm_lp;
+
+	for_each_pipe(pipe) {
+		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+			dirty |= WM_DIRTY_LINETIME(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+
+		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+			dirty |= WM_DIRTY_PIPE(pipe);
+			/* Must disable LP1+ watermarks too */
+			dirty |= WM_DIRTY_LP_ALL;
+		}
+	}
+
+	if (old->enable_fbc_wm != new->enable_fbc_wm) {
+		dirty |= WM_DIRTY_FBC;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	if (old->partitioning != new->partitioning) {
+		dirty |= WM_DIRTY_DDB;
+		/* Must disable LP1+ watermarks too */
+		dirty |= WM_DIRTY_LP_ALL;
+	}
+
+	/* LP1+ watermarks already deemed dirty, no need to continue */
+	if (dirty & WM_DIRTY_LP_ALL)
+		return dirty;
+
+	/* Find the lowest numbered LP1+ watermark in need of an update... */
+	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+			break;
+	}
+
+	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+	for (; wm_lp <= 3; wm_lp++)
+		dirty |= WM_DIRTY_LP(wm_lp);
+
+	return dirty;
+}
+
 /*
  * The spec says we shouldn't write when we don't need to, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-				struct hsw_wm_values *results,
-				enum intel_ddb_partitioning partitioning)
+				struct hsw_wm_values *results)
 {
-	struct hsw_wm_values previous;
+	struct hsw_wm_values *previous = &dev_priv->wm.hw;
+	unsigned int dirty;
 	uint32_t val;
-	enum intel_ddb_partitioning prev_partitioning;
-	bool prev_enable_fbc_wm;
 
-	previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
-	previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
-	previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
-	previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
-	previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
-	previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
-	previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-	previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-	previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
-	previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
-	previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
-	previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
-	prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-				INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-	prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
-	if (memcmp(results->wm_pipe, previous.wm_pipe,
-		   sizeof(results->wm_pipe)) == 0 &&
-	    memcmp(results->wm_lp, previous.wm_lp,
-		   sizeof(results->wm_lp)) == 0 &&
-	    memcmp(results->wm_lp_spr, previous.wm_lp_spr,
-		   sizeof(results->wm_lp_spr)) == 0 &&
-	    memcmp(results->wm_linetime, previous.wm_linetime,
-		   sizeof(results->wm_linetime)) == 0 &&
-	    partitioning == prev_partitioning &&
-	    results->enable_fbc_wm == prev_enable_fbc_wm)
+	dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+	if (!dirty)
 		return;
 
-	if (previous.wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, 0);
-	if (previous.wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, 0);
-	if (previous.wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, 0);
 
-	if (previous.wm_pipe[0] != results->wm_pipe[0])
+	if (dirty & WM_DIRTY_PIPE(PIPE_A))
 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
-	if (previous.wm_pipe[1] != results->wm_pipe[1])
+	if (dirty & WM_DIRTY_PIPE(PIPE_B))
 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
-	if (previous.wm_pipe[2] != results->wm_pipe[2])
+	if (dirty & WM_DIRTY_PIPE(PIPE_C))
 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-	if (previous.wm_linetime[0] != results->wm_linetime[0])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-	if (previous.wm_linetime[1] != results->wm_linetime[1])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-	if (previous.wm_linetime[2] != results->wm_linetime[2])
+	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
-	if (prev_partitioning != partitioning) {
+	if (dirty & WM_DIRTY_DDB) {
 		val = I915_READ(WM_MISC);
-		if (partitioning == INTEL_DDB_PART_1_2)
+		if (results->partitioning == INTEL_DDB_PART_1_2)
 			val &= ~WM_MISC_DATA_PARTITION_5_6;
 		else
 			val |= WM_MISC_DATA_PARTITION_5_6;
 		I915_WRITE(WM_MISC, val);
 	}
 
-	if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+	if (dirty & WM_DIRTY_FBC) {
 		val = I915_READ(DISP_ARB_CTL);
 		if (results->enable_fbc_wm)
 			val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2889,65 @@
 		I915_WRITE(DISP_ARB_CTL, val);
 	}
 
-	if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-	if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
 		I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-	if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
 		I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-	if (results->wm_lp[0] != 0)
+	if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-	if (results->wm_lp[1] != 0)
+	if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-	if (results->wm_lp[2] != 0)
+	if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+	dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
-	struct hsw_pipe_wm_parameters params[3];
-	struct hsw_wm_values results_1_2, results_5_6, *best_results;
+	struct hsw_wm_maximums max;
+	struct hsw_pipe_wm_parameters params = {};
+	struct hsw_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
+	struct intel_pipe_wm pipe_wm = {};
+	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+	struct intel_wm_config config = {};
 
-	hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+	hsw_compute_wm_parameters(crtc, &params, &config);
 
-	hsw_compute_wm_results(dev, params,
-			       &lp_max_1_2, &results_1_2);
-	if (lp_max_1_2.pri != lp_max_5_6.pri) {
-		hsw_compute_wm_results(dev, params,
-				       &lp_max_5_6, &results_5_6);
-		best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+		return;
+
+	intel_crtc->wm.active = pipe_wm;
+
+	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+	ilk_wm_merge(dev, &max, &lp_wm_1_2);
+
+	/* 5/6 split only in single pipe config on IVB+ */
+	if (INTEL_INFO(dev)->gen >= 7 &&
+	    config.num_pipes_active == 1 && config.sprites_enabled) {
+		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+		ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+		best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
 	} else {
-		best_results = &results_1_2;
+		best_lp_wm = &lp_wm_1_2;
 	}
 
-	partitioning = (best_results == &results_1_2) ?
+	partitioning = (best_lp_wm == &lp_wm_1_2) ?
 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-	hsw_write_wm_values(dev_priv, best_results, partitioning);
+	hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+	hsw_write_wm_values(dev_priv, &results);
 }
 
 static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2962,7 @@
 	intel_plane->wm.horiz_pixels = sprite_width;
 	intel_plane->wm.bytes_per_pixel = pixel_size;
 
-	haswell_update_wm(plane->dev);
+	haswell_update_wm(crtc);
 }
 
 static bool
@@ -2898,7 +2981,7 @@
 		return false;
 	}
 
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
 	/* Use the small buffer method to calculate the sprite watermark */
 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3016,7 @@
 	}
 
 	crtc = intel_get_crtc_for_plane(dev, plane);
-	clock = crtc->mode.clock;
+	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 	if (!clock) {
 		*sprite_wm = 0;
 		return false;
@@ -3044,6 +3127,74 @@
 	I915_WRITE(WM3S_LP_IVB, sprite_wm);
 }
 
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_pipe_wm *active = &intel_crtc->wm.active;
+	enum pipe pipe = intel_crtc->pipe;
+	static const unsigned int wm0_pipe_reg[] = {
+		[PIPE_A] = WM0_PIPEA_ILK,
+		[PIPE_B] = WM0_PIPEB_ILK,
+		[PIPE_C] = WM0_PIPEC_IVB,
+	};
+
+	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	if (intel_crtc_active(crtc)) {
+		u32 tmp = hw->wm_pipe[pipe];
+
+		/*
+		 * For active pipes the LP0 watermark is marked as
+		 * enabled, and the LP1+ watermarks as disabled, since
+		 * we can't really reverse compute them in case
+		 * multiple pipes are active.
+		 */
+		active->wm[0].enable = true;
+		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+		active->linetime = hw->wm_linetime[pipe];
+	} else {
+		int level, max_level = ilk_wm_max_level(dev);
+
+		/*
+		 * For inactive pipes, all watermark levels
+		 * should be marked as enabled but zeroed,
+		 * which is what we'd compute them to.
+		 */
+		for (level = 0; level <= max_level; level++)
+			active->wm[level].enable = true;
+	}
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct hsw_wm_values *hw = &dev_priv->wm.hw;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		ilk_pipe_wm_get_hw_state(crtc);
+
+	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+	hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+	hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+	hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+		INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+	hw->enable_fbc_wm =
+		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3076,12 +3227,12 @@
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
 	if (dev_priv->display.update_wm)
-		dev_priv->display.update_wm(dev);
+		dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3438,98 @@
 	return limits;
 }
 
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+	int new_power;
+
+	new_power = dev_priv->rps.power;
+	switch (dev_priv->rps.power) {
+	case LOW_POWER:
+		if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+
+	case BETWEEN:
+		if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+			new_power = LOW_POWER;
+		else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+			new_power = HIGH_POWER;
+		break;
+
+	case HIGH_POWER:
+		if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+			new_power = BETWEEN;
+		break;
+	}
+	/* Max/min bins are special */
+	if (val == dev_priv->rps.min_delay)
+		new_power = LOW_POWER;
+	if (val == dev_priv->rps.max_delay)
+		new_power = HIGH_POWER;
+	if (new_power == dev_priv->rps.power)
+		return;
+
+	/* Note the units here are not exactly 1us, but 1280ns. */
+	switch (new_power) {
+	case LOW_POWER:
+		/* Upclock if more than 95% busy over 16ms */
+		I915_WRITE(GEN6_RP_UP_EI, 12500);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+		/* Downclock if less than 85% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case BETWEEN:
+		/* Upclock if more than 90% busy over 13ms */
+		I915_WRITE(GEN6_RP_UP_EI, 10250);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+		/* Downclock if less than 75% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+
+	case HIGH_POWER:
+		/* Upclock if more than 85% busy over 10ms */
+		I915_WRITE(GEN6_RP_UP_EI, 8000);
+		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+		/* Downclock if less than 60% busy over 32ms */
+		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_TURBO |
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+			   GEN6_RP_MEDIA_IS_GFX |
+			   GEN6_RP_ENABLE |
+			   GEN6_RP_UP_BUSY_AVG |
+			   GEN6_RP_DOWN_IDLE_AVG);
+		break;
+	}
+
+	dev_priv->rps.power = new_power;
+	dev_priv->rps.last_adj = 0;
+}
+
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3542,8 @@
 	if (val == dev_priv->rps.cur_delay)
 		return;
 
+	gen6_set_rps_thresholds(dev_priv, val);
+
 	if (IS_HASWELL(dev))
 		I915_WRITE(GEN6_RPNSWREQ,
 			   HSW_FREQUENCY(val));
@@ -3320,6 +3565,32 @@
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (dev_priv->info->is_valleyview)
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+	if (dev_priv->rps.enabled) {
+		if (dev_priv->info->is_valleyview)
+			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		else
+			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+		dev_priv->rps.last_adj = 0;
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 /*
  * Wait until the previous freq change has completed,
  * or the timeout elapsed, and then update our notion
@@ -3415,6 +3686,20 @@
 	}
 }
 
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+	if (IS_GEN6(dev))
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+	if (IS_HASWELL(dev))
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
 	/* No RC6 before Ironlake */
@@ -3429,18 +3714,13 @@
 	if (INTEL_INFO(dev)->gen == 5)
 		return 0;
 
-	if (IS_HASWELL(dev)) {
-		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+	if (IS_HASWELL(dev))
 		return INTEL_RC6_ENABLE;
-	}
 
 	/* snb/ivb have more than one rc6 state. */
-	if (INTEL_INFO(dev)->gen == 6) {
-		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+	if (INTEL_INFO(dev)->gen == 6)
 		return INTEL_RC6_ENABLE;
-	}
 
-	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
@@ -3501,7 +3781,10 @@
 
 	/* In units of 50MHz */
 	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+	dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
+	dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
+	dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
 	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
@@ -3539,48 +3822,16 @@
 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
 	}
 
-	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	intel_print_rc6_info(dev, rc6_mask);
 
 	I915_WRITE(GEN6_RC_CONTROL,
 		   rc6_mask |
 		   GEN6_RC_CTL_EI_MODE(1) |
 		   GEN6_RC_CTL_HW_ENABLE);
 
-	if (IS_HASWELL(dev)) {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   HSW_FREQUENCY(10));
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   HSW_FREQUENCY(12));
-	} else {
-		I915_WRITE(GEN6_RPNSWREQ,
-			   GEN6_FREQUENCY(10) |
-			   GEN6_OFFSET(0) |
-			   GEN6_AGGRESSIVE_TURBO);
-		I915_WRITE(GEN6_RC_VIDEO_FREQ,
-			   GEN6_FREQUENCY(12));
-	}
-
-	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->rps.max_delay << 24 |
-		   dev_priv->rps.min_delay << 16);
-
-	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-	I915_WRITE(GEN6_RP_UP_EI, 66000);
-	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+	/* Power down if completely idle for over 50ms */
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-	I915_WRITE(GEN6_RP_CONTROL,
-		   GEN6_RP_MEDIA_TURBO |
-		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-		   GEN6_RP_MEDIA_IS_GFX |
-		   GEN6_RP_ENABLE |
-		   GEN6_RP_UP_BUSY_AVG |
-		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
 	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
 	if (!ret) {
@@ -3596,7 +3847,8 @@
 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
-	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
 	gen6_enable_rps_interrupts(dev);
 
@@ -3624,23 +3876,28 @@
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
 	int scaling_factor = 180;
+	struct cpufreq_policy *policy;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	max_ia_freq = cpufreq_quick_get_max(0);
-	/*
-	 * Default to measured freq if none found, PCU will ensure we don't go
-	 * over
-	 */
-	if (!max_ia_freq)
+	policy = cpufreq_cpu_get(0);
+	if (policy) {
+		max_ia_freq = policy->cpuinfo.max_freq;
+		cpufreq_cpu_put(policy);
+	} else {
+		/*
+		 * Default to measured freq if none found, PCU will ensure we
+		 * don't go over
+		 */
 		max_ia_freq = tsc_khz;
+	}
 
 	/* Convert from kHz to MHz */
 	max_ia_freq /= 1000;
 
-	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
-	/* convert DDR frequency from units of 133.3MHz to bandwidth */
-	min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+	min_ring_freq = I915_READ(DCLK) & 0xf;
+	/* convert DDR frequency from units of 266.6MHz to bandwidth */
+	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3653,7 +3910,7 @@
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (IS_HASWELL(dev)) {
-			ring_freq = (gpu_freq * 5 + 3) / 4;
+			ring_freq = mult_frac(gpu_freq, 5, 4);
 			ring_freq = max(min_ring_freq, ring_freq);
 			/* leave ia_freq as the default, chosen by cpufreq */
 		} else {
@@ -3709,24 +3966,6 @@
 	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
 
-static void vlv_rps_timer_work(struct work_struct *work)
-{
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps.vlv_work.work);
-
-	/*
-	 * Timer fired, we must be idle.  Drop to min voltage state.
-	 * Note: we use RPe here since it should match the
-	 * Vmin we were shooting for.  That should give us better
-	 * perf when we come back out of RC6 than if we used the
-	 * min freq available.
-	 */
-	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-		valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4012,14 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	u32 gtfifodbg, val;
+	u32 gtfifodbg, val, rc6_mode = 0;
 	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+				 gtfifodbg);
 		I915_WRITE(GTFIFODBG, gtfifodbg);
 	}
 
@@ -3812,9 +4052,16 @@
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 
 	/* allows RC6 residency counter to work */
-	I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
-	I915_WRITE(GEN6_RC_CONTROL,
-		   GEN7_RC_CTL_TO_MODE);
+	I915_WRITE(VLV_COUNTER_CONTROL,
+		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+				      VLV_MEDIA_RC6_COUNT_EN |
+				      VLV_RENDER_RC6_COUNT_EN));
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+	intel_print_rc6_info(dev, rc6_mode);
+
+	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 	switch ((val >> 6) & 3) {
@@ -3985,6 +4232,8 @@
 
 	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+	intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4603,13 +4852,12 @@
 	} else if (INTEL_INFO(dev)->gen >= 6) {
 		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
 		cancel_work_sync(&dev_priv->rps.work);
-		if (IS_VALLEYVIEW(dev))
-			cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
 		mutex_lock(&dev_priv->rps.hw_lock);
 		if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
+		dev_priv->rps.enabled = false;
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
 }
@@ -4629,6 +4877,7 @@
 		gen6_enable_rps(dev);
 		gen6_update_ring_freq(dev);
 	}
+	dev_priv->rps.enabled = true;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
@@ -4672,7 +4921,7 @@
 		I915_WRITE(DSPCNTR(pipe),
 			   I915_READ(DSPCNTR(pipe)) |
 			   DISPPLANE_TRICKLE_FEED_DISABLE);
-		intel_flush_display_plane(dev_priv, pipe);
+		intel_flush_primary_plane(dev_priv, pipe);
 	}
 }
 
@@ -5255,6 +5504,23 @@
 		lpt_suspend_hw(dev);
 }
 
+static bool is_always_on_power_domain(struct drm_device *dev,
+				      enum intel_display_power_domain domain)
+{
+	unsigned long always_on_domains;
+
+	BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+	if (IS_HASWELL(dev)) {
+		always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+	} else {
+		WARN_ON(1);
+		return true;
+	}
+
+	return BIT(domain) & always_on_domains;
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -5268,23 +5534,11 @@
 	if (!HAS_POWER_WELL(dev))
 		return true;
 
-	switch (domain) {
-	case POWER_DOMAIN_PIPE_A:
-	case POWER_DOMAIN_TRANSCODER_EDP:
+	if (is_always_on_power_domain(dev, domain))
 		return true;
-	case POWER_DOMAIN_PIPE_B:
-	case POWER_DOMAIN_PIPE_C:
-	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-	case POWER_DOMAIN_TRANSCODER_A:
-	case POWER_DOMAIN_TRANSCODER_B:
-	case POWER_DOMAIN_TRANSCODER_C:
-		return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+	return I915_READ(HSW_PWR_WELL_DRIVER) ==
 		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-	default:
-		BUG();
-	}
 }
 
 static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5328,83 +5582,136 @@
 			spin_lock_irqsave(&dev->vbl_lock, irqflags);
 			for_each_pipe(p)
 				if (p != PIPE_A)
-					dev->last_vblank[p] = 0;
+					dev->vblank[p].last = 0;
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 		}
 	}
 }
 
-static struct i915_power_well *hsw_pwr;
+static void __intel_power_well_get(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	if (!power_well->count++)
+		__intel_set_power_well(dev, true);
+}
+
+static void __intel_power_well_put(struct drm_device *dev,
+				   struct i915_power_well *power_well)
+{
+	WARN_ON(!power_well->count);
+	if (!--power_well->count && i915_disable_power_well)
+		__intel_set_power_well(dev, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	if (is_always_on_power_domain(dev, domain))
+		return;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+	__intel_power_well_get(dev, &power_domains->power_wells[0]);
+	mutex_unlock(&power_domains->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+			     enum intel_display_power_domain domain)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	if (is_always_on_power_domain(dev, domain))
+		return;
+
+	power_domains = &dev_priv->power_domains;
+
+	mutex_lock(&power_domains->lock);
+	__intel_power_well_put(dev, &power_domains->power_wells[0]);
+	mutex_unlock(&power_domains->lock);
+}
+
+static struct i915_power_domains *hsw_pwr;
 
 /* Display audio driver power well request */
 void i915_request_power_well(void)
 {
+	struct drm_i915_private *dev_priv;
+
 	if (WARN_ON(!hsw_pwr))
 		return;
 
-	spin_lock_irq(&hsw_pwr->lock);
-	if (!hsw_pwr->count++ &&
-			!hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, true);
-	spin_unlock_irq(&hsw_pwr->lock);
+	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+				power_domains);
+
+	mutex_lock(&hsw_pwr->lock);
+	__intel_power_well_get(dev_priv->dev, &hsw_pwr->power_wells[0]);
+	mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 /* Display audio driver power well release */
 void i915_release_power_well(void)
 {
+	struct drm_i915_private *dev_priv;
+
 	if (WARN_ON(!hsw_pwr))
 		return;
 
-	spin_lock_irq(&hsw_pwr->lock);
-	WARN_ON(!hsw_pwr->count);
-	if (!--hsw_pwr->count &&
-		       !hsw_pwr->i915_request)
-		__intel_set_power_well(hsw_pwr->device, false);
-	spin_unlock_irq(&hsw_pwr->lock);
+	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+				power_domains);
+
+	mutex_lock(&hsw_pwr->lock);
+	__intel_power_well_put(dev_priv->dev, &hsw_pwr->power_wells[0]);
+	mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
-int i915_init_power_well(struct drm_device *dev)
+int intel_power_domains_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
 
-	hsw_pwr = &dev_priv->power_well;
+	mutex_init(&power_domains->lock);
+	hsw_pwr = power_domains;
 
-	hsw_pwr->device = dev;
-	spin_lock_init(&hsw_pwr->lock);
-	hsw_pwr->count = 0;
+	power_well = &power_domains->power_wells[0];
+	power_well->count = 0;
 
 	return 0;
 }
 
-void i915_remove_power_well(struct drm_device *dev)
+void intel_power_domains_remove(struct drm_device *dev)
 {
 	hsw_pwr = NULL;
 }
 
-void intel_set_power_well(struct drm_device *dev, bool enable)
+static void intel_power_domains_resume(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_power_well *power_well = &dev_priv->power_well;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
 
 	if (!HAS_POWER_WELL(dev))
 		return;
 
-	if (!i915_disable_power_well && !enable)
-		return;
+	mutex_lock(&power_domains->lock);
 
-	spin_lock_irq(&power_well->lock);
-	power_well->i915_request = enable;
+	power_well = &power_domains->power_wells[0];
+	__intel_set_power_well(dev, power_well->count > 0);
 
-	/* only reject "disable" power well request */
-	if (power_well->count && !enable) {
-		spin_unlock_irq(&power_well->lock);
-		return;
-	}
-
-	__intel_set_power_well(dev, enable);
-	spin_unlock_irq(&power_well->lock);
+	mutex_unlock(&power_domains->lock);
 }
 
 /*
@@ -5413,7 +5720,7 @@
  * to be enabled, and it will only be disabled if none of the registers is
  * requesting it to be enabled.
  */
-void intel_init_power_well(struct drm_device *dev)
+void intel_power_domains_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5421,7 +5728,8 @@
 		return;
 
 	/* For now, we need the power well to be always enabled. */
-	intel_set_power_well(dev, true);
+	intel_display_set_init_power(dev, true);
+	intel_power_domains_resume(dev);
 
 	/* We're taking over the BIOS, so clear any requests made by it since
 	 * the driver is in charge now. */
@@ -5686,7 +5994,5 @@
 
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
-
-	INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
 }
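
The power-well hunks above replace the open-coded, spinlock-protected request flag with a mutex-protected reference count keyed off display power domains. A rough sketch of that pattern, illustrative only and with hypothetical simplified names (not taken from the patch):

#include <linux/mutex.h>
#include <linux/bug.h>

/* Hypothetical stand-in for i915_power_well; mutex_init() happens at init. */
struct power_well_sketch {
	struct mutex lock;	/* protects count */
	int count;		/* how many users currently need the well */
};

static void power_well_enable_hw(struct power_well_sketch *pw, bool on)
{
	/* stand-in for __intel_set_power_well(): poke HSW_PWR_WELL_DRIVER */
}

static void power_well_get(struct power_well_sketch *pw)
{
	mutex_lock(&pw->lock);
	if (pw->count++ == 0)		/* first user powers the well up */
		power_well_enable_hw(pw, true);
	mutex_unlock(&pw->lock);
}

static void power_well_put(struct power_well_sketch *pw)
{
	mutex_lock(&pw->lock);
	WARN_ON(!pw->count);
	if (--pw->count == 0)		/* last user may power it down */
		power_well_enable_hw(pw, false);
	mutex_unlock(&pw->lock);
}

In the hunks above, always-on domains short-circuit before ever touching the count, the real put path additionally honours the i915_disable_power_well parameter, and the audio driver hooks (i915_request_power_well/release) reuse the same get/put through hsw_pwr.
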
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 460ee10..2dec134 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -41,6 +41,16 @@
 	return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32	invalidate_domains,
@@ -385,8 +395,7 @@
 	int ret = 0;
 	u32 head;
 
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
@@ -459,8 +468,7 @@
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
 
 	return ret;
 }
@@ -559,8 +567,8 @@
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
 	return ret;
 }
@@ -593,7 +601,7 @@
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +637,9 @@
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -723,7 +731,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +750,9 @@
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -963,9 +971,9 @@
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
@@ -987,10 +995,10 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1017,8 @@
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1317,7 +1324,7 @@
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
 	ret = intel_ring_idle(ring);
-	if (ret)
+	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
 
@@ -1328,6 +1335,8 @@
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
 	ring->obj = NULL;
+	ring->preallocated_lazy_request = NULL;
+	ring->outstanding_lazy_seqno = 0;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1414,6 +1423,9 @@
 	if (ret != -ENOSPC)
 		return ret;
 
+	/* force the tail write in case we have been skipping them */
+	__intel_ring_advance(ring);
+
 	trace_i915_ring_wait_begin(ring);
 	/* With GEM the hangcheck timer should kick us out of the loop,
 	 * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
+	if (ring->outstanding_lazy_seqno) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
@@ -1495,10 +1507,20 @@
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-	if (ring->outstanding_lazy_request)
+	if (ring->outstanding_lazy_seqno)
 		return 0;
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+	if (ring->preallocated_lazy_request == NULL) {
+		struct drm_i915_gem_request *request;
+
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ring->preallocated_lazy_request = request;
+	}
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1567,7 @@
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
+	BUG_ON(ring->outstanding_lazy_seqno);
 
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@
 	ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-		return;
-	ring->write_tail(ring, ring->tail);
-}
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
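
The ring hunks above split intel_ring_advance() into a software-only wrap (made a header inline in the intel_ringbuffer.h diff below) and __intel_ring_advance(), which actually kicks the hardware tail unless the ring has been stopped for debugging. Roughly, as an illustrative sketch with hypothetical names:

#include <linux/types.h>

/* Hypothetical, trimmed-down view of the two-step tail update. */
struct ring_sketch {
	u32 tail;
	u32 size;		/* ring size, a power of two */
	bool stopped;		/* mirrors gpu_error.stop_rings for this ring */
	void (*write_tail)(struct ring_sketch *ring, u32 value);
};

/* Cheap: only wraps the software tail, never touches registers. */
static inline void ring_advance(struct ring_sketch *ring)
{
	ring->tail &= ring->size - 1;
}

/* Explicit kick: publishes the tail to the hardware ring register. */
static void __ring_advance(struct ring_sketch *ring)
{
	ring->tail &= ring->size - 1;
	if (ring->stopped)
		return;			/* tests can freeze the ring here */
	ring->write_tail(ring, ring->tail);
}

Because tail writes can now be skipped, the wait-for-space path above gains an explicit __intel_ring_advance() call to force out any pending tail before blocking.
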
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68b1ca974..71a73f4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
+	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
 	HANGCHECK_KICK,
@@ -140,7 +141,8 @@
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	u32 outstanding_lazy_request;
+	struct drm_i915_gem_request *preallocated_lazy_request;
+	u32 outstanding_lazy_seqno;
 	bool gpu_caches_dirty;
 	bool fbc_dirty;
 
@@ -237,7 +239,12 @@
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
-void intel_ring_advance(struct intel_ring_buffer *ring);
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@
 
 static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	BUG_ON(ring->outstanding_lazy_request == 0);
-	return ring->outstanding_lazy_request;
+	BUG_ON(ring->outstanding_lazy_seqno == 0);
+	return ring->outstanding_lazy_seqno;
 }
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 49482fd..a583e8f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@
 		goto log_fail;
 
 	while ((status == SDVO_CMD_STATUS_PENDING ||
-			status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
 		if (retry < 10)
 			msleep(15);
 		else
@@ -1068,7 +1068,7 @@
 
 static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 {
-	unsigned dotclock = pipe_config->adjusted_mode.clock;
+	unsigned dotclock = pipe_config->port_clock;
 	struct dpll *clock = &pipe_config->dpll;
 
 	/* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@
 	 */
 	pipe_config->pixel_multiplier =
 		intel_sdvo_get_pixel_multiplier(adjusted_mode);
-	adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
 	if (intel_sdvo->color_range_auto) {
 		/* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@
 	    !intel_sdvo_set_tv_format(intel_sdvo))
 		return;
 
-	/* We have tried to get input timing in mode_fixup, and filled into
-	 * adjusted_mode.
-	 */
 	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-	input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
 	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
 		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_sdvo_dtd dtd;
 	int encoder_pixel_multiplier = 0;
+	int dotclock;
 	u32 flags = 0, sdvox;
 	u8 val;
 	bool ret;
@@ -1368,6 +1364,13 @@
 			 >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
 	}
 
+	dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+	pipe_config->adjusted_mode.crtc_clock = dotclock;
+
 	/* Cross check the port pixel multiplier with the sdvo encoder state. */
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
 				 &val, 1)) {
@@ -1770,6 +1773,9 @@
 {
 	struct edid *edid;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/* set the bus switch and get the modes */
 	edid = intel_sdvo_get_edid(connector);
 
@@ -1865,6 +1871,9 @@
 	uint32_t reply = 0, format_map = 0;
 	int i;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/* Read the list of supported input resolutions for the selected TV
 	 * format.
 	 */
@@ -1899,6 +1908,9 @@
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 	struct drm_display_mode *newmode;
 
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector));
+
 	/*
 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
 	 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@
 			break;
 		}
 	}
-
 }
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@
 				     intel_sdvo_connector->tv_format);
 
 	intel_sdvo_destroy_enhance_property(connector);
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(intel_sdvo_connector);
 }
@@ -2394,7 +2404,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2442,7 +2454,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2467,6 +2481,7 @@
 	return true;
 
 err:
+	drm_sysfs_connector_remove(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2479,7 +2494,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2510,7 +2527,9 @@
 	struct intel_connector *intel_connector;
 	struct intel_sdvo_connector *intel_sdvo_connector;
 
-	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+	intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
 	if (!intel_sdvo_connector)
 		return false;
 
@@ -2534,6 +2553,7 @@
 	return true;
 
 err:
+	drm_sysfs_connector_remove(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2605,8 +2625,10 @@
 
 	list_for_each_entry_safe(connector, tmp,
 				 &dev->mode_config.connector_list, head) {
-		if (intel_attached_encoder(connector) == &intel_sdvo->base)
+		if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+			drm_sysfs_connector_remove(connector);
 			intel_sdvo_destroy(connector);
+		}
 	}
 }
 
@@ -2876,7 +2898,7 @@
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
 	int i;
-	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+	intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
 	if (!intel_sdvo)
 		return false;
 
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9a0e6c5..9944d81 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -25,7 +25,10 @@
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-/* IOSF sideband */
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
 static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 			   u32 port, u32 opcode, u32 addr, u32 *val)
 {
@@ -101,19 +104,83 @@
 	return val;
 }
 
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val = 0;
-
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
-			DPIO_OPCODE_REG_READ, reg, &val);
-
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+			PUNIT_OPCODE_REG_READ, reg, &val);
 	return val;
 }
 
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
 {
-	vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val = 0;
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+			PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+	u32 port = IOSF_PORT_DPIO;
+
+	WARN_ON ((pipe != PIPE_A) && (pipe != PIPE_B));
+
+	return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+	u32 val = 0;
+
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+			DPIO_OPCODE_REG_READ, reg, &val);
+	return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
+{
+	vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
 			DPIO_OPCODE_REG_WRITE, reg, &val);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index ad6ec4b..07b13dc 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -288,7 +288,7 @@
 		dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
 		if (!scaling_was_enabled) {
-			intel_update_watermarks(dev);
+			intel_update_watermarks(crtc);
 			intel_wait_for_vblank(dev, pipe);
 		}
 		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static void
@@ -349,7 +349,7 @@
 
 	/* potentially re-enable LP watermarks */
 	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-		intel_update_watermarks(dev);
+		intel_update_watermarks(crtc);
 }
 
 static int
@@ -521,13 +521,28 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int reg = DSPCNTR(intel_crtc->plane);
 
-	if (!intel_crtc->primary_disabled)
+	if (intel_crtc->primary_enabled)
 		return;
 
-	intel_crtc->primary_disabled = false;
-	intel_update_fbc(dev);
+	intel_crtc->primary_enabled = true;
 
 	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	if (intel_crtc->config.ips_enabled) {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		hsw_enable_ips(intel_crtc);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -538,13 +553,26 @@
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int reg = DSPCNTR(intel_crtc->plane);
 
-	if (intel_crtc->primary_disabled)
+	if (!intel_crtc->primary_enabled)
 		return;
 
-	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+	intel_crtc->primary_enabled = false;
 
-	intel_crtc->primary_disabled = true;
-	intel_update_fbc(dev);
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->fbc.plane == intel_crtc->plane)
+		intel_disable_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * FIXME IPS should be fine as long as one plane is
+	 * enabled, but in practice it seems to have problems
+	 * when going from primary only to sprite only and vice
+	 * versa.
+	 */
+	hsw_disable_ips(intel_crtc);
+
+	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+	intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 }
 
 static int
@@ -623,15 +651,12 @@
 		   uint32_t src_w, uint32_t src_h)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj, *old_obj;
-	int pipe = intel_plane->pipe;
-	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-								      pipe);
-	int ret = 0;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *old_obj = intel_plane->obj;
+	int ret;
 	bool disable_primary = false;
 	bool visible;
 	int hscale, vscale;
@@ -652,29 +677,23 @@
 		.y2 = crtc_y + crtc_h,
 	};
 	const struct drm_rect clip = {
-		.x2 = crtc->mode.hdisplay,
-		.y2 = crtc->mode.vdisplay,
+		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
 	};
-
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
-	old_obj = intel_plane->obj;
-
-	intel_plane->crtc_x = crtc_x;
-	intel_plane->crtc_y = crtc_y;
-	intel_plane->crtc_w = crtc_w;
-	intel_plane->crtc_h = crtc_h;
-	intel_plane->src_x = src_x;
-	intel_plane->src_y = src_y;
-	intel_plane->src_w = src_w;
-	intel_plane->src_h = src_h;
-
-	/* Pipe must be running... */
-	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
-		DRM_DEBUG_KMS("Pipe disabled\n");
-		return -EINVAL;
-	}
+	const struct {
+		int crtc_x, crtc_y;
+		unsigned int crtc_w, crtc_h;
+		uint32_t src_x, src_y, src_w, src_h;
+	} orig = {
+		.crtc_x = crtc_x,
+		.crtc_y = crtc_y,
+		.crtc_w = crtc_w,
+		.crtc_h = crtc_h,
+		.src_x = src_x,
+		.src_y = src_y,
+		.src_w = src_w,
+		.src_h = src_h,
+	};
 
 	/* Don't modify another pipe's plane */
 	if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@
 	 * we can disable the primary and save power.
 	 */
 	disable_primary = drm_rect_equals(&dst, &clip);
-	WARN_ON(disable_primary && !visible);
+	WARN_ON(disable_primary && !visible && intel_crtc->active);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -820,27 +839,40 @@
 	 * the sprite planes only require 128KiB alignment and 32 PTE padding.
 	 */
 	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-	if (ret)
-		goto out_unlock;
 
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	intel_plane->crtc_x = orig.crtc_x;
+	intel_plane->crtc_y = orig.crtc_y;
+	intel_plane->crtc_w = orig.crtc_w;
+	intel_plane->crtc_h = orig.crtc_h;
+	intel_plane->src_x = orig.src_x;
+	intel_plane->src_y = orig.src_y;
+	intel_plane->src_w = orig.src_w;
+	intel_plane->src_h = orig.src_h;
 	intel_plane->obj = obj;
 
-	/*
-	 * Be sure to re-enable the primary before the sprite is no longer
-	 * covering it fully.
-	 */
-	if (!disable_primary)
-		intel_enable_primary(crtc);
+	if (intel_crtc->active) {
+		/*
+		 * Be sure to re-enable the primary before the sprite is no longer
+		 * covering it fully.
+		 */
+		if (!disable_primary)
+			intel_enable_primary(crtc);
 
-	if (visible)
-		intel_plane->update_plane(plane, crtc, fb, obj,
-					  crtc_x, crtc_y, crtc_w, crtc_h,
-					  src_x, src_y, src_w, src_h);
-	else
-		intel_plane->disable_plane(plane, crtc);
+		if (visible)
+			intel_plane->update_plane(plane, crtc, fb, obj,
+						  crtc_x, crtc_y, crtc_w, crtc_h,
+						  src_x, src_y, src_w, src_h);
+		else
+			intel_plane->disable_plane(plane, crtc);
 
-	if (disable_primary)
-		intel_disable_primary(crtc);
+		if (disable_primary)
+			intel_disable_primary(crtc);
+	}
 
 	/* Unpin old obj after new one is active to avoid ugliness */
 	if (old_obj) {
@@ -850,17 +882,15 @@
 		 * wait for vblank to avoid ugliness, we only need to
 		 * do the pin & ref bookkeeping.
 		 */
-		if (old_obj != obj) {
-			mutex_unlock(&dev->struct_mutex);
-			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
-			mutex_lock(&dev->struct_mutex);
-		}
+		if (old_obj != obj && intel_crtc->active)
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(old_obj);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 
 static int
@@ -868,7 +898,7 @@
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	int ret = 0;
+	struct intel_crtc *intel_crtc;
 
 	if (!plane->fb)
 		return 0;
@@ -876,21 +906,25 @@
 	if (WARN_ON(!plane->crtc))
 		return -EINVAL;
 
-	intel_enable_primary(plane->crtc);
-	intel_plane->disable_plane(plane, plane->crtc);
+	intel_crtc = to_intel_crtc(plane->crtc);
 
-	if (!intel_plane->obj)
-		goto out;
+	if (intel_crtc->active) {
+		intel_enable_primary(plane->crtc);
+		intel_plane->disable_plane(plane, plane->crtc);
+	}
 
-	intel_wait_for_vblank(dev, intel_plane->pipe);
+	if (intel_plane->obj) {
+		if (intel_crtc->active)
+			intel_wait_for_vblank(dev, intel_plane->pipe);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(intel_plane->obj);
-	intel_plane->obj = NULL;
-	mutex_unlock(&dev->struct_mutex);
-out:
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(intel_plane->obj);
+		mutex_unlock(&dev->struct_mutex);
 
-	return ret;
+		intel_plane->obj = NULL;
+	}
+
+	return 0;
 }
 
 static void intel_destroy_plane(struct drm_plane *plane)
@@ -921,7 +955,7 @@
 
 	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_unlock;
 	}
 
@@ -950,7 +984,7 @@
 
 	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_unlock;
 	}
 
@@ -1034,7 +1068,7 @@
 	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
-	intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+	intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
 	if (!intel_plane)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index dd6f84b..18c4062 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@
 	if (!tv_mode)
 		return false;
 
-	pipe_config->adjusted_mode.clock = tv_mode->clock;
+	pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
@@ -1044,7 +1044,7 @@
 		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
 
 	/* Enable two fixes for the chips that need them. */
-	if (dev->pci_device < 0x2772)
+	if (dev->pdev->device < 0x2772)
 		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
 
 	I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@
 		unsigned int xsize, ysize;
 		/* Pipe must be off here */
 		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
 		/* Wait for vblank for the disable to take effect */
 		if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@
 
 		I915_WRITE(pipeconf_reg, pipeconf);
 		I915_WRITE(dspcntr_reg, dspcntr);
-		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+		intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 	}
 
 	j = 0;
@@ -1433,7 +1433,6 @@
 static void
 intel_tv_destroy(struct drm_connector *connector)
 {
-	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
@@ -1518,7 +1517,7 @@
 static int tv_is_present_in_vbt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
+	union child_device_config *p_child;
 	int i, ret;
 
 	if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@
 		/*
 		 * If the device type is not TV, continue.
 		 */
-		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
-			p_child->device_type != DEVICE_TYPE_TV)
+		if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+			p_child->old.device_type != DEVICE_TYPE_TV)
 			continue;
 		/* Only when the addin_offset is non-zero, it is regarded
 		 * as present.
 		 */
-		if (p_child->addin_offset) {
+		if (p_child->old.addin_offset) {
 			ret = 1;
 			break;
 		}
@@ -1590,12 +1589,12 @@
 	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
 		return;
 
-	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
 	if (!intel_tv) {
 		return;
 	}
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_tv);
 		return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c..f6fae35 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -204,18 +204,291 @@
 	gen6_gt_check_fifodbg(dev_priv);
 }
 
+static void gen6_force_wake_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (--dev_priv->uncore.forcewake_count == 0)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
 void intel_uncore_early_sanitize(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
 		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+	if (IS_HASWELL(dev) &&
+	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+		/* The docs do not explain exactly how the calculation can be
+		 * made. It is somewhat guessable, but for now, it's always
+		 * 128MB.
+		 * NB: We can't write IDICR yet because we do not have gt funcs
+		 * set up */
+		dev_priv->ellc_size = 128;
+		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+	}
 }
 
+static void intel_uncore_forcewake_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		vlv_force_wake_reset(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		__gen6_gt_force_wake_reset(dev_priv);
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			__gen6_gt_force_wake_mt_reset(dev_priv);
+	}
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg_val;
+
+	intel_uncore_forcewake_reset(dev);
+
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	intel_disable_gt_powersave(dev);
+
+	/* Turn off power gate, require especially for the BIOS less system */
+	if (IS_VALLEYVIEW(dev)) {
+
+		mutex_lock(&dev_priv->rps.hw_lock);
+		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+
+	}
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_get)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (dev_priv->uncore.forcewake_count++ == 0)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	if (!dev_priv->uncore.funcs.force_wake_put)
+		return;
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	if (--dev_priv->uncore.forcewake_count == 0) {
+		dev_priv->uncore.forcewake_count++;
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->uncore.force_wake_work,
+				 1);
+	}
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	 ((reg) < 0x40000 && (reg) != FORCEWAKE)
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
+	 * the chip from rc6 before touching it for real. MI_MODE is masked,
+	 * hence harmless to write 0 into. */
+	__raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+#define REG_READ_HEADER(x) \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	ilk_dummy_write(dev_priv); \
+	val = __raw_i915_read##x(dev_priv, reg); \
+	REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+	REG_READ_HEADER(x); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+		val = __raw_i915_read##x(dev_priv, reg); \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+	} else { \
+		val = __raw_i915_read##x(dev_priv, reg); \
+	} \
+	REG_READ_FOOTER; \
+}
+
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
+
+#define REG_WRITE_HEADER \
+	unsigned long irqflags; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	REG_WRITE_HEADER; \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	REG_WRITE_HEADER; \
+	ilk_dummy_write(dev_priv); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	REG_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+	u32 __fifo_ret = 0; \
+	REG_WRITE_HEADER; \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_HEADER
+
 void intel_uncore_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+			  gen6_force_wake_work);
+
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
 		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
@@ -259,146 +532,61 @@
 		dev_priv->uncore.funcs.force_wake_put =
 			__gen6_gt_force_wake_put;
 	}
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		if (IS_HASWELL(dev)) {
+			dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
+			dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
+			dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
+		} else {
+			dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
+			dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
+			dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
+			dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
+		}
+		dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+		break;
+	case 5:
+		dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
+		dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
+		dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
+		dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
+		dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
+		break;
+	case 4:
+	case 3:
+	case 2:
+		dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
+		dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
+		dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
+		dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
+		dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
+		dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
+		dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
+		dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
+		break;
+	}
 }
 
-static void intel_uncore_forcewake_reset(struct drm_device *dev)
+void intel_uncore_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (IS_VALLEYVIEW(dev)) {
-		vlv_force_wake_reset(dev_priv);
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		__gen6_gt_force_wake_reset(dev_priv);
-		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-			__gen6_gt_force_wake_mt_reset(dev_priv);
-	}
+	flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+	/* Paranoia: make sure we have disabled everything before we exit. */
+	intel_uncore_sanitize(dev);
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
-{
-	intel_uncore_forcewake_reset(dev);
-
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_disable_gt_powersave(dev);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (dev_priv->uncore.forcewake_count++ == 0)
-		dev_priv->uncore.funcs.force_wake_get(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-	if (--dev_priv->uncore.forcewake_count == 0)
-		dev_priv->uncore.funcs.force_wake_put(dev_priv);
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
-	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-	 ((reg) < 0x40000) &&            \
-	 ((reg) != FORCEWAKE))
-
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
-	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
-	 * the chip from rc6 before touching it for real. MI_MODE is masked,
-	 * hence harmless to write 0 into. */
-	__raw_i915_write32(dev_priv, MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
-			  reg);
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
-	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-		DRM_ERROR("Unclaimed write to %x\n", reg);
-		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-	}
-}
-
-#define __i915_read(x) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
-	unsigned long irqflags; \
-	u##x val = 0; \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	if (dev_priv->info->gen == 5) \
-		ilk_dummy_write(dev_priv); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
-		val = __raw_i915_read##x(dev_priv, reg); \
-		if (dev_priv->uncore.forcewake_count == 0) \
-			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
-	} else { \
-		val = __raw_i915_read##x(dev_priv, reg); \
-	} \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
-	return val; \
-}
-
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
-	unsigned long irqflags; \
-	u32 __fifo_ret = 0; \
-	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
-	} \
-	if (dev_priv->info->gen == 5) \
-		ilk_dummy_write(dev_priv); \
-	hsw_unclaimed_reg_clear(dev_priv, reg); \
-	__raw_i915_write##x(dev_priv, reg, val); \
-	if (unlikely(__fifo_ret)) { \
-		gen6_gt_check_fifodbg(dev_priv); \
-	} \
-	hsw_unclaimed_reg_check(dev_priv, reg); \
-	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-}
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
 static const struct register_whitelist {
 	uint64_t offset;
 	uint32_t size;
@@ -445,36 +633,6 @@
 	return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_I85X(dev))
-		return -ENODEV;
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	if (IS_I830(dev) || IS_845G(dev)) {
-		I915_WRITE(DEBUG_RESET_I830,
-			   DEBUG_RESET_DISPLAY |
-			   DEBUG_RESET_RENDER |
-			   DEBUG_RESET_FULL);
-		POSTING_READ(DEBUG_RESET_I830);
-		msleep(1);
-
-		I915_WRITE(DEBUG_RESET_I830, 0);
-		POSTING_READ(DEBUG_RESET_I830);
-	}
-
-	msleep(1);
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	return 0;
-}
-
 static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
@@ -576,7 +734,6 @@
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
 	case 4: return i965_do_reset(dev);
-	case 2: return i8xx_do_reset(dev);
 	default: return -ENODEV;
 	}
 }
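
The uncore rework above also changes gen6_gt_force_wake_put(): instead of releasing forcewake immediately, the final reference is parked in a delayed work item so bursts of register accesses do not bounce the GT in and out of its wake state. A rough sketch of that idea, illustrative only and with hypothetical names:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for the uncore forcewake bookkeeping. */
struct fw_sketch {
	spinlock_t lock;
	unsigned int count;
	struct delayed_work work;		/* runs fw_work() below */
	struct workqueue_struct *wq;
	void (*hw_put)(struct fw_sketch *fw);	/* really drops forcewake */
};

static void fw_put(struct fw_sketch *fw)
{
	unsigned long flags;

	spin_lock_irqsave(&fw->lock, flags);
	if (--fw->count == 0) {
		fw->count++;			/* keep holding it for now ... */
		mod_delayed_work(fw->wq, &fw->work, 1);	/* ... drop it shortly */
	}
	spin_unlock_irqrestore(&fw->lock, flags);
}

static void fw_work(struct work_struct *work)
{
	struct fw_sketch *fw = container_of(work, typeof(*fw), work.work);
	unsigned long flags;

	spin_lock_irqsave(&fw->lock, flags);
	if (--fw->count == 0)
		fw->hw_put(fw);			/* GT may finally power down */
	spin_unlock_irqrestore(&fw->lock, flags);
}

The new intel_uncore_fini() above flushes this work and re-runs intel_uncore_sanitize() so nothing is left forced awake on unload.
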
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index cc3166d..087db33 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -406,11 +406,6 @@
 	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
 	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
 
-	dev->counters += 3;
-	dev->types[6] = _DRM_STAT_IRQ;
-	dev->types[7] = _DRM_STAT_PRIMARY;
-	dev->types[8] = _DRM_STAT_SECONDARY;
-
 	ret = drm_vblank_init(dev, 1);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 598c281..2b0ceb8 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -169,5 +169,5 @@
 	/* Disable *all* interrupts */
 	MGA_WRITE(MGA_IEN, 0);
 
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 }
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cde..3a1c5fb 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	 This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index fcce7b2..f15ea3c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -99,7 +99,6 @@
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_init_object = mgag200_gem_init_object,
 	.gem_free_object = mgag200_gem_free_object,
 	.dumb_create = mgag200_dumb_create,
 	.dumb_map_offset = mgag200_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index baaae193..cf11ee6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -260,7 +260,6 @@
 int mgag200_gem_create(struct drm_device *dev,
 		   u32 size, bool iskernel,
 		       struct drm_gem_object **obj);
-int mgag200_gem_init_object(struct drm_gem_object *obj);
 int mgag200_dumb_create(struct drm_file *file,
 			struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 0f8b861..b1120cb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,12 +310,6 @@
 	return 0;
 }
 
-int mgag200_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-	return 0;
-}
-
 void mgag200_bo_unref(struct mgag200_bo **bo)
 {
 	struct ttm_buffer_object *tbo;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 503a414..ee6ed63 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -765,8 +765,6 @@
 	}
 	mgag200_bo_unreserve(bo);
 
-	DRM_INFO("mga base %llx\n", gpu_addr);
-
 	mga_set_start_address(crtc, (u32)gpu_addr);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19c..f39ab75 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@
 config DRM_MSM_FBDEV
 	bool "Enable legacy fbdev support for MSM modesetting driver"
 	depends on DRM_MSM
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12..7cf787d 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@
 	depends on DRM && PCI
         select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index d939a1d..edcf801 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -28,7 +28,9 @@
 nouveau-y += core/subdev/bar/nvc0.o
 nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/boost.o
 nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/cstep.o
 nouveau-y += core/subdev/bios/dcb.o
 nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
@@ -39,17 +41,26 @@
 nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/rammap.o
+nouveau-y += core/subdev/bios/timing.o
 nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/vmap.o
+nouveau-y += core/subdev/bios/volt.o
 nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/hwsq.o
 nouveau-y += core/subdev/bus/nv04.o
 nouveau-y += core/subdev/bus/nv31.o
 nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nv94.o
 nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/base.o
 nouveau-y += core/subdev/clock/nv04.o
 nouveau-y += core/subdev/clock/nv40.o
 nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nv84.o
 nouveau-y += core/subdev/clock/nva3.o
 nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/nve0.o
 nouveau-y += core/subdev/clock/pllnv04.o
 nouveau-y += core/subdev/clock/pllnva3.o
 nouveau-y += core/subdev/devinit/base.o
@@ -78,7 +89,12 @@
 nouveau-y += core/subdev/fb/nv49.o
 nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nv84.o
+nouveau-y += core/subdev/fb/nva3.o
+nouveau-y += core/subdev/fb/nvaa.o
+nouveau-y += core/subdev/fb/nvaf.o
 nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nve0.o
 nouveau-y += core/subdev/fb/ramnv04.o
 nouveau-y += core/subdev/fb/ramnv10.o
 nouveau-y += core/subdev/fb/ramnv1a.o
@@ -89,7 +105,12 @@
 nouveau-y += core/subdev/fb/ramnv49.o
 nouveau-y += core/subdev/fb/ramnv4e.o
 nouveau-y += core/subdev/fb/ramnv50.o
+nouveau-y += core/subdev/fb/ramnva3.o
+nouveau-y += core/subdev/fb/ramnvaa.o
 nouveau-y += core/subdev/fb/ramnvc0.o
+nouveau-y += core/subdev/fb/ramnve0.o
+nouveau-y += core/subdev/fb/sddr3.o
+nouveau-y += core/subdev/fb/gddr5.o
 nouveau-y += core/subdev/gpio/base.o
 nouveau-y += core/subdev/gpio/nv10.o
 nouveau-y += core/subdev/gpio/nv50.o
@@ -113,13 +134,22 @@
 nouveau-y += core/subdev/ltcg/nvc0.o
 nouveau-y += core/subdev/mc/base.o
 nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
 nouveau-y += core/subdev/mc/nv44.o
 nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv94.o
 nouveau-y += core/subdev/mc/nv98.o
 nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mc/nvc3.o
 nouveau-y += core/subdev/mxm/base.o
 nouveau-y += core/subdev/mxm/mxms.o
 nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/pwr/base.o
+nouveau-y += core/subdev/pwr/memx.o
+nouveau-y += core/subdev/pwr/nva3.o
+nouveau-y += core/subdev/pwr/nvc0.o
+nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/nv108.o
 nouveau-y += core/subdev/therm/base.o
 nouveau-y += core/subdev/therm/fan.o
 nouveau-y += core/subdev/therm/fannil.o
@@ -140,6 +170,9 @@
 nouveau-y += core/subdev/vm/nv44.o
 nouveau-y += core/subdev/vm/nv50.o
 nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/subdev/volt/base.o
+nouveau-y += core/subdev/volt/gpio.o
+nouveau-y += core/subdev/volt/nv40.o
 
 nouveau-y += core/engine/falcon.o
 nouveau-y += core/engine/xtensa.o
@@ -158,6 +191,7 @@
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/ctrl.o
 nouveau-y += core/engine/device/nv04.o
 nouveau-y += core/engine/device/nv10.o
 nouveau-y += core/engine/device/nv20.o
@@ -227,8 +261,18 @@
 nouveau-y += core/engine/graph/nvf0.o
 nouveau-y += core/engine/mpeg/nv31.o
 nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv44.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/perfmon/base.o
+nouveau-y += core/engine/perfmon/daemon.o
+nouveau-y += core/engine/perfmon/nv40.o
+nouveau-y += core/engine/perfmon/nv50.o
+nouveau-y += core/engine/perfmon/nv84.o
+nouveau-y += core/engine/perfmon/nva3.o
+nouveau-y += core/engine/perfmon/nvc0.o
+nouveau-y += core/engine/perfmon/nve0.o
+nouveau-y += core/engine/perfmon/nvf0.o
 nouveau-y += core/engine/ppp/nv98.o
 nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
@@ -260,9 +304,7 @@
 nouveau-y += nv50_display.o
 
 # drm/pm
-nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
-nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
-nouveau-y += nouveau_mem.o
+nouveau-y += nouveau_hwmon.o nouveau_sysfs.o
 
 # other random bits
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index 7eb81c1..3f3c765 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -23,62 +23,114 @@
 #include <core/os.h>
 #include <core/event.h>
 
-static void
-nouveau_event_put_locked(struct nouveau_event *event, int index,
-			 struct nouveau_eventh *handler)
-{
-	if (!--event->index[index].refs) {
-		if (event->disable)
-			event->disable(event, index);
-	}
-	list_del(&handler->head);
-}
-
 void
-nouveau_event_put(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
+nouveau_event_put(struct nouveau_eventh *handler)
 {
+	struct nouveau_event *event = handler->event;
 	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr)
-		nouveau_event_put_locked(event, index, handler);
-	spin_unlock_irqrestore(&event->lock, flags);
-}
-
-void
-nouveau_event_get(struct nouveau_event *event, int index,
-		  struct nouveau_eventh *handler)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&event->lock, flags);
-	if (index < event->index_nr) {
-		list_add(&handler->head, &event->index[index].list);
-		if (!event->index[index].refs++) {
-			if (event->enable)
-				event->enable(event, index);
+	if (__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!--event->index[handler->index].refs) {
+			if (event->disable)
+				event->disable(event, handler->index);
 		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
 	}
-	spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	if (!__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags)) {
+		spin_lock_irqsave(&event->refs_lock, flags);
+		if (!event->index[handler->index].refs++) {
+			if (event->enable)
+				event->enable(event, handler->index);
+		}
+		spin_unlock_irqrestore(&event->refs_lock, flags);
+	}
+}
+
+static void
+nouveau_event_fini(struct nouveau_eventh *handler)
+{
+	struct nouveau_event *event = handler->event;
+	unsigned long flags;
+	nouveau_event_put(handler);
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_del(&handler->head);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+}
+
+static int
+nouveau_event_init(struct nouveau_event *event, int index,
+		   int (*func)(void *, int), void *priv,
+		   struct nouveau_eventh *handler)
+{
+	unsigned long flags;
+
+	if (index >= event->index_nr)
+		return -EINVAL;
+
+	handler->event = event;
+	handler->flags = 0;
+	handler->index = index;
+	handler->func = func;
+	handler->priv = priv;
+
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_add_tail(&handler->head, &event->index[index].list);
+	spin_unlock_irqrestore(&event->list_lock, flags);
+	return 0;
+}
+
+int
+nouveau_event_new(struct nouveau_event *event, int index,
+		  int (*func)(void *, int), void *priv,
+		  struct nouveau_eventh **phandler)
+{
+	struct nouveau_eventh *handler;
+	int ret = -ENOMEM;
+
+	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
+	if (handler) {
+		ret = nouveau_event_init(event, index, func, priv, handler);
+		if (ret)
+			kfree(handler);
+	}
+
+	return ret;
+}
+
+void
+nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
+{
+	BUG_ON(handler != NULL);
+	if (*ref) {
+		nouveau_event_fini(*ref);
+		kfree(*ref);
+	}
+	*ref = handler;
 }
 
 void
 nouveau_event_trigger(struct nouveau_event *event, int index)
 {
-	struct nouveau_eventh *handler, *temp;
+	struct nouveau_eventh *handler;
 	unsigned long flags;
 
-	if (index >= event->index_nr)
+	if (WARN_ON(index >= event->index_nr))
 		return;
 
-	spin_lock_irqsave(&event->lock, flags);
-	list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
-		if (handler->func(handler, index) == NVKM_EVENT_DROP) {
-			nouveau_event_put_locked(event, index, handler);
-		}
+	spin_lock_irqsave(&event->list_lock, flags);
+	list_for_each_entry(handler, &event->index[index].list, head) {
+		if (test_bit(NVKM_EVENT_ENABLE, &handler->flags) &&
+		    handler->func(handler->priv, index) == NVKM_EVENT_DROP)
+			nouveau_event_put(handler);
 	}
-	spin_unlock_irqrestore(&event->lock, flags);
+	spin_unlock_irqrestore(&event->list_lock, flags);
 }
 
 void
@@ -102,7 +154,8 @@
 	if (!event)
 		return -ENOMEM;
 
-	spin_lock_init(&event->lock);
+	spin_lock_init(&event->list_lock);
+	spin_lock_init(&event->refs_lock);
 	for (i = 0; i < index_nr; i++)
 		INIT_LIST_HEAD(&event->index[i].list);
 	event->index_nr = index_nr;
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
index 62a432e..9f6fcc5 100644
--- a/drivers/gpu/drm/nouveau/core/core/option.c
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -25,15 +25,6 @@
 #include <core/option.h>
 #include <core/debug.h>
 
-/* compares unterminated string 'str' with zero-terminated string 'cmp' */
-static inline int
-strncasecmpz(const char *str, const char *cmp, size_t len)
-{
-	if (strlen(cmp) != len)
-		return len;
-	return strncasecmp(str, cmp, len);
-}
-
 const char *
 nouveau_stropt(const char *optstr, const char *opt, int *arglen)
 {
@@ -105,7 +96,7 @@
 				else if (!strncasecmpz(optstr, "warn", len))
 					level = NV_DBG_WARN;
 				else if (!strncasecmpz(optstr, "info", len))
-					level = NV_DBG_INFO;
+					level = NV_DBG_INFO_NORMAL;
 				else if (!strncasecmpz(optstr, "debug", len))
 					level = NV_DBG_DEBUG;
 				else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 52fb2aa..03e0060 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,16 +27,38 @@
 #include <core/subdev.h>
 #include <core/printk.h>
 
-int nv_printk_suspend_level = NV_DBG_DEBUG;
+int nv_info_debug_level = NV_DBG_INFO_NORMAL;
 
 void
-nv_printk_(struct nouveau_object *object, const char *pfx, int level,
-	   const char *fmt, ...)
+nv_printk_(struct nouveau_object *object, int level, const char *fmt, ...)
 {
 	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+	const char *pfx;
 	char mfmt[256];
 	va_list args;
 
+	switch (level) {
+	case NV_DBG_FATAL:
+		pfx = KERN_CRIT;
+		break;
+	case NV_DBG_ERROR:
+		pfx = KERN_ERR;
+		break;
+	case NV_DBG_WARN:
+		pfx = KERN_WARNING;
+		break;
+	case NV_DBG_INFO_NORMAL:
+		pfx = KERN_INFO;
+		break;
+	case NV_DBG_DEBUG:
+	case NV_DBG_PARANOIA:
+	case NV_DBG_TRACE:
+	case NV_DBG_SPAM:
+	default:
+		pfx = KERN_DEBUG;
+		break;
+	}
+
 	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
 		struct nouveau_object *device = object;
 		struct nouveau_object *subdev = object;
@@ -74,20 +96,3 @@
 	vprintk(mfmt, args);
 	va_end(args);
 }
-
-#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
-
-const char *nv_printk_level_to_pfx(int level)
-{
-	switch (level) {
-	CONV_LEVEL(FATAL);
-	CONV_LEVEL(ERROR);
-	CONV_LEVEL(WARN);
-	CONV_LEVEL(INFO);
-	CONV_LEVEL(DEBUG);
-	CONV_LEVEL(PARANOIA);
-	CONV_LEVEL(TRACE);
-	CONV_LEVEL(SPAM);
-	}
-	return NV_PRINTK_DEBUG;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 4c72571..9135b25a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,7 +29,7 @@
 
 #include <core/class.h>
 
-#include <engine/device.h>
+#include "priv.h"
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
@@ -75,7 +75,9 @@
 	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_PWR]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_PERFMON]  = NV_DEVICE_DISABLE_CORE,
 	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_SW]	= NV_DEVICE_DISABLE_FIFO,
 	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
@@ -87,7 +89,7 @@
 	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
 	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
 	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
-	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
+	[NVDEV_ENGINE_VIC]	= NV_DEVICE_DISABLE_VIC,
 	[NVDEV_ENGINE_VENC]	= NV_DEVICE_DISABLE_VENC,
 	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
 	[NVDEV_SUBDEV_NR]	= 0,
@@ -119,10 +121,12 @@
 			return -ENODEV;
 	}
 
-	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0,
+				    nouveau_control_oclass,
 				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
 				    (1ULL << NVDEV_ENGINE_FIFO) |
-				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
+				    (1ULL << NVDEV_ENGINE_DISP) |
+				    (1ULL << NVDEV_ENGINE_PERFMON), &devobj);
 	*pobject = nv_object(devobj);
 	if (ret)
 		return ret;
@@ -158,22 +162,29 @@
 		iounmap(map);
 
 		/* determine chipset and derive architecture from it */
-		if ((boot0 & 0x0f000000) > 0) {
-			device->chipset = (boot0 & 0xff00000) >> 20;
-			switch (device->chipset & 0xf0) {
-			case 0x10: device->card_type = NV_10; break;
-			case 0x20: device->card_type = NV_20; break;
-			case 0x30: device->card_type = NV_30; break;
-			case 0x40:
-			case 0x60: device->card_type = NV_40; break;
-			case 0x50:
-			case 0x80:
-			case 0x90:
-			case 0xa0: device->card_type = NV_50; break;
-			case 0xc0: device->card_type = NV_C0; break;
-			case 0xd0: device->card_type = NV_D0; break;
-			case 0xe0:
-			case 0xf0: device->card_type = NV_E0; break;
+		if ((boot0 & 0x1f000000) > 0) {
+			device->chipset = (boot0 & 0x1ff00000) >> 20;
+			switch (device->chipset & 0x1f0) {
+			case 0x010: {
+				if (0x461 & (1 << (device->chipset & 0xf)))
+					device->card_type = NV_10;
+				else
+					device->card_type = NV_11;
+				break;
+			}
+			case 0x020: device->card_type = NV_20; break;
+			case 0x030: device->card_type = NV_30; break;
+			case 0x040:
+			case 0x060: device->card_type = NV_40; break;
+			case 0x050:
+			case 0x080:
+			case 0x090:
+			case 0x0a0: device->card_type = NV_50; break;
+			case 0x0c0: device->card_type = NV_C0; break;
+			case 0x0d0: device->card_type = NV_D0; break;
+			case 0x0e0:
+			case 0x0f0:
+			case 0x100: device->card_type = NV_E0; break;
 			default:
 				break;
 			}
@@ -188,7 +199,8 @@
 
 		switch (device->card_type) {
 		case NV_04: ret = nv04_identify(device); break;
-		case NV_10: ret = nv10_identify(device); break;
+		case NV_10:
+		case NV_11: ret = nv10_identify(device); break;
 		case NV_20: ret = nv20_identify(device); break;
 		case NV_30: ret = nv30_identify(device); break;
 		case NV_40: ret = nv40_identify(device); break;
@@ -212,7 +224,7 @@
 		nv_info(device, "Family : NV%02X\n", device->card_type);
 
 		/* determine frequency of timing crystal */
-		if ( device->chipset < 0x17 ||
+		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
 		    (device->chipset >= 0x20 && device->chipset < 0x25))
 			strap &= 0x00000040;
 		else
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
new file mode 100644
index 0000000..4b69bf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+static int
+nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_info *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	if (clk) {
+		args->count  = clk->state_nr;
+		args->ustate = clk->ustate;
+		args->pstate = clk->pstate;
+	} else {
+		args->count  = 0;
+		args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
+		args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_attr *args = data;
+	struct nouveau_clocks *domain;
+	struct nouveau_pstate *pstate;
+	struct nouveau_cstate *cstate;
+	int i = 0, j = -1;
+	u32 lo, hi;
+
+	if ((size < sizeof(*args)) || !clk ||
+	    (args->state >= 0 && args->state >= clk->state_nr))
+		return -EINVAL;
+	domain = clk->domains;
+
+	while (domain->name != nv_clk_src_max) {
+		if (domain->mname && ++j == args->index)
+			break;
+		domain++;
+	}
+
+	if (domain->name == nv_clk_src_max)
+		return -EINVAL;
+
+	if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+		list_for_each_entry(pstate, &clk->states, head) {
+			if (i++ == args->state)
+				break;
+		}
+
+		lo = pstate->base.domain[domain->name];
+		hi = lo;
+		list_for_each_entry(cstate, &pstate->list, head) {
+			lo = min(lo, cstate->domain[domain->name]);
+			hi = max(hi, cstate->domain[domain->name]);
+		}
+
+		args->state = pstate->pstate;
+	} else {
+		lo = max(clk->read(clk, domain->name), 0);
+		hi = lo;
+	}
+
+	snprintf(args->name, sizeof(args->name), "%s", domain->mname);
+	snprintf(args->unit, sizeof(args->unit), "MHz");
+	args->min = lo / domain->mdiv;
+	args->max = hi / domain->mdiv;
+
+	args->index = 0;
+	while ((++domain)->name != nv_clk_src_max) {
+		if (domain->mname) {
+			args->index = ++j;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
+				void *data, u32 size)
+{
+	struct nouveau_clock *clk = nouveau_clock(object);
+	struct nv_control_pstate_user *args = data;
+
+	if (size < sizeof(*args) || !clk)
+		return -EINVAL;
+
+	return nouveau_clock_ustate(clk, args->state);
+}
+
+struct nouveau_oclass
+nouveau_control_oclass[] = {
+	{ .handle = NV_CONTROL_CLASS,
+	  .ofuncs = &nouveau_object_ofuncs,
+	  .omthds = (struct nouveau_omthds[]) {
+		  { NV_CONTROL_PSTATE_INFO,
+		    NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
+		  { NV_CONTROL_PSTATE_ATTR,
+		    NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
+		  { NV_CONTROL_PSTATE_USER,
+		    NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
+		  {},
+	  },
+	},
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index a0284cf..dbd2dde 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -50,15 +50,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -68,15 +68,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 1b7809a..6e03dd6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -52,10 +52,10 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -69,15 +69,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -88,15 +88,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -107,15 +107,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -126,15 +126,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -145,15 +145,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -164,15 +164,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -183,15 +183,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 12a4005..dcde53b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -53,15 +53,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -72,15 +72,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -110,15 +110,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index cef0f1e..7b8662e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -53,15 +53,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -72,15 +72,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
 		break;
@@ -91,15 +91,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -111,15 +111,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
@@ -131,15 +131,15 @@
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1719cb0..c8c41e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -35,6 +35,7 @@
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -43,6 +44,7 @@
 #include <engine/graph.h>
 #include <engine/mpeg.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv40_identify(struct nouveau_device *device)
@@ -56,18 +58,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x41:
 		device->cname = "NV41";
@@ -77,18 +81,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x42:
 		device->cname = "NV42";
@@ -98,18 +104,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x43:
 		device->cname = "NV43";
@@ -119,18 +127,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x45:
 		device->cname = "NV45";
@@ -140,18 +150,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x47:
 		device->cname = "G70";
@@ -161,18 +173,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x49:
 		device->cname = "G71";
@@ -182,18 +196,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4b:
 		device->cname = "G73";
@@ -203,18 +219,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x44:
 		device->cname = "NV44";
@@ -224,18 +242,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x46:
 		device->cname = "G72";
@@ -245,18 +265,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4a:
 		device->cname = "NV44A";
@@ -266,18 +288,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4c:
 		device->cname = "C61";
@@ -287,18 +311,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x4e:
 		device->cname = "C51";
@@ -308,18 +334,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x63:
 		device->cname = "C73";
@@ -329,18 +357,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x67:
 		device->cname = "C67";
@@ -350,18 +380,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	case 0x68:
 		device->cname = "C68";
@@ -371,18 +403,20 @@
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv40_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ffc18b8..db13982 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -36,6 +36,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nv50_identify(struct nouveau_device *device)
@@ -59,257 +62,277 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv50_perfmon_oclass;
 		break;
 	case 0x84:
 		device->cname = "G84";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x86:
 		device->cname = "G86";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x92:
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x94:
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x96:
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv94_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0x98:
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xa0:
 		device->cname = "G200";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nv84_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xaa:
 		device->cname = "MCP77/MCP78";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xac:
 		device->cname = "MCP79/MCP7A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] =  nv84_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaa_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nv84_perfmon_oclass;
 		break;
 	case 0xa3:
 		device->cname = "GT215";
@@ -320,16 +343,18 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
@@ -337,6 +362,7 @@
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xa5:
 		device->cname = "GT216";
@@ -347,22 +373,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xa8:
 		device->cname = "GT218";
@@ -373,22 +402,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nva3_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	case 0xaf:
 		device->cname = "MCP89";
@@ -399,22 +431,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv94_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvaf_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nva3_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nv98_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nv98_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] =  nva3_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 418f51f..606598f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/ppp.h>
 #include <engine/copy.h>
 #include <engine/disp.h>
+#include <engine/perfmon.h>
 
 int
 nvc0_identify(struct nouveau_device *device)
@@ -63,18 +66,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc0_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -82,6 +87,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc4:
 		device->cname = "GF104";
@@ -92,18 +98,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -111,6 +119,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc3:
 		device->cname = "GF106";
@@ -121,24 +130,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xce:
 		device->cname = "GF114";
@@ -149,18 +161,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -168,6 +182,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xcf:
 		device->cname = "GF116";
@@ -178,18 +193,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc3_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -197,6 +214,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc1:
 		device->cname = "GF108";
@@ -207,24 +225,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc1_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xc8:
 		device->cname = "GF110";
@@ -235,18 +256,20 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvc0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvc8_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
@@ -254,6 +277,7 @@
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd9:
 		device->cname = "GF119";
@@ -264,24 +288,27 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvd9_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	case 0xd7:
 		device->cname = "GF117";
@@ -292,24 +319,25 @@
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvd7_graph_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 7aca187..3900104 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -38,6 +38,8 @@
 #include <subdev/instmem.h>
 #include <subdev/vm.h>
 #include <subdev/bar.h>
+#include <subdev/pwr.h>
+#include <subdev/volt.h>
 
 #include <engine/device.h>
 #include <engine/dmaobj.h>
@@ -49,6 +51,7 @@
 #include <engine/bsp.h>
 #include <engine/vp.h>
 #include <engine/ppp.h>
+#include <engine/perfmon.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -59,22 +62,24 @@
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -83,28 +88,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xe7:
 		device->cname = "GK107";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -113,28 +121,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xe6:
 		device->cname = "GK106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nve4_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -143,28 +154,31 @@
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
 		break;
 	case 0xf0:
 		device->cname = "GK110";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
 		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
 		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nvd0_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
 		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
@@ -175,6 +189,43 @@
 		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 #endif
+		device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
+		break;
+	case 0x108:
+		device->cname = "GK208";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nve0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] =  nvc3_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] =  nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] =  nve0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR    ] = &nv108_pwr_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_FIFO   ] =  nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] =  nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] =  nvf0_graph_oclass;
+#endif
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+#endif
 		break;
 	default:
 		nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/priv.h b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
new file mode 100644
index 0000000..035fd5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/priv.h
@@ -0,0 +1,8 @@
+#ifndef __NVKM_DEVICE_PRIV_H__
+#define __NVKM_DEVICE_PRIV_H__
+
+#include <engine/device.h>
+
+extern struct nouveau_oclass nouveau_control_oclass[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 054d9cf..1bd4c63 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -70,17 +70,10 @@
 	};
 	u32 lnkcmp;
 	u8 sink[2];
+	int ret;
 
 	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
-	/* set desired link configuration on the sink */
-	sink[0] = dp->link_bw / 27000;
-	sink[1] = dp->link_nr;
-	if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
-		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
-
-	nv_wraux(dp->aux, DPCD_LC00, sink, 2);
-
 	/* set desired link configuration on the source */
 	if ((lnkcmp = dp->info.lnkcmp)) {
 		if (dp->version < 0x30) {
@@ -96,10 +89,22 @@
 		nvbios_exec(&init);
 	}
 
-	return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
-				 dp->link_nr, dp->link_bw / 27000,
-				 dp->dpcd[DPCD_RC02] &
-					  DPCD_RC02_ENHANCED_FRAME_CAP);
+	ret = dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+				dp->link_nr, dp->link_bw / 27000,
+				dp->dpcd[DPCD_RC02] &
+					 DPCD_RC02_ENHANCED_FRAME_CAP);
+	if (ret) {
+		ERR("lnk_ctl failed with %d\n", ret);
+		return ret;
+	}
+
+	/* set desired link configuration on the sink */
+	sink[0] = dp->link_bw / 27000;
+	sink[1] = dp->link_nr;
+	if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+	return nv_wraux(dp->aux, DPCD_LC00, sink, 2);
 }
 
 static void
@@ -294,8 +299,17 @@
 
 	ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
 	if (ret) {
+		/* it's possible the display has been unplugged before we
+		 * get here.  we still need to execute the full set of
+		 * vbios scripts, and program the OR at a high enough
+		 * frequency to satisfy the target mode.  failure to do
+		 * so results at best in an UPDATE hanging, and at worst
+		 * with PDISP running away to join the circus.
+		 */
+		dp->dpcd[1] = link_bw[0] / 27000;
+		dp->dpcd[2] = 4;
+		dp->dpcd[3] = 0x00;
 		ERR("failed to read DPCD\n");
-		return ret;
 	}
 
 	/* adjust required bandwidth for 8B/10B coding overhead */
@@ -308,7 +322,7 @@
 	while (*link_bw > (dp->dpcd[1] * 27000))
 		link_bw++;
 
-	while (link_bw[0]) {
+	while ((ret = -EIO) && link_bw[0]) {
 		/* find minimum required lane count at this link rate */
 		dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
 		while ((dp->link_nr >> 1) * link_bw[0] > datarate)
@@ -328,8 +342,10 @@
 			    !dp_link_train_eq(dp))
 				break;
 		} else
-		if (ret >= 1) {
-			/* dp_set_link_config() handled training */
+		if (ret) {
+			/* dp_set_link_config() handled training, or
+			 * we failed to communicate with the sink.
+			 */
 			break;
 		}
 
@@ -339,8 +355,10 @@
 
 	/* finish link training */
 	dp_set_training_pattern(dp, 0);
+	if (ret < 0)
+		ERR("link training failed\n");
 
 	/* execute post-train script from vbios */
 	dp_link_train_fini(dp);
-	return true;
+	return (ret < 0) ? false : true;
 }
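
The dport.c hunk above reorders link setup so the source-side lnk_ctl call runs before the sink's DPCD registers are written, and, when the DPCD read fails (display already unplugged), it fakes a maximal sink capability so the OR is still programmed fast enough for the target mode. A rough standalone sketch of that fallback flow follows; read_dpcd() and train_at() are hypothetical stand-ins for the driver's nv_rdaux() and training helpers, not real nouveau functions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static const uint32_t link_bw[] = { 270000, 162000, 0 };	/* KB/s, high to low */

/* stand-in for nv_rdaux(); non-zero means the sink could not be reached */
static int read_dpcd(uint8_t *dpcd, size_t len) { (void)dpcd; (void)len; return -5; }
/* stand-in for one clock-recovery + channel-eq attempt at a given rate */
static bool train_at(uint32_t bw, int lanes) { (void)bw; (void)lanes; return false; }

bool link_train(uint32_t datarate)
{
	uint8_t dpcd[16] = { 0 };
	const uint32_t *bw = link_bw;
	int ret;

	ret = read_dpcd(dpcd, sizeof(dpcd));
	if (ret) {
		/* display may already be unplugged: pretend it supports the
		 * fastest rate and four lanes so the OR is still programmed
		 * fast enough for the target mode */
		dpcd[1] = link_bw[0] / 27000;
		dpcd[2] = 4;
	}

	while (*bw > dpcd[1] * 27000u)		/* clamp to sink capability */
		bw++;

	for (ret = -5; *bw; bw++) {		/* stays "-EIO" until a rate trains */
		int lanes = dpcd[2] & 0x1f;
		while ((lanes >> 1) * *bw > datarate)
			lanes >>= 1;
		if (train_at(*bw, lanes)) {
			ret = 0;
			break;
		}
	}

	return ret == 0;			/* false -> "link training failed" */
}

Keeping ret at an error value until a rate actually trains mirrors the while ((ret = -EIO) && link_bw[0]) loop in the hunk.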
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 05e903f..a0bc8a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -59,6 +59,7 @@
 	struct nv04_disp_priv *priv = (void *)subdev;
 	u32 crtc0 = nv_rd32(priv, 0x600100);
 	u32 crtc1 = nv_rd32(priv, 0x602100);
+	u32 pvideo;
 
 	if (crtc0 & 0x00000001) {
 		nouveau_event_trigger(priv->base.vblank, 0);
@@ -69,6 +70,14 @@
 		nouveau_event_trigger(priv->base.vblank, 1);
 		nv_wr32(priv, 0x602100, 0x00000001);
 	}
+
+	if (nv_device(priv)->chipset >= 0x10 &&
+	    nv_device(priv)->chipset <= 0x40) {
+		pvideo = nv_rd32(priv, 0x8100);
+		if (pvideo & ~0x11)
+			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
+		nv_wr32(priv, 0x8100, pvideo);
+	}
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 52dd7a1..378a015 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -541,6 +541,15 @@
 	nv_wr32(priv, 0x6100a0, 0x00000000);
 	nv_wr32(priv, 0x6100b0, 0x00000307);
 
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some nve4 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	for (i = 0; i < priv->head.nr; i++)
+		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 7ec4ee83..eea3ef5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -97,8 +97,9 @@
 {
 	struct nouveau_bios *bios = nouveau_bios(disp);
 	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 shift = nv94_sor_dp_lane_map(priv, lane);
 	const u32 loff = nv94_sor_loff(outp);
-	u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
@@ -113,9 +114,12 @@
 	if (!addr)
 		return -EINVAL;
 
-	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
-	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
-	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 9e1d435..d2df572 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -93,8 +93,9 @@
 {
 	struct nouveau_bios *bios = nouveau_bios(disp);
 	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 shift = nvd0_sor_dp_lane_map(priv, lane);
 	const u32 loff = nvd0_sor_loff(outp);
-	u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
 	struct nvbios_dpout info;
 	struct nvbios_dpcfg ocfg;
@@ -109,9 +110,12 @@
 	if (!addr)
 		return -EINVAL;
 
-	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
-	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
-	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nv_rd32(priv, 0x61c130 + loff) & ~(0x0000ff00);
+	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.drv << shift));
+	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pre << shift));
+	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.unk << 8));
 	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
 	return 0;
 }
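
Both the sornv94.c and sornvd0.c hunks above replace the interleaved nv_mask() read-modify-writes with a read phase followed by three back-to-back register writes, so the drive, pre-emphasis and config values land together. A minimal sketch of the same pattern, with mmio_rd32()/mmio_wr32() as hypothetical MMIO accessors:

#include <stdint.h>

extern uint32_t mmio_rd32(uint32_t addr);
extern void     mmio_wr32(uint32_t addr, uint32_t data);

void sor_dp_drv_ctl(uint32_t loff, uint32_t shift,
		    uint32_t drv, uint32_t pre, uint32_t unk)
{
	uint32_t data[3];

	/* phase 1: read the registers and clear this lane's fields */
	data[0] = mmio_rd32(0x61c118 + loff) & ~(0x000000ffu << shift);
	data[1] = mmio_rd32(0x61c120 + loff) & ~(0x000000ffu << shift);
	data[2] = mmio_rd32(0x61c130 + loff) & ~0x0000ff00u;

	/* phase 2: issue the three writes with no reads in between */
	mmio_wr32(0x61c118 + loff, data[0] | (drv << shift));
	mmio_wr32(0x61c120 + loff, data[1] | (pre << shift));
	mmio_wr32(0x61c130 + loff, data[2] | (unk << 8));
}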
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index f877bd5..54f26cc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -632,8 +632,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_fifo_oclass = {
+struct nouveau_oclass *
+nv04_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x04),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2c927c1..571a22a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -159,8 +159,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv10_fifo_oclass = {
+struct nouveau_oclass *
+nv10_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x10),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv10_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index a9cb51d..f257602 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -196,8 +196,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv17_fifo_oclass = {
+struct nouveau_oclass *
+nv17_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x17),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv17_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 5c7433d..343487e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -337,8 +337,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv40_fifo_oclass = {
+struct nouveau_oclass *
+nv40_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x40),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv40_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7e5dff5..5f55578 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -502,8 +502,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_fifo_oclass = {
+struct nouveau_oclass *
+nv50_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x50),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 91a87cd..0908dc8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -144,7 +144,7 @@
 	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
 	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
 	case NVDEV_ENGINE_CRYPT :
-	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
 	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
 	default:
 		return -EINVAL;
@@ -180,7 +180,7 @@
 					  (1ULL << NVDEV_ENGINE_BSP) |
 					  (1ULL << NVDEV_ENGINE_PPP) |
 					  (1ULL << NVDEV_ENGINE_COPY0) |
-					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_VIC), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -243,7 +243,7 @@
 					  (1ULL << NVDEV_ENGINE_BSP) |
 					  (1ULL << NVDEV_ENGINE_PPP) |
 					  (1ULL << NVDEV_ENGINE_COPY0) |
-					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+					  (1ULL << NVDEV_ENGINE_VIC), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -435,8 +435,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv84_fifo_oclass = {
+struct nouveau_oclass *
+nv84_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0x84),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv84_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ce92f28..e21453a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -720,8 +720,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nvc0_fifo_oclass = {
+struct nouveau_oclass *
+nvc0_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_fifo_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 8e8121a..fcd449e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -675,8 +675,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nve0_fifo_oclass = {
+struct nouveau_oclass *
+nve0_fifo_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(FIFO, 0xe0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nve0_fifo_ctor,
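
The FIFO hunks above (nv04 through nve0) change the exported symbols from plain oclass objects to pointers initialised with static compound literals, which is why the device tables earlier in this merge now assign them without a leading '&'. A small self-contained illustration of that C idiom, using simplified stand-in types rather than the real nouveau structures:

#include <stdio.h>

struct oclass {
	unsigned handle;
	const char *name;
};

/* old style: exported object, callers take its address */
struct oclass old_fifo_oclass = { 0x04, "fifo-old" };

/* new style: exported pointer to a file-scope compound literal
 * (static storage duration per C99), assigned by callers as-is */
struct oclass *new_fifo_oclass = &(struct oclass) {
	.handle = 0xe0,
	.name   = "fifo-new",
};

int main(void)
{
	struct oclass *table[2];

	table[0] = &old_fifo_oclass;	/* needs the '&' */
	table[1] = new_fifo_oclass;	/* assigned directly */

	for (int i = 0; i < 2; i++)
		printf("%02x %s\n", table[i]->handle, table[i]->name);
	return 0;
}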
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index e5be3ee..71b4283 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -587,6 +587,7 @@
 	{ 0x405870,   4, 0x04, 0x00000001 },
 	{ 0x405a00,   2, 0x04, 0x00000000 },
 	{ 0x405a18,   1, 0x04, 0x00000000 },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -598,6 +599,7 @@
 	{ 0x408904,   1, 0x04, 0x62000001 },
 	{ 0x408908,   1, 0x04, 0x00c80929 },
 	{ 0x408980,   1, 0x04, 0x0000011d },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -671,6 +673,7 @@
 	{ 0x419000,   1, 0x04, 0x00000780 },
 	{ 0x419004,   2, 0x04, 0x00000000 },
 	{ 0x419014,   1, 0x04, 0x00000004 },
+	{}
 };
 
 static struct nvc0_graph_init
@@ -717,6 +720,7 @@
 	{ 0x419e98,   1, 0x04, 0x00000000 },
 	{ 0x419ee0,   1, 0x04, 0x00011110 },
 	{ 0x419f30,  11, 0x04, 0x00000000 },
+	{}
 };
 
 void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 438e784..c4740d5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -258,6 +258,7 @@
 	nvc0_grctx_init_unk78xx,
 	nvc0_grctx_init_unk80xx,
 	nvd9_grctx_init_rop,
+	NULL
 };
 
 struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index 818a475..a1102cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -466,6 +466,7 @@
 	nvc0_grctx_init_unk78xx,
 	nvc0_grctx_init_unk80xx,
 	nvd9_grctx_init_rop,
+	NULL
 };
 
 struct nvc0_graph_init *
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 23c143a..4532f7e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -945,7 +945,8 @@
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
 			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
 	}
@@ -970,7 +971,8 @@
 	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
 		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
 			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
 	}
@@ -1052,7 +1054,8 @@
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
 					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
@@ -1231,7 +1234,7 @@
 		nv_engine(priv)->sclass = nv10_graph_sclass;
 	else
 	if (nv_device(priv)->chipset <  0x17 ||
-	    nv_device(priv)->chipset == 0x1a)
+	    nv_device(priv)->card_type < NV_11)
 		nv_engine(priv)->sclass = nv15_graph_sclass;
 	else
 		nv_engine(priv)->sclass = nv17_graph_sclass;
@@ -1270,7 +1273,8 @@
 	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
 	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
 
-	if (nv_device(priv)->chipset >= 0x17) {
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17) {
 		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
 		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
 		nv_wr32(priv, 0x400838, 0x002f8684);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 3f4f35c..434bb4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1138,7 +1138,7 @@
 	if (ret)
 		return ret;
 
-	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->unit = 0x08001000;
 	nv_subdev(priv)->intr = nvc0_graph_intr;
 
 	priv->base.units = nvc0_graph_units;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index c190043..7eb6d94c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -34,16 +34,7 @@
 
 #include <engine/fifo.h>
 #include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv31_mpeg_priv {
-	struct nouveau_mpeg base;
-	atomic_t refcount;
-};
-
-struct nv31_mpeg_chan {
-	struct nouveau_object base;
-};
+#include <engine/mpeg/nv31.h>
 
 /*******************************************************************************
  * MPEG object classes
@@ -89,18 +80,18 @@
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
 		nv_wr32(priv, 0x00b334, base);
 		nv_wr32(priv, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
 		nv_wr32(priv, 0x00b360, base);
 		nv_wr32(priv, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
-		if (dma0 & 0x000c0000)
+		if (dma0 & 0x00030000)
 			return -EINVAL;
 
 		nv_wr32(priv, 0x00b370, base);
@@ -110,7 +101,7 @@
 	return 0;
 }
 
-static struct nouveau_ofuncs
+struct nouveau_ofuncs
 nv31_mpeg_ofuncs = {
 	.ctor = nv31_mpeg_object_ctor,
 	.dtor = _nouveau_gpuobj_dtor,
@@ -146,16 +137,23 @@
 {
 	struct nv31_mpeg_priv *priv = (void *)engine;
 	struct nv31_mpeg_chan *chan;
+	unsigned long flags;
 	int ret;
 
-	if (!atomic_add_unless(&priv->refcount, 1, 1))
-		return -EBUSY;
-
 	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+	if (priv->chan) {
+		spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		*pobject = NULL;
+		return -EBUSY;
+	}
+	priv->chan = chan;
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 	return 0;
 }
 
@@ -164,11 +162,15 @@
 {
 	struct nv31_mpeg_priv *priv = (void *)object->engine;
 	struct nv31_mpeg_chan *chan = (void *)object;
-	atomic_dec(&priv->refcount);
+	unsigned long flags;
+
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+	priv->chan = NULL;
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 	nouveau_object_destroy(&chan->base);
 }
 
-static struct nouveau_oclass
+struct nouveau_oclass
 nv31_mpeg_cclass = {
 	.handle = NV_ENGCTX(MPEG, 0x31),
 	.ofuncs = &(struct nouveau_ofuncs) {
@@ -197,21 +199,19 @@
 void
 nv31_mpeg_intr(struct nouveau_subdev *subdev)
 {
-	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
-	struct nouveau_engine *engine = nv_engine(subdev);
-	struct nouveau_object *engctx;
-	struct nouveau_handle *handle;
 	struct nv31_mpeg_priv *priv = (void *)subdev;
-	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_handle *handle;
+	struct nouveau_object *engctx;
 	u32 stat = nv_rd32(priv, 0x00b100);
 	u32 type = nv_rd32(priv, 0x00b230);
 	u32 mthd = nv_rd32(priv, 0x00b234);
 	u32 data = nv_rd32(priv, 0x00b238);
 	u32 show = stat;
-	int chid;
+	unsigned long flags;
 
-	engctx = nouveau_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
+	engctx = nv_object(priv->chan);
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
@@ -220,7 +220,7 @@
 			show &= ~0x01000000;
 		}
 
-		if (type == 0x00000010) {
+		if (type == 0x00000010 && engctx) {
 			handle = nouveau_handle_get_class(engctx, 0x3174);
 			if (handle && !nv_call(handle->object, mthd, data))
 				show &= ~0x01000000;
@@ -232,13 +232,12 @@
 	nv_wr32(priv, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv,
-			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 chid, inst << 4, nouveau_client_name(engctx), stat,
-			 type, mthd, data);
+		nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 pfifo->chid(pfifo, engctx),
+			 nouveau_client_name(engctx), stat, type, mthd, data);
 	}
 
-	nouveau_engctx_put(engctx);
+	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
 }
 
 static int
@@ -284,10 +283,7 @@
 	/* PMPEG init */
 	nv_wr32(priv, 0x00b32c, 0x00000000);
 	nv_wr32(priv, 0x00b314, 0x00000100);
-	if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
-		nv_wr32(priv, 0x00b220, 0x00000044);
-	else
-		nv_wr32(priv, 0x00b220, 0x00000031);
+	nv_wr32(priv, 0x00b220, 0x00000031);
 	nv_wr32(priv, 0x00b300, 0x02001ec1);
 	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
new file mode 100644
index 0000000..d08629d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.h
@@ -0,0 +1,15 @@
+#ifndef __NV31_MPEG_H__
+#define __NV31_MPEG_H__
+
+#include <engine/mpeg.h>
+
+struct nv31_mpeg_chan {
+	struct nouveau_object base;
+};
+
+struct nv31_mpeg_priv {
+	struct nouveau_mpeg base;
+	struct nv31_mpeg_chan *chan;
+};
+
+#endif
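
The nv31.c rework above, together with the new nv31.h, replaces the atomic refcount with a single priv->chan pointer: the channel constructor claims it and the interrupt handler dereferences it, both under the engine lock. A rough user-space sketch of that single-owner scheme, with pthread primitives standing in for the kernel spinlock and made-up type names:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct mpeg_chan { int dummy; };

struct mpeg_engine {
	pthread_mutex_t lock;		/* plays the role of nv_engine()->lock */
	struct mpeg_chan *chan;		/* NULL when no channel owns PMPEG */
};

int mpeg_chan_create(struct mpeg_engine *eng, struct mpeg_chan **pchan)
{
	struct mpeg_chan *chan = calloc(1, sizeof(*chan));
	if (!chan)
		return -ENOMEM;

	pthread_mutex_lock(&eng->lock);
	if (eng->chan) {		/* another channel already owns the engine */
		pthread_mutex_unlock(&eng->lock);
		free(chan);
		return -EBUSY;
	}
	eng->chan = chan;
	pthread_mutex_unlock(&eng->lock);

	*pchan = chan;
	return 0;
}

void mpeg_chan_destroy(struct mpeg_engine *eng, struct mpeg_chan *chan)
{
	pthread_mutex_lock(&eng->lock);
	eng->chan = NULL;		/* the interrupt path now sees no owner */
	pthread_mutex_unlock(&eng->lock);
	free(chan);
}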
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index dd61960..d4e7ec0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -31,66 +31,63 @@
 #include <subdev/instmem.h>
 
 #include <engine/mpeg.h>
-#include <engine/graph/nv40.h>
-
-struct nv40_mpeg_priv {
-	struct nouveau_mpeg base;
-};
-
-struct nv40_mpeg_chan {
-	struct nouveau_mpeg_chan base;
-};
+#include <engine/mpeg/nv31.h>
 
 /*******************************************************************************
- * PMPEG context
+ * MPEG object classes
  ******************************************************************************/
 
 static int
-nv40_mpeg_context_ctor(struct nouveau_object *parent,
-		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass, void *data, u32 size,
-		       struct nouveau_object **pobject)
+nv40_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
 {
-	struct nv40_mpeg_chan *chan;
-	int ret;
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	u32 inst = *(u32 *)arg << 4;
+	u32 dma0 = nv_ro32(imem, inst + 0);
+	u32 dma1 = nv_ro32(imem, inst + 4);
+	u32 dma2 = nv_ro32(imem, inst + 8);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
 
-	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
-					  264 * 4, 16,
-					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return -EINVAL;
 
-	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_wr32(priv, 0x00b334, base);
+		nv_wr32(priv, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_wr32(priv, 0x00b360, base);
+		nv_wr32(priv, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x00030000)
+			return -EINVAL;
+
+		nv_wr32(priv, 0x00b370, base);
+		nv_wr32(priv, 0x00b374, size);
+	}
+
 	return 0;
 }
 
-static int
-nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
-{
+static struct nouveau_omthds
+nv40_mpeg_omthds[] = {
+	{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
+	{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
+	{ 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
+	{}
+};
 
-	struct nv40_mpeg_priv *priv = (void *)object->engine;
-	struct nv40_mpeg_chan *chan = (void *)object;
-	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
-
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x00b318) == inst)
-		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-	return 0;
-}
-
-static struct nouveau_oclass
-nv40_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv40_mpeg_context_ctor,
-		.dtor = _nouveau_mpeg_context_dtor,
-		.init = _nouveau_mpeg_context_init,
-		.fini = nv40_mpeg_context_fini,
-		.rd32 = _nouveau_mpeg_context_rd32,
-		.wr32 = _nouveau_mpeg_context_wr32,
-	},
+struct nouveau_oclass
+nv40_mpeg_sclass[] = {
+	{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
+	{}
 };
 
 /*******************************************************************************
@@ -100,7 +97,7 @@
 static void
 nv40_mpeg_intr(struct nouveau_subdev *subdev)
 {
-	struct nv40_mpeg_priv *priv = (void *)subdev;
+	struct nv31_mpeg_priv *priv = (void *)subdev;
 	u32 stat;
 
 	if ((stat = nv_rd32(priv, 0x00b100)))
@@ -117,7 +114,7 @@
 	       struct nouveau_oclass *oclass, void *data, u32 size,
 	       struct nouveau_object **pobject)
 {
-	struct nv40_mpeg_priv *priv;
+	struct nv31_mpeg_priv *priv;
 	int ret;
 
 	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
@@ -127,8 +124,8 @@
 
 	nv_subdev(priv)->unit = 0x00000002;
 	nv_subdev(priv)->intr = nv40_mpeg_intr;
-	nv_engine(priv)->cclass = &nv40_mpeg_cclass;
-	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+	nv_engine(priv)->sclass = nv40_mpeg_sclass;
 	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
new file mode 100644
index 0000000..3d8c2133
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/fifo.h>
+#include <engine/mpeg.h>
+
+struct nv44_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv44_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv44_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv44_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+					  264 * 4, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
+	return 0;
+}
+
+static int
+nv44_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+
+	struct nv44_mpeg_priv *priv = (void *)object->engine;
+	struct nv44_mpeg_chan *chan = (void *)object;
+	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x00b318) == inst)
+		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv44_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = nv44_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv44_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv44_mpeg_priv *priv = (void *)subdev;
+	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			show &= ~0x01000000;
+		}
+
+		if (type == 0x00000010) {
+			handle = nouveau_handle_get_class(engctx, 0x3174);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~0x01000000;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+
+	if (show) {
+		nv_error(priv,
+			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 chid, inst << 4, nouveau_client_name(engctx), stat,
+			 type, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static void
+nv44_mpeg_me_intr(struct nouveau_subdev *subdev)
+{
+	struct nv44_mpeg_priv *priv = (void *)subdev;
+	u32 stat;
+
+	if ((stat = nv_rd32(priv, 0x00b100)))
+		nv44_mpeg_intr(subdev);
+
+	if ((stat = nv_rd32(priv, 0x00b800))) {
+		nv_error(priv, "PMSRCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv44_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv44_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv44_mpeg_me_intr;
+	nv_engine(priv)->cclass = &nv44_mpeg_cclass;
+	nv_engine(priv)->sclass = nv40_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv44_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
new file mode 100644
index 0000000..e9c5e51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/class.h>
+
+#include <subdev/clock.h>
+
+#include "priv.h"
+
+#define QUAD_MASK 0x0f
+#define QUAD_FREE 0x01
+
+static struct nouveau_perfsig *
+nouveau_perfsig_find_(struct nouveau_perfdom *dom, const char *name, u32 size)
+{
+	char path[64];
+	int i;
+
+	if (name[0] != '/') {
+		for (i = 0; i < dom->signal_nr; i++) {
+			if ( dom->signal[i].name &&
+			    !strncmp(name, dom->signal[i].name, size))
+				return &dom->signal[i];
+		}
+	} else {
+		for (i = 0; i < dom->signal_nr; i++) {
+			snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
+			if (!strncmp(name, path, size))
+				return &dom->signal[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct nouveau_perfsig *
+nouveau_perfsig_find(struct nouveau_perfmon *ppm, const char *name, u32 size,
+		     struct nouveau_perfdom **pdom)
+{
+	struct nouveau_perfdom *dom = *pdom;
+	struct nouveau_perfsig *sig;
+
+	if (dom == NULL) {
+		list_for_each_entry(dom, &ppm->domains, head) {
+			sig = nouveau_perfsig_find_(dom, name, size);
+			if (sig) {
+				*pdom = dom;
+				return sig;
+			}
+		}
+
+		return NULL;
+	}
+
+	return nouveau_perfsig_find_(dom, name, size);
+}
+
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
+		     struct nouveau_perfdom **pdom)
+{
+	struct nouveau_perfsig *sig;
+	struct nouveau_perfctr *ctr;
+
+	sig = nouveau_perfsig_find(ppm, name, strlen(name), pdom);
+	if (!sig)
+		return NULL;
+
+	ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+	if (ctr) {
+		ctr->signal[0] = sig;
+		ctr->logic_op = 0xaaaa;
+	}
+
+	return ctr;
+}
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+static int
+nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
+		      void *data, u32 size)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	struct nouveau_perfdom *dom = NULL, *chk;
+	struct nv_perfctr_query *args = data;
+	const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
+	const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
+	const char *name;
+	int tmp = 0, di, si;
+	char path[64];
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	di = (args->iter & 0xff000000) >> 24;
+	si = (args->iter & 0x00ffffff) - 1;
+
+	list_for_each_entry(chk, &ppm->domains, head) {
+		if (tmp++ == di) {
+			dom = chk;
+			break;
+		}
+	}
+
+	if (dom == NULL || si >= (int)dom->signal_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		if (raw || !(name = dom->signal[si].name)) {
+			snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
+			name = path;
+		}
+
+		if (args->name)
+			strncpy(args->name, name, args->size);
+		args->size = strlen(name) + 1;
+	}
+
+	do {
+		while (++si < dom->signal_nr) {
+			if (all || dom->signal[si].name) {
+				args->iter = (di << 24) | ++si;
+				return 0;
+			}
+		}
+		si = -1;
+		di = di + 1;
+		dom = list_entry(dom->head.next, typeof(*dom), head);
+	} while (&dom->head != &ppm->domains);
+
+	args->iter = 0xffffffff;
+	return 0;
+}
+
+static int
+nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
+		       void *data, u32 size)
+{
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	struct nouveau_perfctr *ctr, *tmp;
+	struct nouveau_perfdom *dom;
+	struct nv_perfctr_sample *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+	ppm->sequence++;
+
+	list_for_each_entry(dom, &ppm->domains, head) {
+		/* sample previous batch of counters */
+		if (dom->quad != QUAD_MASK) {
+			dom->func->next(ppm, dom);
+			tmp = NULL;
+			while (!list_empty(&dom->list)) {
+				ctr = list_first_entry(&dom->list,
+							typeof(*ctr), head);
+				if (ctr->slot < 0) break;
+				if ( tmp && tmp == ctr) break;
+				if (!tmp) tmp = ctr;
+				dom->func->read(ppm, dom, ctr);
+				ctr->slot  = -1;
+				list_move_tail(&ctr->head, &dom->list);
+			}
+		}
+
+		dom->quad = QUAD_MASK;
+
+		/* setup next batch of counters for sampling */
+		list_for_each_entry(ctr, &dom->list, head) {
+			ctr->slot = ffs(dom->quad) - 1;
+			if (ctr->slot < 0)
+				break;
+			dom->quad &= ~(QUAD_FREE << ctr->slot);
+			dom->func->init(ppm, dom, ctr);
+		}
+
+		if (dom->quad != QUAD_MASK)
+			dom->func->next(ppm, dom);
+	}
+
+	return 0;
+}
+
+static int
+nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
+		     void *data, u32 size)
+{
+	struct nouveau_perfctr *ctr = (void *)object;
+	struct nv_perfctr_read *args = data;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+	if (!ctr->clk)
+		return -EAGAIN;
+
+	args->clk = ctr->clk;
+	args->ctr = ctr->ctr;
+	return 0;
+}
+
+static void
+nouveau_perfctr_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfctr *ctr = (void *)object;
+	if (ctr->head.next)
+		list_del(&ctr->head);
+	nouveau_object_destroy(&ctr->base);
+}
+
+static int
+nouveau_perfctr_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_perfmon *ppm = (void *)engine;
+	struct nouveau_perfdom *dom = NULL;
+	struct nouveau_perfsig *sig[4] = {};
+	struct nouveau_perfctr *ctr;
+	struct nv_perfctr_class *args = data;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
+		sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
+					      args->signal[i].size, &dom);
+		if (!sig[i])
+			return -EINVAL;
+	}
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &ctr);
+	*pobject = nv_object(ctr);
+	if (ret)
+		return ret;
+
+	ctr->slot = -1;
+	ctr->logic_op = args->logic_op;
+	ctr->signal[0] = sig[0];
+	ctr->signal[1] = sig[1];
+	ctr->signal[2] = sig[2];
+	ctr->signal[3] = sig[3];
+	if (dom)
+		list_add_tail(&ctr->head, &dom->list);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nouveau_perfctr_ofuncs = {
+	.ctor = nouveau_perfctr_ctor,
+	.dtor = nouveau_perfctr_dtor,
+	.init = nouveau_object_init,
+	.fini = nouveau_object_fini,
+};
+
+static struct nouveau_omthds
+nouveau_perfctr_omthds[] = {
+	{ NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
+	{ NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
+	{ NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
+	{}
+};
+
+struct nouveau_oclass
+nouveau_perfmon_sclass[] = {
+	{ .handle = NV_PERFCTR_CLASS,
+	  .ofuncs = &nouveau_perfctr_ofuncs,
+	  .omthds =  nouveau_perfctr_omthds,
+	},
+	{},
+};
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+static void
+nouveau_perfctx_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object->engine;
+	mutex_lock(&nv_subdev(ppm)->mutex);
+	ppm->context = NULL;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
+}
+
+static int
+nouveau_perfctx_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_perfmon *ppm = (void *)engine;
+	struct nouveau_perfctx *ctx;
+	int ret;
+
+	ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+				    0, 0, 0, &ctx);
+	*pobject = nv_object(ctx);
+	if (ret)
+		return ret;
+
+	mutex_lock(&nv_subdev(ppm)->mutex);
+	if (ppm->context == NULL)
+		ppm->context = ctx;
+	mutex_unlock(&nv_subdev(ppm)->mutex);
+
+	if (ctx != ppm->context)
+		return -EBUSY;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nouveau_perfmon_cclass = {
+	.handle = NV_ENGCTX(PERFMON, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_perfctx_ctor,
+		.dtor = nouveau_perfctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+	},
+};
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+int
+nouveau_perfdom_new(struct nouveau_perfmon *ppm, const char *name, u32 mask,
+		    u32 base, u32 size_unit, u32 size_domain,
+		    const struct nouveau_specdom *spec)
+{
+	const struct nouveau_specdom *sdom;
+	const struct nouveau_specsig *ssig;
+	struct nouveau_perfdom *dom;
+	int i;
+
+	for (i = 0; i == 0 || mask; i++) {
+		u32 addr = base + (i * size_unit);
+		if (i && !(mask & (1 << i)))
+			continue;
+
+		sdom = spec;
+		while (sdom->signal_nr) {
+			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
+				      sizeof(*dom->signal), GFP_KERNEL);
+			if (!dom)
+				return -ENOMEM;
+
+			if (mask) {
+				snprintf(dom->name, sizeof(dom->name),
+					 "%s/%02x/%02x", name, i,
+					 (int)(sdom - spec));
+			} else {
+				snprintf(dom->name, sizeof(dom->name),
+					 "%s/%02x", name, (int)(sdom - spec));
+			}
+
+			list_add_tail(&dom->head, &ppm->domains);
+			INIT_LIST_HEAD(&dom->list);
+			dom->func = sdom->func;
+			dom->addr = addr;
+			dom->quad = QUAD_MASK;
+			dom->signal_nr = sdom->signal_nr;
+
+			ssig = (sdom++)->signal;
+			while (ssig->name) {
+				dom->signal[ssig->signal].name = ssig->name;
+				ssig++;
+			}
+
+			addr += size_domain;
+		}
+
+		mask &= ~(1 << i);
+	}
+
+	return 0;
+}
+
+int
+_nouveau_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	return nouveau_engine_fini(&ppm->base, suspend);
+}
+
+int
+_nouveau_perfmon_init(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	return nouveau_engine_init(&ppm->base);
+}
+
+void
+_nouveau_perfmon_dtor(struct nouveau_object *object)
+{
+	struct nouveau_perfmon *ppm = (void *)object;
+	struct nouveau_perfdom *dom, *tmp;
+
+	list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+		list_del(&dom->head);
+		kfree(dom);
+	}
+
+	nouveau_engine_destroy(&ppm->base);
+}
+
+int
+nouveau_perfmon_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_perfmon *ppm;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, true, "PPM",
+				     "perfmon", length, pobject);
+	ppm = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&ppm->domains);
+	return 0;
+}
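
nouveau_perfctr_query() above enumerates signals through a packed iterator: the top eight bits select the domain, the low 24 bits carry the signal index plus one, zero starts the walk and 0xffffffff terminates it. A standalone sketch of that encoding; the domain table here is invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

struct dom { const char *name; int signal_nr; };

static const struct dom domains[] = {
	{ "pwr", 3 },
	{ "gpc", 2 },
};
#define NDOM ((int)(sizeof(domains) / sizeof(domains[0])))

/* advance the iterator; 0xffffffff means everything has been visited */
static uint32_t query_next(uint32_t iter)
{
	int di = (iter & 0xff000000) >> 24;
	int si = (int)(iter & 0x00ffffff) - 1;

	while (di < NDOM) {
		if (++si < domains[di].signal_nr)
			return ((uint32_t)di << 24) | (uint32_t)(si + 1);
		si = -1;	/* move on to the next domain */
		di++;
	}
	return 0xffffffff;
}

int main(void)
{
	for (uint32_t it = query_next(0); it != 0xffffffff; it = query_next(it)) {
		int di = (it & 0xff000000) >> 24;
		int si = (int)(it & 0x00ffffff) - 1;
		printf("/%s/%02x\n", domains[di].name, si);
	}
	return 0;
}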
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
new file mode 100644
index 0000000..50696cc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/daemon.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static void
+pwr_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		 struct nouveau_perfctr *ctr)
+{
+	u32 mask = 0x00000000;
+	u32 ctrl = 0x00000001;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
+		mask |= 1 << (ctr->signal[i] - dom->signal);
+
+	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
+	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
+	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
+}
+
+static void
+pwr_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		 struct nouveau_perfctr *ctr)
+{
+	ctr->ctr = ppm->pwr[ctr->slot];
+	ctr->clk = ppm->pwr[ppm->last];
+}
+
+static void
+pwr_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	int i;
+
+	for (i = 0; i <= ppm->last; i++) {
+		ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
+		nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
+	}
+}
+
+static const struct nouveau_funcdom
+pwr_perfctr_func = {
+	.init = pwr_perfctr_init,
+	.read = pwr_perfctr_read,
+	.next = pwr_perfctr_next,
+};
+
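+/* Signal tables for the PDAEMON ("pwr") counter domain: each entry pairs a
+ * hardware signal index with a human-readable name.  The set of exposed idle
+ * signals grows with each generation (nva3 -> nvc0 -> nve0).
+ */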
+const struct nouveau_specdom
+nva3_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
+
+const struct nouveau_specdom
+nvc0_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{ 0x14, "pwr_ce1_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
+
+const struct nouveau_specdom
+nve0_perfmon_pwr[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{ 0x00, "pwr_gr_idle" },
+			{ 0x04, "pwr_bsp_idle" },
+			{ 0x05, "pwr_vp_idle" },
+			{ 0x06, "pwr_ppp_idle" },
+			{ 0x13, "pwr_ce0_idle" },
+			{ 0x14, "pwr_ce1_idle" },
+			{ 0x15, "pwr_ce2_idle" },
+			{}
+		}, &pwr_perfctr_func },
+	{}
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
new file mode 100644
index 0000000..b2a1078
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	struct nv40_perfmon_cntr *cntr = (void *)ctr;
+	u32 log = ctr->logic_op;
+	u32 src = 0x00000000;
+	int i;
+
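+	/* pack up to four signal selectors into 'src', one per byte */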
+	for (i = 0; i < 4 && ctr->signal[i]; i++)
+		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
+	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
+	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+}
+
+static void
+nv40_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	struct nv40_perfmon_cntr *cntr = (void *)ctr;
+
+	switch (cntr->base.slot) {
+	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
+	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
+	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
+	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+	}
+	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+}
+
+static void
+nv40_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	struct nv40_perfmon_priv *priv = (void *)ppm;
+	if (priv->sequence != ppm->sequence) {
+		nv_wr32(priv, 0x400084, 0x00000020);
+		priv->sequence = ppm->sequence;
+	}
+}
+
+const struct nouveau_funcdom
+nv40_perfctr_func = {
+	.init = nv40_perfctr_init,
+	.read = nv40_perfctr_read,
+	.next = nv40_perfctr_next,
+};
+
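+/* Five generic counter domains of 0x20 signals each; no individual signals
+ * are named yet.
+ */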
+static const struct nouveau_specdom
+nv40_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+int
+nv40_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv40_perfmon_oclass *mclass = (void *)oclass;
+	struct nv40_perfmon_priv *priv;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	return 0;
+}
+
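+/* The oclass is published as a pointer to the .base member of a compound
+ * literal, so chipset code can wrap nouveau_oclass in a wider structure
+ * (here carrying the domain tables) and the constructor can recover it by
+ * casting the oclass pointer (see nv40_perfmon_ctor above).
+ */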
+struct nouveau_oclass *
+nv40_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x40),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv40_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
new file mode 100644
index 0000000..1b5792d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv40.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_PM_NV40_H__
+#define __NVKM_PM_NV40_H__
+
+#include "priv.h"
+
+struct nv40_perfmon_oclass {
+	struct nouveau_oclass base;
+	const struct nouveau_specdom *doms;
+};
+
+struct nv40_perfmon_priv {
+	struct nouveau_perfmon base;
+	u32 sequence;
+};
+
+int nv40_perfmon_ctor(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, void *data, u32 size,
+		      struct nouveau_object **pobject);
+
+struct nv40_perfmon_cntr {
+	struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nv40_perfctr_func;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
new file mode 100644
index 0000000..9421769
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv50.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv50_perfmon[] = {
+	{ 0x040, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x100, (const struct nouveau_specsig[]) {
+			{ 0xc8, "gr_idle" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x100, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x020, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x040, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+struct nouveau_oclass *
+nv50_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv50_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
new file mode 100644
index 0000000..9232c7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nv84.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nv84_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+struct nouveau_oclass *
+nv84_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0x84),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nv84_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
new file mode 100644
index 0000000..6197ebd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nva3.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv40.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nva3_perfmon[] = {
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nouveau_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+static int
+nva3_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **object)
+{
+	int ret = nv40_perfmon_ctor(parent, engine, oclass, data, size, object);
+	if (ret == 0) {
+		struct nv40_perfmon_priv *priv = (void *)*object;
+		ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+					   nva3_perfmon_pwr);
+		if (ret)
+			return ret;
+
+		priv->base.last = 3;
+	}
+	return ret;
+}
+
+struct nouveau_oclass *
+nva3_perfmon_oclass = &(struct nv40_perfmon_oclass) {
+	.base.handle = NV_ENGINE(PERFMON, 0xa3),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = _nouveau_perfmon_fini,
+	},
+	.doms = nva3_perfmon,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
new file mode 100644
index 0000000..74b2410
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nvc0_perfmon_hub[] = {
+	{}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_gpc[] = {
+	{}
+};
+
+static const struct nouveau_specdom
+nvc0_perfmon_part[] = {
+	{}
+};
+
+static void
+nvc0_perfctr_init(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+	u32 log = ctr->logic_op;
+	u32 src = 0x00000000;
+	int i;
+
+	for (i = 0; i < 4 && ctr->signal[i]; i++)
+		src |= (ctr->signal[i] - dom->signal) << (i * 8);
+
+	nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
+	nv_wr32(priv, dom->addr + 0x100, 0x00000000);
+	nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
+	nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+}
+
+static void
+nvc0_perfctr_read(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom,
+		  struct nouveau_perfctr *ctr)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	struct nvc0_perfmon_cntr *cntr = (void *)ctr;
+
+	switch (cntr->base.slot) {
+	case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
+	case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
+	case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
+	case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
+	}
+	cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+}
+
+static void
+nvc0_perfctr_next(struct nouveau_perfmon *ppm, struct nouveau_perfdom *dom)
+{
+	struct nvc0_perfmon_priv *priv = (void *)ppm;
+	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+}
+
+const struct nouveau_funcdom
+nvc0_perfctr_func = {
+	.init = nvc0_perfctr_init,
+	.read = nvc0_perfctr_read,
+	.next = nvc0_perfctr_next,
+};
+
+int
+nvc0_perfmon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvc0_perfmon_priv *priv = (void *)object;
+	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
+	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
+	return nouveau_perfmon_fini(&priv->base, suspend);
+}
+
+static int
+nvc0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	u32 mask;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nvc0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	/* HUB */
+	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+				   nvc0_perfmon_hub);
+	if (ret)
+		return ret;
+
+	/* GPC */
+	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
+	mask &= ~nv_rd32(priv, 0x022504);
+	mask &= ~nv_rd32(priv, 0x022584);
+
+	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+				  0x1000, 0x200, nvc0_perfmon_gpc);
+	if (ret)
+		return ret;
+
+	/* PART */
+	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
+	mask &= ~nv_rd32(priv, 0x022548);
+	mask &= ~nv_rd32(priv, 0x0225c8);
+
+	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+				  0x1000, 0x200, nvc0_perfmon_part);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	priv->base.last = 7;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
new file mode 100644
index 0000000..f66bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvc0.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_PM_NVC0_H__
+#define __NVKM_PM_NVC0_H__
+
+#include "priv.h"
+
+struct nvc0_perfmon_priv {
+	struct nouveau_perfmon base;
+};
+
+struct nvc0_perfmon_cntr {
+	struct nouveau_perfctr base;
+};
+
+extern const struct nouveau_funcdom nvc0_perfctr_func;
+int nvc0_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
new file mode 100644
index 0000000..71d718c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nve0.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_specdom
+nve0_perfmon_hub[] = {
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x40, (const struct nouveau_specsig[]) {
+			{ 0x27, "hub01_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub02_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub03_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x40, (const struct nouveau_specsig[]) {
+			{ 0x03, "host_mmio_rd" },
+			{ 0x27, "hub04_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub05_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0xc0, (const struct nouveau_specsig[]) {
+			{ 0x74, "host_fb_rd3x" },
+			{ 0x75, "host_fb_rd3x_2" },
+			{ 0xa7, "hub06_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "hub07_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_gpc[] = {
+	{ 0xe0, (const struct nouveau_specsig[]) {
+			{ 0xc7, "gpc00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static const struct nouveau_specdom
+nve0_perfmon_part[] = {
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "part00_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{ 0x60, (const struct nouveau_specsig[]) {
+			{ 0x47, "part01_user_0" },
+			{}
+		}, &nvc0_perfctr_func },
+	{}
+};
+
+static int
+nve0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	u32 mask;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* PDAEMON */
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nve0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	/* HUB */
+	ret = nouveau_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
+				   nve0_perfmon_hub);
+	if (ret)
+		return ret;
+
+	/* GPC */
+	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
+	mask &= ~nv_rd32(priv, 0x022504);
+	mask &= ~nv_rd32(priv, 0x022584);
+
+	ret = nouveau_perfdom_new(&priv->base, "gpc", mask, 0x180000,
+				  0x1000, 0x200, nve0_perfmon_gpc);
+	if (ret)
+		return ret;
+
+	/* PART */
+	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
+	mask &= ~nv_rd32(priv, 0x022548);
+	mask &= ~nv_rd32(priv, 0x0225c8);
+
+	ret = nouveau_perfdom_new(&priv->base, "part", mask, 0x1a0000,
+				  0x1000, 0x200, nve0_perfmon_part);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	priv->base.last = 7;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
new file mode 100644
index 0000000..47256f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/nvf0.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+/*******************************************************************************
+ * Perfmon object classes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM context
+ ******************************************************************************/
+
+/*******************************************************************************
+ * PPM engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvf0_perfmon_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nvc0_perfmon_priv *priv;
+	int ret;
+
+	ret = nouveau_perfmon_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
+				   nve0_perfmon_pwr);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nouveau_perfmon_cclass;
+	nv_engine(priv)->sclass =  nouveau_perfmon_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvf0_perfmon_oclass = {
+	.handle = NV_ENGINE(PERFMON, 0xf0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvf0_perfmon_ctor,
+		.dtor = _nouveau_perfmon_dtor,
+		.init = _nouveau_perfmon_init,
+		.fini = nvc0_perfmon_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
new file mode 100644
index 0000000..0ac8714
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/priv.h
@@ -0,0 +1,91 @@
+#ifndef __NVKM_PERFMON_PRIV_H__
+#define __NVKM_PERFMON_PRIV_H__
+
+#include <engine/perfmon.h>
+
+struct nouveau_perfctr {
+	struct nouveau_object base;
+	struct list_head head;
+	struct nouveau_perfsig *signal[4];
+	int slot;
+	u32 logic_op;
+	u32 clk;
+	u32 ctr;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_sclass[];
+
+struct nouveau_perfctx {
+	struct nouveau_engctx base;
+};
+
+extern struct nouveau_oclass nouveau_perfmon_cclass;
+
+struct nouveau_specsig {
+	u8 signal;
+	const char *name;
+};
+
+struct nouveau_perfsig {
+	const char *name;
+};
+
+struct nouveau_perfdom;
+struct nouveau_perfctr *
+nouveau_perfsig_wrap(struct nouveau_perfmon *, const char *,
+		     struct nouveau_perfdom **);
+
+struct nouveau_specdom {
+	u16 signal_nr;
+	const struct nouveau_specsig *signal;
+	const struct nouveau_funcdom *func;
+};
+
+extern const struct nouveau_specdom nva3_perfmon_pwr[];
+extern const struct nouveau_specdom nvc0_perfmon_pwr[];
+extern const struct nouveau_specdom nve0_perfmon_pwr[];
+
+struct nouveau_perfdom {
+	struct list_head head;
+	struct list_head list;
+	const struct nouveau_funcdom *func;
+	char name[32];
+	u32 addr;
+	u8  quad;
+	u32 signal_nr;
+	struct nouveau_perfsig signal[];
+};
+
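+/* Hardware backend for a counter domain: init() programs a counter's signal
+ * selection, read() retrieves the counter and clock values, and next()
+ * advances the hardware to the next sampling period.
+ */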
+struct nouveau_funcdom {
+	void (*init)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+		     struct nouveau_perfctr *);
+	void (*read)(struct nouveau_perfmon *, struct nouveau_perfdom *,
+		     struct nouveau_perfctr *);
+	void (*next)(struct nouveau_perfmon *, struct nouveau_perfdom *);
+};
+
+int nouveau_perfdom_new(struct nouveau_perfmon *, const char *, u32,
+			u32, u32, u32, const struct nouveau_specdom *);
+
+#define nouveau_perfmon_create(p,e,o,d)                                        \
+	nouveau_perfmon_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_perfmon_dtor(p) ({                                             \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_dtor(nv_object(c));                                   \
+})
+#define nouveau_perfmon_init(p) ({                                             \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_init(nv_object(c));                                   \
+})
+#define nouveau_perfmon_fini(p,s) ({                                           \
+	struct nouveau_perfmon *c = (p);                                       \
+	_nouveau_perfmon_fini(nv_object(c), (s));                              \
+})
+
+int nouveau_perfmon_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+void _nouveau_perfmon_dtor(struct nouveau_object *);
+int  _nouveau_perfmon_init(struct nouveau_object *);
+int  _nouveau_perfmon_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 2a859a3..c571758 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -135,8 +135,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_software_oclass = {
+struct nouveau_oclass *
+nv04_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x04),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a019364..a62f11a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -117,8 +117,8 @@
 	return 0;
 }
 
-struct nouveau_oclass
-nv10_software_oclass = {
+struct nouveau_oclass *
+nv10_software_oclass = &(struct nouveau_oclass) {
 	.handle = NV_ENGINE(SW, 0x10),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv10_software_ctor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index c48e749..b574dd4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -32,16 +32,9 @@
 
 #include <subdev/bar.h>
 
-#include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_software_priv {
-	struct nouveau_software base;
-};
-
-struct nv50_software_chan {
-	struct nouveau_software_chan base;
-};
+#include "nv50.h"
 
 /*******************************************************************************
  * software object classes
@@ -62,7 +55,7 @@
 
 	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
 		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
 		ret = 0;
 	}
 	nouveau_namedb_put(handle);
@@ -74,34 +67,33 @@
 				 void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.offset = *(u32 *)args;
+	chan->vblank.offset = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
 				void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.value = *(u32 *)args;
+	chan->vblank.value = *(u32 *)args;
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
 				  void *args, u32 size)
 {
 	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nouveau_disp *disp = nouveau_disp(object);
-	u32 crtc = *(u32 *)args;
-	if (crtc > 1)
+	u32 head = *(u32 *)args;
+	if (head >= chan->vblank.nr_event)
 		return -EINVAL;
 
-	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+	nouveau_event_get(chan->vblank.event[head]);
 	return 0;
 }
 
-static int
+int
 nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 			void *args, u32 size)
 {
@@ -132,10 +124,9 @@
  ******************************************************************************/
 
 static int
-nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+nv50_software_vblsem_release(void *data, int head)
 {
-	struct nouveau_software_chan *chan =
-		container_of(event, struct nouveau_software_chan, vblank.event);
+	struct nv50_software_chan *chan = data;
 	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	struct nouveau_bar *bar = nouveau_bar(priv);
 
@@ -154,45 +145,76 @@
 	return NVKM_EVENT_DROP;
 }
 
-static int
+void
+nv50_software_context_dtor(struct nouveau_object *object)
+{
+	struct nv50_software_chan *chan = (void *)object;
+	int i;
+
+	if (chan->vblank.event) {
+		for (i = 0; i < chan->vblank.nr_event; i++)
+			nouveau_event_ref(NULL, &chan->vblank.event[i]);
+		kfree(chan->vblank.event);
+	}
+
+	nouveau_software_context_destroy(&chan->base);
+}
+
+int
 nv50_software_context_ctor(struct nouveau_object *parent,
 			   struct nouveau_object *engine,
 			   struct nouveau_oclass *oclass, void *data, u32 size,
 			   struct nouveau_object **pobject)
 {
+	struct nouveau_disp *pdisp = nouveau_disp(parent);
+	struct nv50_software_cclass *pclass = (void *)oclass;
 	struct nv50_software_chan *chan;
-	int ret;
+	int ret, i;
 
 	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
 
-	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
-	chan->base.vblank.event.func = nv50_software_vblsem_release;
+	chan->vblank.nr_event = pdisp->vblank->index_nr;
+	chan->vblank.event = kzalloc(chan->vblank.nr_event *
+				     sizeof(*chan->vblank.event), GFP_KERNEL);
+	if (!chan->vblank.event)
+		return -ENOMEM;
+
+	for (i = 0; i < chan->vblank.nr_event; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i, pclass->vblank,
+					chan, &chan->vblank.event[i]);
+		if (ret)
+			return ret;
+	}
+
+	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nouveau_oclass
+static struct nv50_software_cclass
 nv50_software_cclass = {
-	.handle = NV_ENGCTX(SW, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+	.base.handle = NV_ENGCTX(SW, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_software_context_ctor,
 		.dtor = _nouveau_software_context_dtor,
 		.init = _nouveau_software_context_init,
 		.fini = _nouveau_software_context_fini,
 	},
+	.vblank = nv50_software_vblsem_release,
 };
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
+int
 nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		   struct nouveau_oclass *oclass, void *data, u32 size,
 		   struct nouveau_object **pobject)
 {
+	struct nv50_software_oclass *pclass = (void *)oclass;
 	struct nv50_software_priv *priv;
 	int ret;
 
@@ -201,19 +223,21 @@
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->cclass = &nv50_software_cclass;
-	nv_engine(priv)->sclass = nv50_software_sclass;
+	nv_engine(priv)->cclass = pclass->cclass;
+	nv_engine(priv)->sclass = pclass->sclass;
 	nv_subdev(priv)->intr = nv04_software_intr;
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_software_oclass = {
-	.handle = NV_ENGINE(SW, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_software_oclass = &(struct nv50_software_oclass) {
+	.base.handle = NV_ENGINE(SW, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_software_ctor,
 		.dtor = _nouveau_software_dtor,
 		.init = _nouveau_software_init,
 		.fini = _nouveau_software_fini,
 	},
-};
+	.cclass = &nv50_software_cclass.base,
+	.sclass =  nv50_software_sclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
new file mode 100644
index 0000000..2de370c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -0,0 +1,47 @@
+#ifndef __NVKM_SW_NV50_H__
+#define __NVKM_SW_NV50_H__
+
+#include <engine/software.h>
+
+struct nv50_software_oclass {
+	struct nouveau_oclass base;
+	struct nouveau_oclass *cclass;
+	struct nouveau_oclass *sclass;
+};
+
+struct nv50_software_priv {
+	struct nouveau_software base;
+};
+
+int  nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, void *, u32,
+			struct nouveau_object **);
+
+struct nv50_software_cclass {
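+/* Chipset-specific context class; .vblank is the handler bound to each
+ * display head's vblank event by nv50_software_context_ctor.
+ */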
+	struct nouveau_oclass base;
+	int (*vblank)(void *, int);
+};
+
+struct nv50_software_chan {
+	struct nouveau_software_chan base;
+	struct {
+		struct nouveau_eventh **event;
+		int nr_event;
+		u32 channel;
+		u32 ctxdma;
+		u64 offset;
+		u32 value;
+	} vblank;
+};
+
+int  nv50_software_context_ctor(struct nouveau_object *,
+				struct nouveau_object *,
+				struct nouveau_oclass *, void *, u32,
+				struct nouveau_object **);
+void nv50_software_context_dtor(struct nouveau_object *);
+
+int nv50_software_mthd_vblsem_value(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_vblsem_release(struct nouveau_object *, u32, void *, u32);
+int nv50_software_mthd_flip(struct nouveau_object *, u32, void *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index d698e710..f9430c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -32,13 +32,7 @@
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nvc0_software_priv {
-	struct nouveau_software base;
-};
-
-struct nvc0_software_chan {
-	struct nouveau_software_chan base;
-};
+#include "nv50.h"
 
 /*******************************************************************************
  * software object classes
@@ -48,58 +42,24 @@
 nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
 				 void *args, u32 size)
 {
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
 	u64 data = *(u32 *)args;
 	if (mthd == 0x0400) {
-		chan->base.vblank.offset &= 0x00ffffffffULL;
-		chan->base.vblank.offset |= data << 32;
+		chan->vblank.offset &= 0x00ffffffffULL;
+		chan->vblank.offset |= data << 32;
 	} else {
-		chan->base.vblank.offset &= 0xff00000000ULL;
-		chan->base.vblank.offset |= data;
+		chan->vblank.offset &= 0xff00000000ULL;
+		chan->vblank.offset |= data;
 	}
 	return 0;
 }
 
 static int
-nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
-				void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	chan->base.vblank.value = *(u32 *)args;
-	return 0;
-}
-
-static int
-nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
-				  void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nouveau_disp *disp = nouveau_disp(object);
-	u32 crtc = *(u32 *)args;
-
-	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
-		return -EINVAL;
-
-	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
-	return 0;
-}
-
-static int
-nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
-			void *args, u32 size)
-{
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static int
 nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
                               void *args, u32 size)
 {
-	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
-	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	u32 data = *(u32 *)args;
 
 	switch (mthd) {
@@ -124,9 +84,9 @@
 nvc0_software_omthds[] = {
 	{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
 	{ 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
-	{ 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
-	{ 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nvc0_software_mthd_flip },
+	{ 0x0408, 0x0408, nv50_software_mthd_vblsem_value },
+	{ 0x040c, 0x040c, nv50_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nv50_software_mthd_flip },
 	{ 0x0600, 0x0600, nvc0_software_mthd_mp_control },
 	{ 0x0644, 0x0644, nvc0_software_mthd_mp_control },
 	{ 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
@@ -144,11 +104,10 @@
  ******************************************************************************/
 
 static int
-nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+nvc0_software_vblsem_release(void *data, int head)
 {
-	struct nouveau_software_chan *chan =
-		container_of(event, struct nouveau_software_chan, vblank.event);
-	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nv50_software_chan *chan = data;
+	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
 	struct nouveau_bar *bar = nouveau_bar(priv);
 
 	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
@@ -160,66 +119,31 @@
 	return NVKM_EVENT_DROP;
 }
 
-static int
-nvc0_software_context_ctor(struct nouveau_object *parent,
-			   struct nouveau_object *engine,
-			   struct nouveau_oclass *oclass, void *data, u32 size,
-			   struct nouveau_object **pobject)
-{
-	struct nvc0_software_chan *chan;
-	int ret;
-
-	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
-	chan->base.vblank.event.func = nvc0_software_vblsem_release;
-	return 0;
-}
-
-static struct nouveau_oclass
+static struct nv50_software_cclass
 nvc0_software_cclass = {
-	.handle = NV_ENGCTX(SW, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_software_context_ctor,
+	.base.handle = NV_ENGCTX(SW, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_context_ctor,
 		.dtor = _nouveau_software_context_dtor,
 		.init = _nouveau_software_context_init,
 		.fini = _nouveau_software_context_fini,
 	},
+	.vblank = nvc0_software_vblsem_release,
 };
 
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
-nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, void *data, u32 size,
-		   struct nouveau_object **pobject)
-{
-	struct nvc0_software_priv *priv;
-	int ret;
-
-	ret = nouveau_software_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvc0_software_cclass;
-	nv_engine(priv)->sclass = nvc0_software_sclass;
-	nv_subdev(priv)->intr = nv04_software_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvc0_software_oclass = {
-	.handle = NV_ENGINE(SW, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_software_ctor,
+struct nouveau_oclass *
+nvc0_software_oclass = &(struct nv50_software_oclass) {
+	.base.handle = NV_ENGINE(SW, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_ctor,
 		.dtor = _nouveau_software_dtor,
 		.init = _nouveau_software_init,
 		.fini = _nouveau_software_fini,
 	},
-};
+	.cclass = &nvc0_software_cclass.base,
+	.sclass =  nvc0_software_sclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 5a5961b..560c359 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -22,7 +22,7 @@
 #define NV_DEVICE_DISABLE_PPP                             0x0000004000000000ULL
 #define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
-#define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VIC                             0x0000020000000000ULL
 #define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
 
 struct nv_device_class {
@@ -98,6 +98,77 @@
 	u32 conf0;
 };
 
+/* Perfmon counter class
+ *
+ * XXXX: NV_PERFCTR
+ */
+#define NV_PERFCTR_CLASS                                             0x0000ffff
+#define NV_PERFCTR_QUERY                                             0x00000000
+#define NV_PERFCTR_SAMPLE                                            0x00000001
+#define NV_PERFCTR_READ                                              0x00000002
+
+struct nv_perfctr_class {
+	u16 logic_op;
+	struct {
+		char __user *name; /*XXX: use cfu when exposed to userspace */
+		u32 size;
+	} signal[4];
+};
+
+struct nv_perfctr_query {
+	u32 iter;
+	u32 size;
+	char __user *name; /*XXX: use ctu when exposed to userspace */
+};
+
+struct nv_perfctr_sample {
+};
+
+struct nv_perfctr_read {
+	u32 ctr;
+	u32 clk;
+};
+
+/* Device control class
+ *
+ * XXXX: NV_CONTROL
+ */
+#define NV_CONTROL_CLASS                                             0x0000fffe
+
+#define NV_CONTROL_PSTATE_INFO                                       0x00000000
+#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE                              (-1)
+#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON                              (-2)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN                              (-1)
+#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON                              (-2)
+#define NV_CONTROL_PSTATE_ATTR                                       0x00000001
+#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT                               (-1)
+#define NV_CONTROL_PSTATE_USER                                       0x00000002
+#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN                               (-1)
+#define NV_CONTROL_PSTATE_USER_STATE_PERFMON                               (-2)
+
+struct nv_control_pstate_info {
+	u32 count; /* out: number of power states */
+	s32 ustate; /* out: current target pstate index */
+	u32 pstate; /* out: current pstate index */
+};
+
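+/* Attributes of a pstate are enumerated by submitting PSTATE_ATTR with
+ * index = 0 and re-submitting with the returned index until it reads back
+ * as 0.
+ */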
+struct nv_control_pstate_attr {
+	s32 state; /*  in: index of pstate to query
+		    * out: pstate identifier
+		    */
+	u32 index; /*  in: index of attribute to query
+		    * out: index of next attribute, or 0 if no more
+		    */
+	char name[32];
+	char unit[16];
+	u32 min;
+	u32 max;
+};
+
+struct nv_control_pstate_user {
+	s32 state; /*  in: pstate identifier */
+};
+
 /* DMA FIFO channel classes
  *
  * 006b: NV03_CHANNEL_DMA
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
index 9ea18df..8092e2e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/debug.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -1,13 +1,20 @@
 #ifndef __NOUVEAU_DEBUG_H__
 #define __NOUVEAU_DEBUG_H__
 
+extern int nv_info_debug_level;
+
 #define NV_DBG_FATAL    0
 #define NV_DBG_ERROR    1
 #define NV_DBG_WARN     2
-#define NV_DBG_INFO     3
+#define NV_DBG_INFO     nv_info_debug_level
 #define NV_DBG_DEBUG    4
 #define NV_DBG_TRACE    5
 #define NV_DBG_PARANOIA 6
 #define NV_DBG_SPAM     7
 
+#define NV_DBG_INFO_NORMAL 3
+#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
+
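+/* nv_debug_level(SILENT) demotes INFO messages to the DEBUG level;
+ * nv_debug_level(NORMAL) restores the default.
+ */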
+#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 99b6600..ac2881d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -33,9 +33,10 @@
 	NVDEV_SUBDEV_INSTMEM,
 	NVDEV_SUBDEV_VM,
 	NVDEV_SUBDEV_BAR,
+	NVDEV_SUBDEV_PWR,
 	NVDEV_SUBDEV_VOLT,
-	NVDEV_SUBDEV_CLOCK,
 	NVDEV_SUBDEV_THERM,
+	NVDEV_SUBDEV_CLOCK,
 
 	NVDEV_ENGINE_DMAOBJ,
 	NVDEV_ENGINE_FIFO,
@@ -50,9 +51,10 @@
 	NVDEV_ENGINE_COPY0,
 	NVDEV_ENGINE_COPY1,
 	NVDEV_ENGINE_COPY2,
-	NVDEV_ENGINE_UNK1C1,
+	NVDEV_ENGINE_VIC,
 	NVDEV_ENGINE_VENC,
 	NVDEV_ENGINE_DISP,
+	NVDEV_ENGINE_PERFMON,
 
 	NVDEV_SUBDEV_NR,
 };
@@ -72,6 +74,7 @@
 	enum {
 		NV_04    = 0x04,
 		NV_10    = 0x10,
+		NV_11    = 0x11,
 		NV_20    = 0x20,
 		NV_30    = 0x30,
 		NV_40    = 0x40,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index 9e09440..5d539eb 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -5,13 +5,21 @@
 #define NVKM_EVENT_DROP 0
 #define NVKM_EVENT_KEEP 1
 
+/* nouveau_eventh.flags bit #s */
+#define NVKM_EVENT_ENABLE 0
+
 struct nouveau_eventh {
+	struct nouveau_event *event;
 	struct list_head head;
-	int (*func)(struct nouveau_eventh *, int index);
+	unsigned long flags;
+	int index;
+	int (*func)(void *, int);
+	void *priv;
 };
 
 struct nouveau_event {
-	spinlock_t lock;
+	spinlock_t list_lock;
+	spinlock_t refs_lock;
 
 	void *priv;
 	void (*enable)(struct nouveau_event *, int index);
@@ -28,9 +36,11 @@
 void nouveau_event_destroy(struct nouveau_event **);
 void nouveau_event_trigger(struct nouveau_event *, int index);
 
-void nouveau_event_get(struct nouveau_event *, int index,
-		       struct nouveau_eventh *);
-void nouveau_event_put(struct nouveau_event *, int index,
-		       struct nouveau_eventh *);
+int  nouveau_event_new(struct nouveau_event *, int index,
+		       int (*func)(void *, int), void *,
+		       struct nouveau_eventh **);
+void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
+void nouveau_event_get(struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_eventh *);
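+/* Event handlers are now objects in their own right: create one with
+ * nouveau_event_new(), enable/disable delivery with nouveau_event_get()/
+ * nouveau_event_put(), and drop it with nouveau_event_ref(NULL, &handler),
+ * as the nv50 software engine does for its per-head vblank handlers.
+ */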
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
index 2707495..ed05584 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/option.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -8,4 +8,13 @@
 
 int nouveau_dbgopt(const char *optstr, const char *sub);
 
+/* compares the unterminated string 'str', of length 'len', with the
+ * zero-terminated string 'cmp'; returns 0 on a case-insensitive match */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+	if (strlen(cmp) != len)
+		return len;
+	return strncasecmp(str, cmp, len);
+}
+
 #endif
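
strncasecmpz() above compares an unterminated token (for example, a slice of a comma-separated debug-option string) against a NUL-terminated name, returning 0 only when both length and case-insensitive content match. A small, hypothetical helper illustrating the intended use:

#include <core/option.h>

/* hypothetical helper: 'tok'/'toklen' describe an unterminated slice of a
 * larger option string, 'name' is a normal C string */
static bool
example_token_is(const char *tok, size_t toklen, const char *name)
{
	return strncasecmpz(tok, name, toklen) == 0;
}
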
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index d87836e..0f9a37b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -6,27 +6,12 @@
 
 struct nouveau_object;
 
-#define NV_PRINTK_FATAL    KERN_CRIT
-#define NV_PRINTK_ERROR    KERN_ERR
-#define NV_PRINTK_WARN     KERN_WARNING
-#define NV_PRINTK_INFO     KERN_INFO
-#define NV_PRINTK_DEBUG    KERN_DEBUG
-#define NV_PRINTK_PARANOIA KERN_DEBUG
-#define NV_PRINTK_TRACE    KERN_DEBUG
-#define NV_PRINTK_SPAM     KERN_DEBUG
-
-extern int nv_printk_suspend_level;
-
-#define NV_DBG_SUSPEND (nv_printk_suspend_level)
-#define NV_PRINTK_SUSPEND  (nv_printk_level_to_pfx(nv_printk_suspend_level))
-
-const char *nv_printk_level_to_pfx(int level);
-void __printf(4, 5)
-nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+void __printf(3, 4)
+nv_printk_(struct nouveau_object *, int, const char *, ...);
 
 #define nv_printk(o,l,f,a...) do {                                             \
 	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
-		nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a);   \
+		nv_printk_(nv_object(o), NV_DBG_##l, f, ##a);                  \
 } while(0)
 
 #define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
@@ -37,16 +22,9 @@
 #define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
 #define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
 
-#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
-
-static inline void nv_suspend_set_printk_level(int level)
-{
-	nv_printk_suspend_level = level;
-}
-
 #define nv_assert(f,a...) do {                                                 \
 	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
-		nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a);  \
+		nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a);                   \
 	BUG_ON(1);                                                             \
 } while(0)
 
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index 633c2f8..8c32cf4 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -101,14 +101,14 @@
 #define _nouveau_fifo_init _nouveau_engine_init
 #define _nouveau_fifo_fini _nouveau_engine_fini
 
-extern struct nouveau_oclass nv04_fifo_oclass;
-extern struct nouveau_oclass nv10_fifo_oclass;
-extern struct nouveau_oclass nv17_fifo_oclass;
-extern struct nouveau_oclass nv40_fifo_oclass;
-extern struct nouveau_oclass nv50_fifo_oclass;
-extern struct nouveau_oclass nv84_fifo_oclass;
-extern struct nouveau_oclass nvc0_fifo_oclass;
-extern struct nouveau_oclass nve0_fifo_oclass;
+extern struct nouveau_oclass *nv04_fifo_oclass;
+extern struct nouveau_oclass *nv10_fifo_oclass;
+extern struct nouveau_oclass *nv17_fifo_oclass;
+extern struct nouveau_oclass *nv40_fifo_oclass;
+extern struct nouveau_oclass *nv50_fifo_oclass;
+extern struct nouveau_oclass *nv84_fifo_oclass;
+extern struct nouveau_oclass *nvc0_fifo_oclass;
+extern struct nouveau_oclass *nve0_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
 int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
index 1d1a89a..9b0d938 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -42,10 +42,13 @@
 
 extern struct nouveau_oclass nv31_mpeg_oclass;
 extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv44_mpeg_oclass;
 extern struct nouveau_oclass nv50_mpeg_oclass;
 extern struct nouveau_oclass nv84_mpeg_oclass;
-
+extern struct nouveau_ofuncs nv31_mpeg_ofuncs;
+extern struct nouveau_oclass nv31_mpeg_cclass;
 extern struct nouveau_oclass nv31_mpeg_sclass[];
+extern struct nouveau_oclass nv40_mpeg_sclass[];
 void nv31_mpeg_intr(struct nouveau_subdev *);
 void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
 int  nv31_mpeg_init(struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
new file mode 100644
index 0000000..49b0024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_PERFMON_H__
+#define __NVKM_PERFMON_H__
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/class.h>
+
+struct nouveau_perfdom;
+struct nouveau_perfctr;
+struct nouveau_perfmon {
+	struct nouveau_engine base;
+
+	struct nouveau_perfctx *context;
+	void *profile_data;
+
+	struct list_head domains;
+	u32 sequence;
+
+	/*XXX: temp for daemon backend */
+	u32 pwr[8];
+	u32 last;
+};
+
+static inline struct nouveau_perfmon *
+nouveau_perfmon(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_PERFMON];
+}
+
+extern struct nouveau_oclass *nv40_perfmon_oclass;
+extern struct nouveau_oclass *nv50_perfmon_oclass;
+extern struct nouveau_oclass *nv84_perfmon_oclass;
+extern struct nouveau_oclass *nva3_perfmon_oclass;
+extern struct nouveau_oclass nvc0_perfmon_oclass;
+extern struct nouveau_oclass nve0_perfmon_oclass;
+extern struct nouveau_oclass nvf0_perfmon_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index 4579948..23a462b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,19 +3,10 @@
 
 #include <core/engine.h>
 #include <core/engctx.h>
-#include <core/event.h>
 
 struct nouveau_software_chan {
 	struct nouveau_engctx base;
 
-	struct {
-		struct nouveau_eventh event;
-		u32 channel;
-		u32 ctxdma;
-		u64 offset;
-		u32 value;
-	} vblank;
-
 	int (*flip)(void *);
 	void *flip_data;
 };
@@ -50,10 +41,10 @@
 #define _nouveau_software_init _nouveau_engine_init
 #define _nouveau_software_fini _nouveau_engine_fini
 
-extern struct nouveau_oclass nv04_software_oclass;
-extern struct nouveau_oclass nv10_software_oclass;
-extern struct nouveau_oclass nv50_software_oclass;
-extern struct nouveau_oclass nvc0_software_oclass;
+extern struct nouveau_oclass *nv04_software_oclass;
+extern struct nouveau_oclass *nv10_software_oclass;
+extern struct nouveau_oclass *nv50_software_oclass;
+extern struct nouveau_oclass *nvc0_software_oclass;
 
 void nv04_software_intr(struct nouveau_subdev *);
 
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
new file mode 100644
index 0000000..662b207
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/boost.h
@@ -0,0 +1,29 @@
+#ifndef __NVBIOS_BOOST_H__
+#define __NVBIOS_BOOST_H__
+
+u16 nvbios_boostTe(struct nouveau_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
+
+struct nvbios_boostE {
+	u8  pstate;
+	u32 min;
+	u32 max;
+};
+
+u16 nvbios_boostEe(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *);
+u16 nvbios_boostEp(struct nouveau_bios *, int idx, u8 *, u8 *, u8 *, u8 *,
+		   struct nvbios_boostE *);
+u16 nvbios_boostEm(struct nouveau_bios *, u8, u8 *, u8 *, u8 *, u8 *,
+		   struct nvbios_boostE *);
+
+struct nvbios_boostS {
+	u8  domain;
+	u8  percent;
+	u32 min;
+	u32 max;
+};
+
+u16 nvbios_boostSe(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8);
+u16 nvbios_boostSp(struct nouveau_bios *, int, u16, u8 *, u8 *, u8, u8,
+		   struct nvbios_boostS *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
new file mode 100644
index 0000000..a80a4380
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/cstep.h
@@ -0,0 +1,28 @@
+#ifndef __NVBIOS_CSTEP_H__
+#define __NVBIOS_CSTEP_H__
+
+u16 nvbios_cstepTe(struct nouveau_bios *,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz);
+
+struct nvbios_cstepE {
+	u8  pstate;
+	u8  index;
+};
+
+u16 nvbios_cstepEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepE *);
+u16 nvbios_cstepEm(struct nouveau_bios *, u8 pstate, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepE *);
+
+struct nvbios_cstepX {
+	u32 freq;
+	u8  unkn[2];
+	u8  voltage;
+};
+
+u16 nvbios_cstepXe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+u16 nvbios_cstepXp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
+		   struct nvbios_cstepX *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 96d3364..c7b2e58 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -7,7 +7,15 @@
 	DCB_GPIO_TVDAC1 = 0x2d,
 	DCB_GPIO_FAN = 0x09,
 	DCB_GPIO_FAN_SENSE = 0x3d,
-	DCB_GPIO_UNUSED = 0xff
+	DCB_GPIO_UNUSED = 0xff,
+	DCB_GPIO_VID0 = 0x04,
+	DCB_GPIO_VID1 = 0x05,
+	DCB_GPIO_VID2 = 0x06,
+	DCB_GPIO_VID3 = 0x1a,
+	DCB_GPIO_VID4 = 0x73,
+	DCB_GPIO_VID5 = 0x74,
+	DCB_GPIO_VID6 = 0x75,
+	DCB_GPIO_VID7 = 0x76,
 };
 
 #define DCB_GPIO_LOG_DIR     0x02
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
index 0b285e9..16ff06e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -3,6 +3,39 @@
 
 struct nouveau_bios;
 
+u16 nvbios_perf_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+		      u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+
+struct nvbios_perfE {
+	u8  pstate;
+	u8  fanspeed;
+	u8  voltage;
+	u32 core;
+	u32 shader;
+	u32 memory;
+	u32 vdec;
+	u32 disp;
+	u32 script;
+};
+
+u16 nvbios_perf_entry(struct nouveau_bios *, int idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_perfEp(struct nouveau_bios *, int idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_perfE *);
+
+struct nvbios_perfS {
+	union {
+		struct {
+			u32 freq;
+		} v40;
+	};
+};
+
+u32 nvbios_perfSe(struct nouveau_bios *, u32 data, int idx,
+		  u8 *ver, u8 *hdr, u8 cnt, u8 len);
+u32 nvbios_perfSp(struct nouveau_bios *, u32 data, int idx,
+		  u8 *ver, u8 *hdr, u8 cnt, u8 len, struct nvbios_perfS *);
+
 struct nvbios_perf_fan {
 	u32 pwm_divisor;
 };
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
new file mode 100644
index 0000000..bc15e03
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/rammap.h
@@ -0,0 +1,11 @@
+#ifndef __NVBIOS_RAMMAP_H__
+#define __NVBIOS_RAMMAP_H__
+
+u16 nvbios_rammap_table(struct nouveau_bios *, u8 *ver, u8 *hdr,
+			u8 *cnt, u8 *len, u8 *snr, u8 *ssz);
+u16 nvbios_rammap_entry(struct nouveau_bios *, int idx,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_rammap_match(struct nouveau_bios *, u16 khz,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
new file mode 100644
index 0000000..963694b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/timing.h
@@ -0,0 +1,8 @@
+#ifndef __NVBIOS_TIMING_H__
+#define __NVBIOS_TIMING_H__
+
+u16 nvbios_timing_table(struct nouveau_bios *,
+			u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_timing_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
new file mode 100644
index 0000000..ad5a8f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/vmap.h
@@ -0,0 +1,25 @@
+#ifndef __NVBIOS_VMAP_H__
+#define __NVBIOS_VMAP_H__
+
+struct nouveau_bios;
+
+struct nvbios_vmap {
+};
+
+u16 nvbios_vmap_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_vmap_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_vmap *);
+
+struct nvbios_vmap_entry {
+	u8  unk0;
+	u8  link;
+	u32 min;
+	u32 max;
+	s32 arg[6];
+};
+
+u16 nvbios_vmap_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_vmap_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+			    struct nvbios_vmap_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
new file mode 100644
index 0000000..6a11dcd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/volt.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_VOLT_H__
+#define __NVBIOS_VOLT_H__
+
+struct nouveau_bios;
+
+struct nvbios_volt {
+	u8  vidmask;
+	u32 min;
+	u32 max;
+	u32 base;
+	s16 step;
+};
+
+u16 nvbios_volt_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_volt_parse(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_volt *);
+
+struct nvbios_volt_entry {
+	u32 voltage;
+	u8  vid;
+};
+
+u16 nvbios_volt_entry(struct nouveau_bios *, int idx, u8 *ver, u8 *len);
+u16 nvbios_volt_entry_parse(struct nouveau_bios *, int idx, u8 *ver, u8 *len,
+			    struct nvbios_volt_entry *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
index 7d88ec4..697f7ce 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -11,6 +11,8 @@
 
 struct nouveau_bus {
 	struct nouveau_subdev base;
+	int (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+	u32 hwsq_size;
 };
 
 static inline struct nouveau_bus *
@@ -33,9 +35,19 @@
 #define _nouveau_bus_init _nouveau_subdev_init
 #define _nouveau_bus_fini _nouveau_subdev_fini
 
-extern struct nouveau_oclass nv04_bus_oclass;
-extern struct nouveau_oclass nv31_bus_oclass;
-extern struct nouveau_oclass nv50_bus_oclass;
-extern struct nouveau_oclass nvc0_bus_oclass;
+extern struct nouveau_oclass *nv04_bus_oclass;
+extern struct nouveau_oclass *nv31_bus_oclass;
+extern struct nouveau_oclass *nv50_bus_oclass;
+extern struct nouveau_oclass *nv94_bus_oclass;
+extern struct nouveau_oclass *nvc0_bus_oclass;
+
+/* interface to sequencer */
+struct nouveau_hwsq;
+int  nouveau_hwsq_init(struct nouveau_bus *, struct nouveau_hwsq **);
+int  nouveau_hwsq_fini(struct nouveau_hwsq **, bool exec);
+void nouveau_hwsq_wr32(struct nouveau_hwsq *, u32 addr, u32 data);
+void nouveau_hwsq_setf(struct nouveau_hwsq *, u8 flag, int data);
+void nouveau_hwsq_wait(struct nouveau_hwsq *, u8 flag, u8 data);
+void nouveau_hwsq_nsec(struct nouveau_hwsq *, u32 nsec);
 
 #endif
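
The nouveau_hwsq_* prototypes above expose the hardware sequencer to other subdevs: a caller records register writes, flag operations and delays, then either executes or discards the recorded script in nouveau_hwsq_fini(). A hedged sketch of that flow; the register offset, value and delay are placeholders, not taken from this patch:

#include <subdev/bus.h>

static int
example_hwsq_pulse(struct nouveau_bus *pbus)
{
	struct nouveau_hwsq *hwsq;
	int ret = nouveau_hwsq_init(pbus, &hwsq);
	if (ret)
		return ret;
	nouveau_hwsq_wr32(hwsq, 0x001234, 0x00000001);	/* placeholder reg */
	nouveau_hwsq_nsec(hwsq, 2000);			/* wait ~2us */
	nouveau_hwsq_wr32(hwsq, 0x001234, 0x00000000);
	return nouveau_hwsq_fini(&hwsq, true);		/* true: execute script */
}
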
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 89ee289..e2675bc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -7,9 +7,78 @@
 struct nouveau_pll_vals;
 struct nvbios_pll;
 
+enum nv_clk_src {
+	nv_clk_src_crystal,
+	nv_clk_src_href,
+
+	nv_clk_src_hclk,
+	nv_clk_src_hclkm3,
+	nv_clk_src_hclkm3d2,
+
+	nv_clk_src_host,
+
+	nv_clk_src_sppll0,
+	nv_clk_src_sppll1,
+
+	nv_clk_src_mpllsrcref,
+	nv_clk_src_mpllsrc,
+	nv_clk_src_mpll,
+	nv_clk_src_mdiv,
+
+	nv_clk_src_core,
+	nv_clk_src_shader,
+
+	nv_clk_src_mem,
+
+	nv_clk_src_gpc,
+	nv_clk_src_rop,
+	nv_clk_src_hubk01,
+	nv_clk_src_hubk06,
+	nv_clk_src_hubk07,
+	nv_clk_src_copy,
+	nv_clk_src_daemon,
+	nv_clk_src_disp,
+	nv_clk_src_vdec,
+
+	nv_clk_src_dom6,
+
+	nv_clk_src_max,
+};
+
+struct nouveau_cstate {
+	struct list_head head;
+	u8  voltage;
+	u32 domain[nv_clk_src_max];
+};
+
+struct nouveau_pstate {
+	struct list_head head;
+	struct list_head list; /* c-states */
+	struct nouveau_cstate base;
+	u8 pstate;
+	u8 fanspeed;
+};
+
 struct nouveau_clock {
 	struct nouveau_subdev base;
 
+	struct nouveau_clocks *domains;
+	struct nouveau_pstate bstate;
+
+	struct list_head states;
+	int state_nr;
+
+	int pstate; /* current */
+	int ustate; /* user-requested (-1 disabled, -2 perfmon) */
+	int astate; /* perfmon adjustment (base) */
+	int tstate; /* thermal adjustment (max-) */
+	int dstate; /* display adjustment (min+) */
+
+	int  (*read)(struct nouveau_clock *, enum nv_clk_src);
+	int  (*calc)(struct nouveau_clock *, struct nouveau_cstate *);
+	int  (*prog)(struct nouveau_clock *);
+	void (*tidy)(struct nouveau_clock *);
+
 	/*XXX: die, these are here *only* to support the completely
 	 *     bat-shit insane what-was-nouveau_hw.c code
 	 */
@@ -25,27 +94,42 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
 }
 
-#define nouveau_clock_create(p,e,o,d)                                          \
-	nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
-#define nouveau_clock_destroy(p)                                               \
-	nouveau_subdev_destroy(&(p)->base)
-#define nouveau_clock_init(p)                                                  \
-	nouveau_subdev_init(&(p)->base)
+struct nouveau_clocks {
+	enum nv_clk_src name;
+	u8 bios; /* 0xff for none */
+#define NVKM_CLK_DOM_FLAG_CORE 0x01
+	u8 flags;
+	const char *mname;
+	int mdiv;
+};
+
+#define nouveau_clock_create(p,e,o,i,d)                                        \
+	nouveau_clock_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
+#define nouveau_clock_destroy(p) ({                                            \
+	struct nouveau_clock *clk = (p);                                       \
+	_nouveau_clock_dtor(nv_object(clk));                                   \
+})
+#define nouveau_clock_init(p) ({                                               \
+	struct nouveau_clock *clk = (p);                                       \
+	_nouveau_clock_init(nv_object(clk));                                   \
+})
 #define nouveau_clock_fini(p,s)                                                \
 	nouveau_subdev_fini(&(p)->base, (s))
 
 int  nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
-			   struct nouveau_oclass *, void *, u32, int, void **);
-
-#define _nouveau_clock_dtor _nouveau_subdev_dtor
-#define _nouveau_clock_init _nouveau_subdev_init
+			   struct nouveau_oclass *,
+			   struct nouveau_clocks *, int, void **);
+void _nouveau_clock_dtor(struct nouveau_object *);
+int _nouveau_clock_init(struct nouveau_object *);
 #define _nouveau_clock_fini _nouveau_subdev_fini
 
 extern struct nouveau_oclass nv04_clock_oclass;
 extern struct nouveau_oclass nv40_clock_oclass;
-extern struct nouveau_oclass nv50_clock_oclass;
+extern struct nouveau_oclass *nv50_clock_oclass;
+extern struct nouveau_oclass *nv84_clock_oclass;
 extern struct nouveau_oclass nva3_clock_oclass;
 extern struct nouveau_oclass nvc0_clock_oclass;
+extern struct nouveau_oclass nve0_clock_oclass;
 
 int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
 int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -55,4 +139,9 @@
 int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
 			int clk, struct nouveau_pll_vals *);
 
+int nouveau_clock_ustate(struct nouveau_clock *, int req);
+int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
+int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
+
 #endif
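
The extended nouveau_clock above carries per-board domain descriptions, the pstate/cstate lists used for reclocking, and read/calc/prog/tidy hooks. A minimal sketch of querying one domain through the new read() hook; the error handling is illustrative and the unit comment reflects the driver's usual kHz convention rather than anything stated in this header:

#include <subdev/clock.h>

static int
example_core_clock(struct nouveau_clock *clk)
{
	if (!clk->read)
		return -ENODEV;			/* illustrative guard */
	return clk->read(clk, nv_clk_src_core);	/* typically kHz */
}
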
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 2e74050..8541aa3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -78,23 +78,28 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
 }
 
-extern struct nouveau_oclass nv04_fb_oclass;
-extern struct nouveau_oclass nv10_fb_oclass;
-extern struct nouveau_oclass nv1a_fb_oclass;
-extern struct nouveau_oclass nv20_fb_oclass;
-extern struct nouveau_oclass nv25_fb_oclass;
-extern struct nouveau_oclass nv30_fb_oclass;
-extern struct nouveau_oclass nv35_fb_oclass;
-extern struct nouveau_oclass nv36_fb_oclass;
-extern struct nouveau_oclass nv40_fb_oclass;
-extern struct nouveau_oclass nv41_fb_oclass;
-extern struct nouveau_oclass nv44_fb_oclass;
-extern struct nouveau_oclass nv46_fb_oclass;
-extern struct nouveau_oclass nv47_fb_oclass;
-extern struct nouveau_oclass nv49_fb_oclass;
-extern struct nouveau_oclass nv4e_fb_oclass;
-extern struct nouveau_oclass nv50_fb_oclass;
-extern struct nouveau_oclass nvc0_fb_oclass;
+extern struct nouveau_oclass *nv04_fb_oclass;
+extern struct nouveau_oclass *nv10_fb_oclass;
+extern struct nouveau_oclass *nv1a_fb_oclass;
+extern struct nouveau_oclass *nv20_fb_oclass;
+extern struct nouveau_oclass *nv25_fb_oclass;
+extern struct nouveau_oclass *nv30_fb_oclass;
+extern struct nouveau_oclass *nv35_fb_oclass;
+extern struct nouveau_oclass *nv36_fb_oclass;
+extern struct nouveau_oclass *nv40_fb_oclass;
+extern struct nouveau_oclass *nv41_fb_oclass;
+extern struct nouveau_oclass *nv44_fb_oclass;
+extern struct nouveau_oclass *nv46_fb_oclass;
+extern struct nouveau_oclass *nv47_fb_oclass;
+extern struct nouveau_oclass *nv49_fb_oclass;
+extern struct nouveau_oclass *nv4e_fb_oclass;
+extern struct nouveau_oclass *nv50_fb_oclass;
+extern struct nouveau_oclass *nv84_fb_oclass;
+extern struct nouveau_oclass *nva3_fb_oclass;
+extern struct nouveau_oclass *nvaa_fb_oclass;
+extern struct nouveau_oclass *nvaf_fb_oclass;
+extern struct nouveau_oclass *nvc0_fb_oclass;
+extern struct nouveau_oclass *nve0_fb_oclass;
 
 struct nouveau_ram {
 	struct nouveau_object base;
@@ -121,6 +126,17 @@
 	int  (*get)(struct nouveau_fb *, u64 size, u32 align,
 		    u32 size_nc, u32 type, struct nouveau_mem **);
 	void (*put)(struct nouveau_fb *, struct nouveau_mem **);
+
+	int  (*calc)(struct nouveau_fb *, u32 freq);
+	int  (*prog)(struct nouveau_fb *);
+	void (*tidy)(struct nouveau_fb *);
+	struct {
+		u8  version;
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	u32 freq;
+	u32 mr[16];
 };
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 7e4e277..9fa5da7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -60,13 +60,18 @@
 #define _nouveau_i2c_port_init nouveau_object_init
 #define _nouveau_i2c_port_fini nouveau_object_fini
 
+struct nouveau_i2c_board_info {
+	struct i2c_board_info dev;
+	u8 udelay; /* set to 0 to use the standard delay */
+};
+
 struct nouveau_i2c {
 	struct nouveau_subdev base;
 
 	struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
 	struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
 	int (*identify)(struct nouveau_i2c *, int index,
-			const char *what, struct i2c_board_info *,
+			const char *what, struct nouveau_i2c_board_info *,
 			bool (*match)(struct nouveau_i2c_port *,
 				      struct i2c_board_info *));
 	struct list_head ports;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index ce6569f..adc88b7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -11,7 +11,6 @@
 
 struct nouveau_mc {
 	struct nouveau_subdev base;
-	const struct nouveau_mc_intr *intr_map;
 	bool use_msi;
 };
 
@@ -21,8 +20,8 @@
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,m,d)                                           \
-	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,d)                                             \
+	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({                                               \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
 })
@@ -34,20 +33,24 @@
 })
 
 int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, const struct nouveau_mc_intr *,
-			int, void **);
+			struct nouveau_oclass *, int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int  _nouveau_mc_init(struct nouveau_object *);
 int  _nouveau_mc_fini(struct nouveau_object *, bool);
 
-extern struct nouveau_oclass nv04_mc_oclass;
-extern struct nouveau_oclass nv44_mc_oclass;
-extern struct nouveau_oclass nv50_mc_oclass;
-extern struct nouveau_oclass nv98_mc_oclass;
-extern struct nouveau_oclass nvc0_mc_oclass;
+struct nouveau_mc_oclass {
+	struct nouveau_oclass base;
+	const struct nouveau_mc_intr *intr;
+	void (*msi_rearm)(struct nouveau_mc *);
+};
 
-extern const struct nouveau_mc_intr nv04_mc_intr[];
-int nv04_mc_init(struct nouveau_object *);
-int nv50_mc_init(struct nouveau_object *);
+extern struct nouveau_oclass *nv04_mc_oclass;
+extern struct nouveau_oclass *nv40_mc_oclass;
+extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv50_mc_oclass;
+extern struct nouveau_oclass *nv94_mc_oclass;
+extern struct nouveau_oclass *nv98_mc_oclass;
+extern struct nouveau_oclass *nvc0_mc_oclass;
+extern struct nouveau_oclass *nvc3_mc_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
new file mode 100644
index 0000000..c5c92cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -0,0 +1,80 @@
+#ifndef __NOUVEAU_PWR_H__
+#define __NOUVEAU_PWR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_pwr {
+	struct nouveau_subdev base;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} code;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} data;
+
+	struct {
+		u32 base;
+		u32 size;
+	} send;
+
+	struct {
+		u32 base;
+		u32 size;
+
+		struct work_struct work;
+		wait_queue_head_t wait;
+		u32 process;
+		u32 message;
+		u32 data[2];
+	} recv;
+
+	int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+};
+
+static inline struct nouveau_pwr *
+nouveau_pwr(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
+}
+
+#define nouveau_pwr_create(p, e, o, d)                                         \
+	nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_pwr_destroy(p)                                                 \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_pwr_init(p) ({                                                 \
+	struct nouveau_pwr *ppwr = (p);                                        \
+	_nouveau_pwr_init(nv_object(ppwr));                                    \
+})
+#define nouveau_pwr_fini(p,s) ({                                               \
+	struct nouveau_pwr *ppwr = (p);                                        \
+	_nouveau_pwr_fini(nv_object(ppwr), (s));                               \
+})
+
+int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, int, void **);
+#define _nouveau_pwr_dtor _nouveau_subdev_dtor
+int _nouveau_pwr_init(struct nouveau_object *);
+int _nouveau_pwr_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nva3_pwr_oclass;
+extern struct nouveau_oclass nvc0_pwr_oclass;
+extern struct nouveau_oclass nvd0_pwr_oclass;
+extern struct nouveau_oclass nv108_pwr_oclass;
+
+/* interface to MEMX process running on PPWR */
+struct nouveau_memx;
+int  nouveau_memx_init(struct nouveau_pwr *, struct nouveau_memx **);
+int  nouveau_memx_fini(struct nouveau_memx **, bool exec);
+void nouveau_memx_wr32(struct nouveau_memx *, u32 addr, u32 data);
+void nouveau_memx_wait(struct nouveau_memx *,
+		       u32 addr, u32 mask, u32 data, u32 nsec);
+void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
+
+#endif
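
The MEMX helpers declared above mirror the HWSQ interface, but record a script for execution by the PPWR microcontroller. A hedged usage sketch with placeholder register, mask and timeout values:

#include <subdev/pwr.h>

static int
example_memx_script(struct nouveau_pwr *ppwr)
{
	struct nouveau_memx *memx;
	int ret = nouveau_memx_init(ppwr, &memx);
	if (ret)
		return ret;
	nouveau_memx_wr32(memx, 0x100200, 0x00000001);			 /* placeholder */
	nouveau_memx_wait(memx, 0x100200, 0x80000000, 0x80000000, 2000); /* placeholder */
	nouveau_memx_nsec(memx, 1000);
	return nouveau_memx_fini(&memx, true);	/* true: execute on PPWR */
}
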
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index c075998..69891d4 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -71,6 +71,8 @@
 int  _nouveau_therm_init(struct nouveau_object *);
 int  _nouveau_therm_fini(struct nouveau_object *, bool);
 
+int  nouveau_therm_cstate(struct nouveau_therm *, int, int);
+
 extern struct nouveau_oclass nv40_therm_oclass;
 extern struct nouveau_oclass nv50_therm_oclass;
 extern struct nouveau_oclass nv84_therm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
new file mode 100644
index 0000000..820b62f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_VOLT_H__
+#define __NOUVEAU_VOLT_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_voltage {
+	u32 uv;
+	u8  id;
+};
+
+struct nouveau_volt {
+	struct nouveau_subdev base;
+
+	int (*vid_get)(struct nouveau_volt *);
+	int (*get)(struct nouveau_volt *);
+	int (*vid_set)(struct nouveau_volt *, u8 vid);
+	int (*set)(struct nouveau_volt *, u32 uv);
+	int (*set_id)(struct nouveau_volt *, u8 id, int condition);
+
+	u8 vid_mask;
+	u8 vid_nr;
+	struct {
+		u32 uv;
+		u8 vid;
+	} vid[256];
+};
+
+static inline struct nouveau_volt *
+nouveau_volt(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VOLT];
+}
+
+#define nouveau_volt_create(p, e, o, d)                                        \
+	nouveau_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_volt_destroy(p) ({                                             \
+	struct nouveau_volt *v = (p);                                          \
+	_nouveau_volt_dtor(nv_object(v));                                      \
+})
+#define nouveau_volt_init(p) ({                                                \
+	struct nouveau_volt *v = (p);                                          \
+	_nouveau_volt_init(nv_object(v));                                      \
+})
+#define nouveau_volt_fini(p,s)                                                 \
+	nouveau_subdev_fini((p), (s))
+
+int  nouveau_volt_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int, void **);
+void _nouveau_volt_dtor(struct nouveau_object *);
+int  _nouveau_volt_init(struct nouveau_object *);
+#define _nouveau_volt_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv40_volt_oclass;
+
+int nouveau_voltgpio_init(struct nouveau_volt *);
+int nouveau_voltgpio_get(struct nouveau_volt *);
+int nouveau_voltgpio_set(struct nouveau_volt *, u8);
+
+#endif
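
nouveau_volt above abstracts voltage control behind get/set/set_id hooks, backed here by nv40_volt_oclass and the voltgpio helpers. An illustrative guard-and-set wrapper, assuming the set() hook may be left unpopulated on some boards:

#include <subdev/volt.h>

static int
example_set_voltage(struct nouveau_volt *volt, u32 uv)
{
	if (!volt || !volt->set)
		return -ENODEV;		/* hook may be unpopulated */
	return volt->set(volt, uv);	/* microvolts, per nouveau_voltage.uv */
}
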
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
new file mode 100644
index 0000000..c1835e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/boost.h>
+
+u16
+nvbios_boostTe(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_P;
+	u16 boost = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			boost = nv_ro16(bios, bit_P.offset + 0x30);
+
+		if (boost) {
+			*ver = nv_ro08(bios, boost + 0);
+			switch (*ver) {
+			case 0x11:
+				*hdr = nv_ro08(bios, boost + 1);
+				*cnt = nv_ro08(bios, boost + 5);
+				*len = nv_ro08(bios, boost + 2);
+				*snr = nv_ro08(bios, boost + 4);
+				*ssz = nv_ro08(bios, boost + 3);
+				return boost;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_boostEe(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 data = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (data && idx < *cnt) {
+		data = data + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_boostEp(struct nouveau_bios *bios, int idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+	u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->min    =  nv_ro16(bios, data + 0x02) * 1000;
+		info->max    =  nv_ro16(bios, data + 0x04) * 1000;
+	}
+	return data;
+}
+
+u16
+nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
+{
+	u32 data, idx = 0;
+	while ((data = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info))) {
+		if (info->pstate == pstate)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_boostSe(struct nouveau_bios *bios, int idx,
+	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+	if (data && idx < cnt) {
+		data = data + *hdr + (idx * len);
+		*hdr = len;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_boostSp(struct nouveau_bios *bios, int idx,
+	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
+	       struct nvbios_boostS *info)
+{
+	data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->domain  = nv_ro08(bios, data + 0x00);
+		info->percent = nv_ro08(bios, data + 0x01);
+		info->min     = nv_ro16(bios, data + 0x02) * 1000;
+		info->max     = nv_ro16(bios, data + 0x04) * 1000;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
new file mode 100644
index 0000000..d3b1532
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/cstep.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/cstep.h>
+
+u16
+nvbios_cstepTe(struct nouveau_bios *bios,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *xnr, u8 *xsz)
+{
+	struct bit_entry bit_P;
+	u16 cstep = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			cstep = nv_ro16(bios, bit_P.offset + 0x34);
+
+		if (cstep) {
+			*ver = nv_ro08(bios, cstep + 0);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, cstep + 1);
+				*cnt = nv_ro08(bios, cstep + 3);
+				*len = nv_ro08(bios, cstep + 2);
+				*xnr = nv_ro08(bios, cstep + 5);
+				*xsz = nv_ro08(bios, cstep + 4);
+				return cstep;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_cstepEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+	u8  cnt, len, xnr, xsz;
+	u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+	if (data && idx < cnt) {
+		data = data + *hdr + (idx * len);
+		*hdr = len;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_cstepEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepE *info)
+{
+	u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->index   = nv_ro08(bios, data + 0x03);
+	}
+	return data;
+}
+
+u16
+nvbios_cstepEm(struct nouveau_bios *bios, u8 pstate, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepE *info)
+{
+	u32 data, idx = 0;
+	while ((data = nvbios_cstepEp(bios, idx++, ver, hdr, info))) {
+		if (info->pstate == pstate)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_cstepXe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
+{
+	u8  cnt, len, xnr, xsz;
+	u16 data = nvbios_cstepTe(bios, ver, hdr, &cnt, &len, &xnr, &xsz);
+	if (data && idx < xnr) {
+		data = data + *hdr + (cnt * len) + (idx * xsz);
+		*hdr = xsz;
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_cstepXp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
+	       struct nvbios_cstepX *info)
+{
+	u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
+	memset(info, 0x00, sizeof(*info));
+	if (data) {
+		info->freq    = nv_ro16(bios, data + 0x00) * 1000;
+		info->unkn[0] = nv_ro08(bios, data + 0x02);
+		info->unkn[1] = nv_ro08(bios, data + 0x03);
+		info->voltage = nv_ro08(bios, data + 0x04);
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 663853b..7628fe7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -89,6 +89,7 @@
 		   struct nvbios_dpout *info)
 {
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
 		info->type = nv_ro16(bios, data + 0x00);
 		info->mask = nv_ro16(bios, data + 0x02);
@@ -99,9 +100,12 @@
 			info->script[0] = nv_ro16(bios, data + 0x06);
 			info->script[1] = nv_ro16(bios, data + 0x08);
 			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
-			info->script[2] = nv_ro16(bios, data + 0x0c);
-			info->script[3] = nv_ro16(bios, data + 0x0e);
-			info->script[4] = nv_ro16(bios, data + 0x10);
+			if (*len >= 0x0f) {
+				info->script[2] = nv_ro16(bios, data + 0x0c);
+				info->script[3] = nv_ro16(bios, data + 0x0e);
+			}
+			if (*len >= 0x11)
+				info->script[4] = nv_ro16(bios, data + 0x10);
 			break;
 		case 0x40:
 			info->flags     = nv_ro08(bios, data + 0x04);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 57cda2a..420908c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2180,7 +2180,7 @@
 	u16 data;
 
 	if (execute)
-		nv_suspend(bios, "running init tables\n");
+		nv_info(bios, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
@@ -2210,5 +2210,5 @@
 		ret = nvbios_exec(&init);
 	}
 
-	return 0;
+	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
index bcbb056..675e221 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -26,8 +26,9 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-static u16
-perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+u16
+nvbios_perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
 	struct bit_entry bit_P;
 	u16 perf = 0x0000;
@@ -38,10 +39,22 @@
 			if (perf) {
 				*ver = nv_ro08(bios, perf + 0);
 				*hdr = nv_ro08(bios, perf + 1);
+				if (*ver >= 0x40 && *ver < 0x41) {
+					*cnt = nv_ro08(bios, perf + 5);
+					*len = nv_ro08(bios, perf + 2);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 3);
+					return perf;
+				} else
+				if (*ver >= 0x20 && *ver < 0x40) {
+					*cnt = nv_ro08(bios, perf + 2);
+					*len = nv_ro08(bios, perf + 3);
+					*snr = nv_ro08(bios, perf + 4);
+					*ssz = nv_ro08(bios, perf + 5);
+					return perf;
+				}
 			}
-		} else
-			nv_error(bios, "unknown offset for perf in BIT P %d\n",
-				bit_P.version);
+		}
 	}
 
 	if (bios->bmp_offset) {
@@ -50,19 +63,132 @@
 			if (perf) {
 				*hdr = nv_ro08(bios, perf + 0);
 				*ver = nv_ro08(bios, perf + 1);
+				*cnt = nv_ro08(bios, perf + 2);
+				*len = nv_ro08(bios, perf + 3);
+				*snr = 0;
+				*ssz = 0;
+				return perf;
 			}
 		}
 	}
 
+	return 0x0000;
+}
+
+u16
+nvbios_perf_entry(struct nouveau_bios *bios, int idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 perf = nvbios_perf_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (perf && idx < *cnt) {
+		perf = perf + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return perf;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_perfEp(struct nouveau_bios *bios, int idx,
+	      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	      struct nvbios_perfE *info)
+{
+	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	info->pstate = nv_ro08(bios, perf + 0x00);
+	switch (!!perf * *ver) {
+	case 0x12:
+	case 0x13:
+	case 0x14:
+		info->core     = nv_ro32(bios, perf + 0x01) * 10;
+		info->memory   = nv_ro32(bios, perf + 0x05) * 20;
+		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		if (*hdr > 0x38)
+			info->voltage = nv_ro08(bios, perf + 0x38);
+		break;
+	case 0x21:
+	case 0x23:
+	case 0x24:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage  = nv_ro08(bios, perf + 0x05);
+		info->shader   = nv_ro16(bios, perf + 0x06) * 1000;
+		info->core     = info->shader + (signed char)
+				 nv_ro08(bios, perf + 0x08) * 1000;
+		switch (nv_device(bios)->chipset) {
+		case 0x49:
+		case 0x4b:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			break;
+		default:
+			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			break;
+		}
+		break;
+	case 0x25:
+		info->fanspeed = nv_ro08(bios, perf + 0x04);
+		info->voltage  = nv_ro08(bios, perf + 0x05);
+		info->core     = nv_ro16(bios, perf + 0x06) * 1000;
+		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		break;
+	case 0x30:
+		info->script   = nv_ro16(bios, perf + 0x02);
+	case 0x35:
+		info->fanspeed = nv_ro08(bios, perf + 0x06);
+		info->voltage  = nv_ro08(bios, perf + 0x07);
+		info->core     = nv_ro16(bios, perf + 0x08) * 1000;
+		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
+		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->vdec     = nv_ro16(bios, perf + 0x10) * 1000;
+		info->disp     = nv_ro16(bios, perf + 0x14) * 1000;
+		break;
+	case 0x40:
+		info->voltage  = nv_ro08(bios, perf + 0x02);
+		break;
+	default:
+		return 0x0000;
+	}
 	return perf;
 }
 
+u32
+nvbios_perfSe(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len)
+{
+	u32 data = 0x00000000;
+	if (idx < cnt) {
+		data = perfE + *hdr + (idx * len);
+		*hdr = len;
+	}
+	return data;
+}
+
+u32
+nvbios_perfSp(struct nouveau_bios *bios, u32 perfE, int idx,
+	      u8 *ver, u8 *hdr, u8 cnt, u8 len,
+	      struct nvbios_perfS *info)
+{
+	u32 data = nvbios_perfSe(bios, perfE, idx, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!data * *ver) {
+	case 0x40:
+		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		break;
+	default:
+		break;
+	}
+	return data;
+}
+
 int
 nvbios_perf_fan_parse(struct nouveau_bios *bios,
 		      struct nvbios_perf_fan *fan)
 {
-	u8 ver = 0, hdr = 0, cnt = 0, len = 0;
-	u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+	u8  ver, hdr, cnt, len, snr, ssz;
+	u16 perf = nvbios_perf_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
 	if (!perf)
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index f835501..1f76de5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -114,6 +114,7 @@
 	switch (nv_device(bios)->card_type) {
 	case NV_04:
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 		return nv04_pll_mapping;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
new file mode 100644
index 0000000..916fa9d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/rammap.h>
+
+u16
+nvbios_rammap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr,
+		    u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
+{
+	struct bit_entry bit_P;
+	u16 rammap = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			rammap = nv_ro16(bios, bit_P.offset + 4);
+
+		if (rammap) {
+			*ver = nv_ro08(bios, rammap + 0);
+			switch (*ver) {
+			case 0x10:
+			case 0x11:
+				*hdr = nv_ro08(bios, rammap + 1);
+				*cnt = nv_ro08(bios, rammap + 5);
+				*len = nv_ro08(bios, rammap + 2);
+				*snr = nv_ro08(bios, rammap + 4);
+				*ssz = nv_ro08(bios, rammap + 3);
+				return rammap;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_rammap_entry(struct nouveau_bios *bios, int idx,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8  snr, ssz;
+	u16 rammap = nvbios_rammap_table(bios, ver, hdr, cnt, len, &snr, &ssz);
+	if (rammap && idx < *cnt) {
+		rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
+		*hdr = *len;
+		*cnt = snr;
+		*len = ssz;
+		return rammap;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_rammap_match(struct nouveau_bios *bios, u16 khz,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	int idx = 0;
+	u32 data;
+	while ((data = nvbios_rammap_entry(bios, idx++, ver, hdr, cnt, len))) {
+		if (khz >= nv_ro16(bios, data + 0x00) &&
+		    khz <= nv_ro16(bios, data + 0x02))
+			break;
+	}
+	return data;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
new file mode 100644
index 0000000..151c2d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/timing.h>
+
+u16
+nvbios_timing_table(struct nouveau_bios *bios,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 timing = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 1)
+			timing = nv_ro16(bios, bit_P.offset + 4);
+		else
+		if (bit_P.version == 2)
+			timing = nv_ro16(bios, bit_P.offset + 8);
+
+		if (timing) {
+			*ver = nv_ro08(bios, timing + 0);
+			switch (*ver) {
+			case 0x10:
+				*hdr = nv_ro08(bios, timing + 1);
+				*cnt = nv_ro08(bios, timing + 2);
+				*len = nv_ro08(bios, timing + 3);
+				return timing;
+			case 0x20:
+				*hdr = nv_ro08(bios, timing + 1);
+				*cnt = nv_ro08(bios, timing + 3);
+				*len = nv_ro08(bios, timing + 2);
+				return timing;
+			default:
+				break;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_timing_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 timing = nvbios_timing_table(bios, ver, &hdr, &cnt, len);
+	if (timing && idx < cnt)
+		return timing + hdr + (idx * *len);
+	return 0x0000;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
new file mode 100644
index 0000000..f343a1b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/vmap.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/vmap.h>
+
+u16
+nvbios_vmap_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 vmap = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2) {
+			vmap = nv_ro16(bios, bit_P.offset + 0x20);
+			if (vmap) {
+				*ver = nv_ro08(bios, vmap + 0);
+				switch (*ver) {
+				case 0x10:
+				case 0x20:
+					*hdr = nv_ro08(bios, vmap + 1);
+					*cnt = nv_ro08(bios, vmap + 3);
+					*len = nv_ro08(bios, vmap + 2);
+					return vmap;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_vmap_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_vmap *info)
+{
+	u16 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!vmap * *ver) {
+	case 0x10:
+	case 0x20:
+		break;
+	}
+	return vmap;
+}
+
+u16
+nvbios_vmap_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 vmap = nvbios_vmap_table(bios, ver, &hdr, &cnt, len);
+	if (vmap && idx < cnt) {
+		vmap = vmap + hdr + (idx * *len);
+		return vmap;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_vmap_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+			struct nvbios_vmap_entry *info)
+{
+	u16 vmap = nvbios_vmap_entry(bios, idx, ver, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!vmap * *ver) {
+	case 0x10:
+		info->link   = 0xff;
+		info->min    = nv_ro32(bios, vmap + 0x00);
+		info->max    = nv_ro32(bios, vmap + 0x04);
+		info->arg[0] = nv_ro32(bios, vmap + 0x08);
+		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
+		info->arg[2] = nv_ro32(bios, vmap + 0x10);
+		break;
+	case 0x20:
+		info->unk0   = nv_ro08(bios, vmap + 0x00);
+		info->link   = nv_ro08(bios, vmap + 0x01);
+		info->min    = nv_ro32(bios, vmap + 0x02);
+		info->max    = nv_ro32(bios, vmap + 0x06);
+		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
+		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
+		info->arg[2] = nv_ro32(bios, vmap + 0x12);
+		info->arg[3] = nv_ro32(bios, vmap + 0x16);
+		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
+		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+		break;
+	}
+	return vmap;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
new file mode 100644
index 0000000..bb590de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/volt.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/volt.h>
+
+u16
+nvbios_volt_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 volt = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 2)
+			volt = nv_ro16(bios, bit_P.offset + 0x0c);
+		else
+		if (bit_P.version == 1)
+			volt = nv_ro16(bios, bit_P.offset + 0x10);
+
+		if (volt) {
+			*ver = nv_ro08(bios, volt + 0);
+			switch (*ver) {
+			case 0x12:
+				*hdr = 5;
+				*cnt = nv_ro08(bios, volt + 2);
+				*len = nv_ro08(bios, volt + 1);
+				return volt;
+			case 0x20:
+				*hdr = nv_ro08(bios, volt + 1);
+				*cnt = nv_ro08(bios, volt + 2);
+				*len = nv_ro08(bios, volt + 3);
+				return volt;
+			case 0x30:
+			case 0x40:
+			case 0x50:
+				*hdr = nv_ro08(bios, volt + 1);
+				*cnt = nv_ro08(bios, volt + 3);
+				*len = nv_ro08(bios, volt + 2);
+				return volt;
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_volt_parse(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_volt *info)
+{
+	u16 volt = nvbios_volt_table(bios, ver, hdr, cnt, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!volt * *ver) {
+	case 0x12:
+		info->vidmask = nv_ro08(bios, volt + 0x04);
+		break;
+	case 0x20:
+		info->vidmask = nv_ro08(bios, volt + 0x05);
+		break;
+	case 0x30:
+		info->vidmask = nv_ro08(bios, volt + 0x04);
+		break;
+	case 0x40:
+		info->base    = nv_ro32(bios, volt + 0x04);
+		info->step    = nv_ro16(bios, volt + 0x08);
+		info->vidmask = nv_ro08(bios, volt + 0x0b);
+		/*XXX*/
+		info->min     = 0;
+		info->max     = info->base;
+		break;
+	case 0x50:
+		info->vidmask = nv_ro08(bios, volt + 0x06);
+		info->min     = nv_ro32(bios, volt + 0x0a);
+		info->max     = nv_ro32(bios, volt + 0x0e);
+		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
+		info->step    = nv_ro16(bios, volt + 0x16);
+		break;
+	}
+	return volt;
+}
+
+u16
+nvbios_volt_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 volt = nvbios_volt_table(bios, ver, &hdr, &cnt, len);
+	if (volt && idx < cnt) {
+		volt = volt + hdr + (idx * *len);
+		return volt;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_volt_entry_parse(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len,
+			struct nvbios_volt_entry *info)
+{
+	u16 volt = nvbios_volt_entry(bios, idx, ver, len);
+	memset(info, 0x00, sizeof(*info));
+	switch (!!volt * *ver) {
+	case 0x12:
+	case 0x20:
+		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+		info->vid     = nv_ro08(bios, volt + 0x01);
+		break;
+	case 0x30:
+		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
+		info->vid     = nv_ro08(bios, volt + 0x01) >> 2;
+		break;
+	case 0x40:
+	case 0x50:
+		break;
+	}
+	return volt;
+}
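
Reviewer note: the version 0x40/0x50 tables describe voltages indirectly through a base value, a per-VID step and a VID mask rather than listing one voltage per entry, which is why the 0x40/0x50 cases in nvbios_volt_entry_parse() are empty. The conversion is presumably linear, voltage = base + vid * step, clamped to the parsed min/max; the helper below is an assumption drawn from those fields, not code from the driver.

#include <stdint.h>
#include <stdio.h>

/* Assumed linear VID-to-voltage mapping for v0x40/0x50 style tables:
 * clamp the result to the advertised [min, max] range.
 */
static uint32_t
vid_to_uv(uint32_t base, uint32_t step, uint8_t vid, uint32_t min, uint32_t max)
{
	uint32_t uv = base + (uint32_t)vid * step;

	if (uv < min)
		uv = min;
	if (max && uv > max)
		uv = max;
	return uv;
}

int
main(void)
{
	/* e.g. 600000uV base, 6250uV per VID step, VID 32 -> 800000uV */
	printf("%u uV\n", vid_to_uv(600000, 6250, 32, 600000, 1100000));
	return 0;
}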
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
new file mode 100644
index 0000000..f757470
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/timer.h>
+#include <subdev/bus.h>
+
+struct nouveau_hwsq {
+	struct nouveau_bus *pbus;
+	u32 addr;
+	u32 data;
+	struct {
+		u8 data[512];
+		u8 size;
+	} c;
+};
+
+static void
+hwsq_cmd(struct nouveau_hwsq *hwsq, int size, u8 data[])
+{
+	memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
+	hwsq->c.size += size;
+}
+
+int
+nouveau_hwsq_init(struct nouveau_bus *pbus, struct nouveau_hwsq **phwsq)
+{
+	struct nouveau_hwsq *hwsq;
+
+	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
+	if (hwsq) {
+		hwsq->pbus = pbus;
+		hwsq->addr = ~0;
+		hwsq->data = ~0;
+		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
+		hwsq->c.size = 0;
+	}
+
+	return hwsq ? 0 : -ENOMEM;
+}
+
+int
+nouveau_hwsq_fini(struct nouveau_hwsq **phwsq, bool exec)
+{
+	struct nouveau_hwsq *hwsq = *phwsq;
+	int ret = 0, i;
+	if (hwsq) {
+		struct nouveau_bus *pbus = hwsq->pbus;
+		hwsq->c.size = (hwsq->c.size + 4) / 4;
+		if (hwsq->c.size <= pbus->hwsq_size) {
+			if (exec)
+				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
+						      hwsq->c.size);
+			if (ret)
+				nv_error(pbus, "hwsq exec failed: %d\n", ret);
+		} else {
+			nv_error(pbus, "hwsq ucode too large\n");
+			ret = -ENOSPC;
+		}
+
+		for (i = 0; ret && i < hwsq->c.size; i++)
+			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+
+		*phwsq = NULL;
+		kfree(hwsq);
+	}
+	return ret;
+}
+
+void
+nouveau_hwsq_wr32(struct nouveau_hwsq *hwsq, u32 addr, u32 data)
+{
+	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+
+	if (hwsq->data != data) {
+		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
+			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
+						  data >> 16, data >> 24 });
+		} else {
+			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
+		}
+	}
+
+	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
+		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
+					  addr >> 16, addr >> 24 });
+	} else {
+		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
+	}
+
+	hwsq->addr = addr;
+	hwsq->data = data;
+}
+
+void
+nouveau_hwsq_setf(struct nouveau_hwsq *hwsq, u8 flag, int data)
+{
+	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+	flag += 0x80;
+	if (data >= 0)
+		flag += 0x20;
+	if (data >= 1)
+		flag += 0x20;
+	hwsq_cmd(hwsq, 1, (u8[]){ flag });
+}
+
+void
+nouveau_hwsq_wait(struct nouveau_hwsq *hwsq, u8 flag, u8 data)
+{
+	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
+}
+
+void
+nouveau_hwsq_nsec(struct nouveau_hwsq *hwsq, u32 nsec)
+{
+	u8 shift = 0, usec = nsec / 1000;
+	while (usec & ~3) {
+		usec >>= 2;
+		shift++;
+	}
+
+	nv_debug(hwsq->pbus, "    DELAY = %d ns\n", nsec);
+	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
+}
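
Reviewer note: nouveau_hwsq_nsec() squeezes the requested delay into a single opcode byte: bits 0-1 carry a 0-3 microsecond count and the remaining bits a base-4 shift, so the encoded delay is roughly usec << (2 * shift) and long delays are rounded down. A stand-alone round trip of that encoding, inferred from the loop above (the opcode layout beyond what the code shows is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Encode a delay the way the hwsq opcode above does: usec in bits 0-1,
 * shift in bits 2 and up, decoded value = usec << (2 * shift).
 */
static uint8_t
hwsq_delay_encode(uint32_t nsec)
{
	uint8_t shift = 0;
	uint32_t usec = nsec / 1000;

	while (usec & ~3) {
		usec >>= 2;
		shift++;
	}
	return (shift << 2) | usec;
}

/* only valid for opcodes produced by the encoder above */
static uint32_t
hwsq_delay_decode_us(uint8_t op)
{
	return (uint32_t)(op & 3) << (2 * (op >> 2));
}

int
main(void)
{
	uint8_t op = hwsq_delay_encode(8000);	/* 8us request */

	printf("opcode 0x%02x -> ~%u us\n", op, hwsq_delay_decode_us(op));
	return 0;
}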
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
new file mode 100644
index 0000000..12176f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/hwsq.h
@@ -0,0 +1,113 @@
+#ifndef __NVKM_BUS_HWSQ_H__
+#define __NVKM_BUS_HWSQ_H__
+
+#include <subdev/bus.h>
+
+struct hwsq {
+	struct nouveau_subdev *subdev;
+	struct nouveau_hwsq *hwsq;
+	int sequence;
+};
+
+struct hwsq_reg {
+	int sequence;
+	bool force;
+	u32 addr[2];
+	u32 data;
+};
+
+static inline struct hwsq_reg
+hwsq_reg2(u32 addr1, u32 addr2)
+{
+	return (struct hwsq_reg) {
+		.sequence = 0,
+		.force = 0,
+		.addr = { addr1, addr2 },
+		.data = 0xdeadbeef,
+	};
+}
+
+static inline struct hwsq_reg
+hwsq_reg(u32 addr)
+{
+	return hwsq_reg2(addr, addr);
+}
+
+static inline int
+hwsq_init(struct hwsq *ram, struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	int ret;
+
+	ret = nouveau_hwsq_init(pbus, &ram->hwsq);
+	if (ret)
+		return ret;
+
+	ram->sequence++;
+	ram->subdev = subdev;
+	return 0;
+}
+
+static inline int
+hwsq_exec(struct hwsq *ram, bool exec)
+{
+	int ret = 0;
+	if (ram->subdev) {
+		ret = nouveau_hwsq_fini(&ram->hwsq, exec);
+		ram->subdev = NULL;
+	}
+	return ret;
+}
+
+static inline u32
+hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
+{
+	if (reg->sequence != ram->sequence)
+		reg->data = nv_rd32(ram->subdev, reg->addr[0]);
+	return reg->data;
+}
+
+static inline void
+hwsq_wr32(struct hwsq *ram, struct hwsq_reg *reg, u32 data)
+{
+	reg->sequence = ram->sequence;
+	reg->data = data;
+	if (reg->addr[0] != reg->addr[1])
+		nouveau_hwsq_wr32(ram->hwsq, reg->addr[1], reg->data);
+	nouveau_hwsq_wr32(ram->hwsq, reg->addr[0], reg->data);
+}
+
+static inline void
+hwsq_nuke(struct hwsq *ram, struct hwsq_reg *reg)
+{
+	reg->force = true;
+}
+
+static inline u32
+hwsq_mask(struct hwsq *ram, struct hwsq_reg *reg, u32 mask, u32 data)
+{
+	u32 temp = hwsq_rd32(ram, reg);
+	if (temp != ((temp & ~mask) | data) || reg->force)
+		hwsq_wr32(ram, reg, (temp & ~mask) | data);
+	return temp;
+}
+
+static inline void
+hwsq_setf(struct hwsq *ram, u8 flag, int data)
+{
+	nouveau_hwsq_setf(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_wait(struct hwsq *ram, u8 flag, u8 data)
+{
+	nouveau_hwsq_wait(ram->hwsq, flag, data);
+}
+
+static inline void
+hwsq_nsec(struct hwsq *ram, u32 nsec)
+{
+	nouveau_hwsq_nsec(ram->hwsq, nsec);
+}
+
+#endif
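
Reviewer note: the helpers in this header never touch the hardware while a script is being built; hwsq_rd32()/hwsq_wr32()/hwsq_mask() work on shadowed register values and append commands to the ucode buffer, and only hwsq_exec() uploads and runs the result. The toy program below mirrors that record-then-execute shape with printf() standing in for MMIO; it illustrates the pattern only and is not the driver's opcode format.

#include <stdint.h>
#include <stdio.h>

/* Toy deferred register-write script: writes are recorded first,
 * then "executed" in one go.
 */
struct script {
	struct { uint32_t addr, data; } op[16];
	int nr;
};

static void
script_wr32(struct script *s, uint32_t addr, uint32_t data)
{
	if (s->nr < 16) {
		s->op[s->nr].addr = addr;
		s->op[s->nr].data = data;
		s->nr++;
	}
}

static void
script_exec(struct script *s)
{
	for (int i = 0; i < s->nr; i++)
		printf("wr32(0x%06x, 0x%08x)\n", s->op[i].addr, s->op[i].data);
	s->nr = 0;
}

int
main(void)
{
	struct script s = { .nr = 0 };

	script_wr32(&s, 0x002504, 0x00000001);	/* block fifo */
	script_wr32(&s, 0x002504, 0x00000000);	/* unblock fifo */
	script_exec(&s);			/* run the recorded sequence */
	return 0;
}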
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
index 8c7f805..23921b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv04_bus_intr(struct nouveau_subdev *subdev)
@@ -56,23 +52,6 @@
 }
 
 static int
-nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv04_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv04_bus_intr;
-	return 0;
-}
-
-static int
 nv04_bus_init(struct nouveau_object *object)
 {
 	struct nv04_bus_priv *priv = (void *)object;
@@ -83,13 +62,34 @@
 	return nouveau_bus_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv04_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_bus_impl *impl = (void *)oclass;
+	struct nv04_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = impl->intr;
+	priv->base.hwsq_exec = impl->hwsq_exec;
+	priv->base.hwsq_size = impl->hwsq_size;
+	return 0;
+}
+
+struct nouveau_oclass *
+nv04_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv04_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv04_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
new file mode 100644
index 0000000..4d76024
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_BUS_NV04_H__
+#define __NVKM_BUS_NV04_H__
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+	struct nouveau_bus base;
+};
+
+int  nv04_bus_ctor(struct nouveau_object *, struct nouveau_object *,
+		   struct nouveau_oclass *, void *, u32,
+		   struct nouveau_object **);
+int  nv50_bus_init(struct nouveau_object *);
+void nv50_bus_intr(struct nouveau_subdev *);
+
+struct nv04_bus_impl {
+	struct nouveau_oclass base;
+	void (*intr)(struct nouveau_subdev *);
+	int  (*hwsq_exec)(struct nouveau_bus *, u32 *, u32);
+	u32  hwsq_size;
+};
+
+#endif
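
Reviewer note: nv04_bus_impl lets each chipset publish its interrupt handler and hwsq parameters as data, and the shared nv04_bus_ctor() casts the oclass back to the impl to copy those hooks into the bus object. The stand-alone program below demonstrates the underlying C idiom, a file-scope compound literal whose embedded base member is exported by address; the types and values are simplified stand-ins, not the nouveau structures.

#include <stdio.h>

/* "&(struct foo_impl){ ... }.base" pattern: the derived class embeds
 * the base as its first member, a file-scope compound literal fills in
 * both, and only the address of the embedded base is exported.  The
 * ctor casts back to the impl to reach the extra per-chipset data.
 */
struct oclass {
	unsigned handle;
};

struct impl {
	struct oclass base;
	void (*intr)(void);
	unsigned hwsq_size;
};

static void
dummy_intr(void)
{
	printf("intr\n");
}

static struct oclass *example_oclass = &(struct impl) {
	.base.handle = 0x94,
	.intr = dummy_intr,
	.hwsq_size = 128,
}.base;

int
main(void)
{
	struct impl *impl = (struct impl *)example_oclass;

	printf("handle 0x%02x, hwsq_size %u\n", example_oclass->handle,
	       impl->hwsq_size);
	impl->intr();
	return 0;
}

The nv31/nv50/nv94/nvc0 bus classes below use exactly this shape, differing only in which hooks they fill in.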
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
index 34132ae..94da46f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nv31_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nv31_bus_intr(struct nouveau_subdev *subdev)
@@ -71,7 +67,7 @@
 static int
 nv31_bus_init(struct nouveau_object *object)
 {
-	struct nv31_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -83,30 +79,14 @@
 	return 0;
 }
 
-static int
-nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv31_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv31_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv31_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x31),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv31_bus_ctor,
+struct nouveau_oclass *
+nv31_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x31),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv31_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv31_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
index f5b2117..11918f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -23,13 +23,27 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
+#include <subdev/timer.h>
 
-struct nv50_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
-static void
+static int
+nv50_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+	struct nv50_bus_priv *priv = (void *)pbus;
+	int i;
+
+	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(pbus, 0x001304, 0x00000000);
+	for (i = 0; i < size; i++)
+		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
+	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+	nv_wr32(pbus, 0x00130c, 0x00000003);
+
+	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+void
 nv50_bus_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_bus *pbus = nouveau_bus(subdev);
@@ -61,10 +75,10 @@
 	}
 }
 
-static int
+int
 nv50_bus_init(struct nouveau_object *object)
 {
-	struct nv50_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -76,30 +90,16 @@
 	return 0;
 }
 
-static int
-nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nv50_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nv50_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nv50_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_bus_ctor,
+struct nouveau_oclass *
+nv50_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nv50_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nv50_bus_intr,
+	.hwsq_exec = nv50_bus_hwsq_exec,
+	.hwsq_size = 64,
+}.base;
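
Reviewer note: nv50_bus_hwsq_exec() above follows a fixed sequence: halt the sequencer via 0x001098, reset it through 0x001304, copy the ucode words into the window at 0x001400, re-enable and kick it with 0x00130c, then let nv_wait() poll 0x001308 until the busy bit (0x100) clears. A stand-alone sketch of that final poll step with the MMIO read stubbed out; the register meanings come from the code above, while the loop-count timeout policy here is invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the MMIO read; in the driver this is nv_rd32() */
static uint32_t
fake_rd32(uint32_t addr)
{
	static int calls;

	(void)addr;
	return (++calls < 3) ? 0x00000100 : 0x00000000;	/* busy, then idle */
}

/* poll until (rd32(addr) & mask) == data, or give up after 'loops' tries */
static bool
poll_mask(uint32_t addr, uint32_t mask, uint32_t data, int loops)
{
	while (loops--) {
		if ((fake_rd32(addr) & mask) == data)
			return true;
	}
	return false;
}

int
main(void)
{
	bool ok = poll_mask(0x001308, 0x00000100, 0x00000000, 10);

	printf("hwsq %s\n", ok ? "completed" : "timed out");
	return 0;
}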
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
new file mode 100644
index 0000000..d365905
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv94.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/timer.h>
+
+#include "nv04.h"
+
+static int
+nv94_bus_hwsq_exec(struct nouveau_bus *pbus, u32 *data, u32 size)
+{
+	struct nv50_bus_priv *priv = (void *)pbus;
+	int i;
+
+	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(pbus, 0x001304, 0x00000000);
+	nv_wr32(pbus, 0x001318, 0x00000000);
+	for (i = 0; i < size; i++)
+		nv_wr32(priv, 0x080000 + (i * 4), data[i]);
+	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
+	nv_wr32(pbus, 0x00130c, 0x00000001);
+
+	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+}
+
+struct nouveau_oclass *
+nv94_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0x94),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv50_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+	.intr = nv50_bus_intr,
+	.hwsq_exec = nv94_bus_hwsq_exec,
+	.hwsq_size = 128,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
index b192d62..73839d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -23,11 +23,7 @@
  *          Ben Skeggs
  */
 
-#include <subdev/bus.h>
-
-struct nvc0_bus_priv {
-	struct nouveau_bus base;
-};
+#include "nv04.h"
 
 static void
 nvc0_bus_intr(struct nouveau_subdev *subdev)
@@ -60,7 +56,7 @@
 static int
 nvc0_bus_init(struct nouveau_object *object)
 {
-	struct nvc0_bus_priv *priv = (void *)object;
+	struct nv04_bus_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_bus_init(&priv->base);
@@ -72,30 +68,14 @@
 	return 0;
 }
 
-static int
-nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	      struct nouveau_oclass *oclass, void *data, u32 size,
-	      struct nouveau_object **pobject)
-{
-	struct nvc0_bus_priv *priv;
-	int ret;
-
-	ret = nouveau_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = nvc0_bus_intr;
-	return 0;
-}
-
-struct nouveau_oclass
-nvc0_bus_oclass = {
-	.handle = NV_SUBDEV(BUS, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_bus_ctor,
+struct nouveau_oclass *
+nvc0_bus_oclass = &(struct nv04_bus_impl) {
+	.base.handle = NV_SUBDEV(BUS, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
 		.dtor = _nouveau_bus_dtor,
 		.init = nvc0_bus_init,
 		.fini = _nouveau_bus_fini,
 	},
-};
+	.intr = nvc0_bus_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
new file mode 100644
index 0000000..e2938a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/volt.h>
+#include <subdev/fb.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/boost.h>
+#include <subdev/bios/cstep.h>
+#include <subdev/bios/perf.h>
+
+/******************************************************************************
+ * misc
+ *****************************************************************************/
+static u32
+nouveau_clock_adjust(struct nouveau_clock *clk, bool adjust,
+		     u8 pstate, u8 domain, u32 input)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nvbios_boostE boostE;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	data = nvbios_boostEm(bios, pstate, &ver, &hdr, &cnt, &len, &boostE);
+	if (data) {
+		struct nvbios_boostS boostS;
+		u8  idx = 0, sver, shdr;
+		u16 subd;
+
+		input = max(boostE.min, input);
+		input = min(boostE.max, input);
+		do {
+			sver = ver;
+			shdr = hdr;
+			subd = nvbios_boostSp(bios, idx++, data, &sver, &shdr,
+					      cnt, len, &boostS);
+			if (subd && boostS.domain == domain) {
+				if (adjust)
+					input = input * boostS.percent / 100;
+				input = max(boostS.min, input);
+				input = min(boostS.max, input);
+				break;
+			}
+		} while (subd);
+	}
+
+	return input;
+}
+
+/******************************************************************************
+ * C-States
+ *****************************************************************************/
+static int
+nouveau_cstate_prog(struct nouveau_clock *clk,
+		    struct nouveau_pstate *pstate, int cstatei)
+{
+	struct nouveau_therm *ptherm = nouveau_therm(clk);
+	struct nouveau_volt *volt = nouveau_volt(clk);
+	struct nouveau_cstate *cstate;
+	int ret;
+
+	if (!list_empty(&pstate->list)) {
+		cstate = list_entry(pstate->list.prev, typeof(*cstate), head);
+	} else {
+		cstate = &pstate->base;
+	}
+
+	ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+	if (ret && ret != -ENODEV) {
+		nv_error(clk, "failed to raise fan speed: %d\n", ret);
+		return ret;
+	}
+
+	ret = volt->set_id(volt, cstate->voltage, +1);
+	if (ret && ret != -ENODEV) {
+		nv_error(clk, "failed to raise voltage: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk->calc(clk, cstate);
+	if (ret == 0) {
+		ret = clk->prog(clk);
+		clk->tidy(clk);
+	}
+
+	ret = volt->set_id(volt, cstate->voltage, -1);
+	if (ret && ret != -ENODEV)
+		nv_error(clk, "failed to lower voltage: %d\n", ret);
+
+	ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+	if (ret && ret != -ENODEV)
+		nv_error(clk, "failed to lower fan speed: %d\n", ret);
+
+	return 0;
+}
+
+static void
+nouveau_cstate_del(struct nouveau_cstate *cstate)
+{
+	list_del(&cstate->head);
+	kfree(cstate);
+}
+
+static int
+nouveau_cstate_new(struct nouveau_clock *clk, int idx,
+		   struct nouveau_pstate *pstate)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nouveau_clocks *domain = clk->domains;
+	struct nouveau_cstate *cstate = NULL;
+	struct nvbios_cstepX cstepX;
+	u8  ver, hdr;
+	u16 data;
+
+	data = nvbios_cstepXp(bios, idx, &ver, &hdr, &cstepX);
+	if (!data)
+		return -ENOENT;
+
+	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+	if (!cstate)
+		return -ENOMEM;
+
+	*cstate = pstate->base;
+	cstate->voltage = cstepX.voltage;
+
+	while (domain && domain->name != nv_clk_src_max) {
+		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+			u32 freq = nouveau_clock_adjust(clk, true,
+							pstate->pstate,
+							domain->bios,
+							cstepX.freq);
+			cstate->domain[domain->name] = freq;
+		}
+		domain++;
+	}
+
+	list_add(&cstate->head, &pstate->list);
+	return 0;
+}
+
+/******************************************************************************
+ * P-States
+ *****************************************************************************/
+static int
+nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
+{
+	struct nouveau_fb *pfb = nouveau_fb(clk);
+	struct nouveau_pstate *pstate;
+	int ret, idx = 0;
+
+	list_for_each_entry(pstate, &clk->states, head) {
+		if (idx++ == pstatei)
+			break;
+	}
+
+	nv_debug(clk, "setting performance state %d\n", pstatei);
+	clk->pstate = pstatei;
+
+	if (pfb->ram->calc) {
+		ret = pfb->ram->calc(pfb, pstate->base.domain[nv_clk_src_mem]);
+		if (ret == 0)
+			ret = pfb->ram->prog(pfb);
+		pfb->ram->tidy(pfb);
+	}
+
+	return nouveau_cstate_prog(clk, pstate, 0);
+}
+
+static int
+nouveau_pstate_calc(struct nouveau_clock *clk)
+{
+	int pstate, ret = 0;
+
+	nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
+		 clk->ustate, clk->astate, clk->tstate, clk->dstate);
+
+	if (clk->state_nr && clk->ustate != -1) {
+		pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
+		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
+		pstate = max(pstate, clk->dstate);
+	} else {
+		pstate = clk->pstate = -1;
+	}
+
+	nv_trace(clk, "-> %d\n", pstate);
+	if (pstate != clk->pstate)
+		ret = nouveau_pstate_prog(clk, pstate);
+	return ret;
+}
+
+static void
+nouveau_pstate_info(struct nouveau_clock *clk, struct nouveau_pstate *pstate)
+{
+	struct nouveau_clocks *clock = clk->domains - 1;
+	struct nouveau_cstate *cstate;
+	char info[3][32] = { "", "", "" };
+	char name[4] = "--";
+	int i = -1;
+
+	if (pstate->pstate != 0xff)
+		snprintf(name, sizeof(name), "%02x", pstate->pstate);
+
+	while ((++clock)->name != nv_clk_src_max) {
+		u32 lo = pstate->base.domain[clock->name];
+		u32 hi = lo;
+		if (hi == 0)
+			continue;
+
+		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+		list_for_each_entry(cstate, &pstate->list, head) {
+			u32 freq = cstate->domain[clock->name];
+			lo = min(lo, freq);
+			hi = max(hi, freq);
+			nv_debug(clk, "%10d KHz\n", freq);
+		}
+
+		if (clock->mname && ++i < ARRAY_SIZE(info)) {
+			lo /= clock->mdiv;
+			hi /= clock->mdiv;
+			if (lo == hi) {
+				snprintf(info[i], sizeof(info[i]), "%s %d MHz",
+					 clock->mname, lo);
+			} else {
+				snprintf(info[i], sizeof(info[i]),
+					 "%s %d-%d MHz", clock->mname, lo, hi);
+			}
+		}
+	}
+
+	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+}
+
+static void
+nouveau_pstate_del(struct nouveau_pstate *pstate)
+{
+	struct nouveau_cstate *cstate, *temp;
+
+	list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
+		nouveau_cstate_del(cstate);
+	}
+
+	list_del(&pstate->head);
+	kfree(pstate);
+}
+
+static int
+nouveau_pstate_new(struct nouveau_clock *clk, int idx)
+{
+	struct nouveau_bios *bios = nouveau_bios(clk);
+	struct nouveau_clocks *domain = clk->domains - 1;
+	struct nouveau_pstate *pstate;
+	struct nouveau_cstate *cstate;
+	struct nvbios_cstepE cstepE;
+	struct nvbios_perfE perfE;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	data = nvbios_perfEp(bios, idx, &ver, &hdr, &cnt, &len, &perfE);
+	if (!data)
+		return -EINVAL;
+	if (perfE.pstate == 0xff)
+		return 0;
+
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	cstate = &pstate->base;
+	if (!pstate)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pstate->list);
+
+	pstate->pstate = perfE.pstate;
+	pstate->fanspeed = perfE.fanspeed;
+	cstate->voltage = perfE.voltage;
+	cstate->domain[nv_clk_src_core] = perfE.core;
+	cstate->domain[nv_clk_src_shader] = perfE.shader;
+	cstate->domain[nv_clk_src_mem] = perfE.memory;
+	cstate->domain[nv_clk_src_vdec] = perfE.vdec;
+	cstate->domain[nv_clk_src_dom6] = perfE.disp;
+
+	while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) {
+		struct nvbios_perfS perfS;
+		u8  sver = ver, shdr = hdr;
+		u32 perfSe = nvbios_perfSp(bios, data, domain->bios,
+					  &sver, &shdr, cnt, len, &perfS);
+		if (perfSe == 0 || sver != 0x40)
+			continue;
+
+		if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) {
+			perfS.v40.freq = nouveau_clock_adjust(clk, false,
+							      pstate->pstate,
+							      domain->bios,
+							      perfS.v40.freq);
+		}
+
+		cstate->domain[domain->name] = perfS.v40.freq;
+	}
+
+	data = nvbios_cstepEm(bios, pstate->pstate, &ver, &hdr, &cstepE);
+	if (data) {
+		int idx = cstepE.index;
+		do {
+			nouveau_cstate_new(clk, idx, pstate);
+		} while(idx--);
+	}
+
+	nouveau_pstate_info(clk, pstate);
+	list_add_tail(&pstate->head, &clk->states);
+	clk->state_nr++;
+	return 0;
+}
+
+/******************************************************************************
+ * Adjustment triggers
+ *****************************************************************************/
+static int
+nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
+{
+	struct nouveau_pstate *pstate;
+	int i = 0;
+
+	/* YKW repellant */
+	return -ENOSYS;
+
+	if (req != -1 && req != -2) {
+		list_for_each_entry(pstate, &clk->states, head) {
+			if (pstate->pstate == req)
+				break;
+			i++;
+		}
+
+		if (pstate->pstate != req)
+			return -EINVAL;
+		req = i;
+	}
+
+	clk->ustate = req;
+	return 0;
+}
+
+int
+nouveau_clock_ustate(struct nouveau_clock *clk, int req)
+{
+	int ret = nouveau_clock_ustate_update(clk, req);
+	if (ret)
+		return ret;
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->astate  = req;
+	if ( rel) clk->astate += rel;
+	clk->astate = min(clk->astate, clk->state_nr - 1);
+	clk->astate = max(clk->astate, 0);
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->tstate  = req;
+	if ( rel) clk->tstate += rel;
+	clk->tstate = min(clk->tstate, 0);
+	clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
+	return nouveau_pstate_calc(clk);
+}
+
+int
+nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
+{
+	if (!rel) clk->dstate  = req;
+	if ( rel) clk->dstate += rel;
+	clk->dstate = min(clk->dstate, clk->state_nr - 1);
+	clk->dstate = max(clk->dstate, 0);
+	return nouveau_pstate_calc(clk);
+}
+
+/******************************************************************************
+ * subdev base class implementation
+ *****************************************************************************/
+int
+_nouveau_clock_init(struct nouveau_object *object)
+{
+	struct nouveau_clock *clk = (void *)object;
+	struct nouveau_clocks *clock = clk->domains;
+	int ret;
+
+	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
+	INIT_LIST_HEAD(&clk->bstate.list);
+	clk->bstate.pstate = 0xff;
+
+	while (clock->name != nv_clk_src_max) {
+		ret = clk->read(clk, clock->name);
+		if (ret < 0) {
+			nv_error(clk, "%02x freq unknown\n", clock->name);
+			return ret;
+		}
+		clk->bstate.base.domain[clock->name] = ret;
+		clock++;
+	}
+
+	nouveau_pstate_info(clk, &clk->bstate);
+
+	clk->astate = clk->state_nr - 1;
+	clk->tstate = 0;
+	clk->dstate = 0;
+	clk->pstate = -1;
+	nouveau_pstate_calc(clk);
+	return 0;
+}
+
+void
+_nouveau_clock_dtor(struct nouveau_object *object)
+{
+	struct nouveau_clock *clk = (void *)object;
+	struct nouveau_pstate *pstate, *temp;
+
+	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
+		nouveau_pstate_del(pstate);
+	}
+
+	nouveau_subdev_destroy(&clk->base);
+}
+
+int
+nouveau_clock_create_(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass,
+		      struct nouveau_clocks *clocks,
+		      int length, void **object)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_clock *clk;
+	int ret, idx, arglen;
+	const char *mode;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "CLK",
+				     "clock", length, object);
+	clk = *object;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&clk->states);
+	clk->domains = clocks;
+	clk->ustate = -1;
+
+	idx = 0;
+	do {
+		ret = nouveau_pstate_new(clk, idx++);
+	} while (ret == 0);
+
+	mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
+	if (mode) {
+		if (!strncasecmpz(mode, "disabled", arglen)) {
+			clk->ustate = -1;
+		} else {
+			char save = mode[arglen];
+			long v;
+
+			((char *)mode)[arglen] = '\0';
+			if (!kstrtol(mode, 0, &v))
+				nouveau_clock_ustate_update(clk, v);
+			((char *)mode)[arglen] = save;
+		}
+	}
+
+	return 0;
+}
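
Reviewer note: nouveau_pstate_calc() reduces four inputs to one index: the explicit user choice (ustate), the automatic level (astate), a thermal offset (tstate, clamped to zero or below by nouveau_clock_tstate()) and a floor requested via dstate. The net effect is a clamp of the chosen level into [dstate, state_nr - 1 + tstate]. A small stand-alone rendering of just that arithmetic, with made-up numbers:

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

/* Mirror of the pstate selection: pick the user or automatic level,
 * cap it by the thermal offset (tstate <= 0), and never go below the
 * floor requested via dstate.
 */
static int
pick_pstate(int ustate, int astate, int tstate, int dstate, int state_nr)
{
	int pstate = (ustate < 0) ? astate : ustate;

	pstate = min_i(pstate, state_nr - 1 + tstate);
	pstate = max_i(pstate, dstate);
	return pstate;
}

int
main(void)
{
	/* 4 levels, user says "auto" (-2), thermal throttle of one level */
	printf("selected pstate %d\n", pick_pstate(-2, 3, -1, 0, 4));
	return 0;
}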
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index a142775..da50c1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -77,7 +77,7 @@
 	struct nv04_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, NULL, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 0db5dbf..db7346f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -23,11 +23,188 @@
  */
 
 #include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
 
 struct nv40_clock_priv {
 	struct nouveau_clock base;
+	u32 ctrl;
+	u32 npll_ctrl;
+	u32 npll_coef;
+	u32 spll;
 };
 
+static struct nouveau_clocks
+nv40_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_max }
+};
+
+static u32
+read_pll_1(struct nv40_clock_priv *priv, u32 reg)
+{
+	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	int P = (ctrl & 0x00070000) >> 16;
+	int N = (ctrl & 0x0000ff00) >> 8;
+	int M = (ctrl & 0x000000ff) >> 0;
+	u32 ref = 27000, clk = 0;
+
+	if (ctrl & 0x80000000)
+		clk = ref * N / M;
+
+	return clk >> P;
+}
+
+static u32
+read_pll_2(struct nv40_clock_priv *priv, u32 reg)
+{
+	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	u32 coef = nv_rd32(priv, reg + 0x04);
+	int N2 = (coef & 0xff000000) >> 24;
+	int M2 = (coef & 0x00ff0000) >> 16;
+	int N1 = (coef & 0x0000ff00) >> 8;
+	int M1 = (coef & 0x000000ff) >> 0;
+	int P = (ctrl & 0x00070000) >> 16;
+	u32 ref = 27000, clk = 0;
+
+	if ((ctrl & 0x80000000) && M1) {
+		clk = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				clk = clk * N2 / M2;
+			else
+				clk = 0;
+		}
+	}
+
+	return clk >> P;
+}
+
+static u32
+read_clk(struct nv40_clock_priv *priv, u32 src)
+{
+	switch (src) {
+	case 3:
+		return read_pll_2(priv, 0x004000);
+	case 2:
+		return read_pll_1(priv, 0x004008);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nv40_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	u32 mast = nv_rd32(priv, 0x00c040);
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000; /*XXX: PCIE/AGP differ*/
+	case nv_clk_src_core:
+		return read_clk(priv, (mast & 0x00000003) >> 0);
+	case nv_clk_src_shader:
+		return read_clk(priv, (mast & 0x00000030) >> 4);
+	case nv_clk_src_mem:
+		return read_pll_2(priv, 0x4020);
+	default:
+		break;
+	}
+
+	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	return -EINVAL;
+}
+
+static int
+nv40_clock_calc_pll(struct nv40_clock_priv *priv, u32 reg, u32 clk,
+		    int *N1, int *M1, int *N2, int *M2, int *log2P)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll pll;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, &pll);
+	if (ret)
+		return ret;
+
+	if (clk < pll.vco1.max_freq)
+		pll.vco2.max_freq = 0;
+
+	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+	if (ret == 0)
+		return -ERANGE;
+	return ret;
+}
+
+static int
+nv40_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	int gclk = cstate->domain[nv_clk_src_core];
+	int sclk = cstate->domain[nv_clk_src_shader];
+	int N1, M1, N2, M2, log2P;
+	int ret;
+
+	/* core/geometric clock */
+	ret = nv40_clock_calc_pll(priv, 0x004000, gclk,
+				 &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		return ret;
+
+	if (N2 == M2) {
+		priv->npll_ctrl = 0x80000100 | (log2P << 16);
+		priv->npll_coef = (N1 << 8) | M1;
+	} else {
+		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
+		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+	/* use the second pll for shader/rop clock, if it differs from core */
+	if (sclk && sclk != gclk) {
+		ret = nv40_clock_calc_pll(priv, 0x004008, sclk,
+					 &N1, &M1, NULL, NULL, &log2P);
+		if (ret < 0)
+			return ret;
+
+		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		priv->ctrl = 0x00000223;
+	} else {
+		priv->spll = 0x00000000;
+		priv->ctrl = 0x00000333;
+	}
+
+	return 0;
+}
+
+static int
+nv40_clock_prog(struct nouveau_clock *clk)
+{
+	struct nv40_clock_priv *priv = (void *)clk;
+	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
+	nv_wr32(priv, 0x004004, priv->npll_coef);
+	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
+	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+	mdelay(5);
+	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+	return 0;
+}
+
+static void
+nv40_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
 static int
 nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_oclass *oclass, void *data, u32 size,
@@ -36,13 +213,17 @@
 	struct nv40_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	priv->base.pll_calc = nv04_clock_pll_calc;
 	priv->base.pll_prog = nv04_clock_pll_prog;
+	priv->base.read = nv40_clock_read;
+	priv->base.calc = nv40_clock_calc;
+	priv->base.prog = nv40_clock_prog;
+	priv->base.tidy = nv40_clock_tidy;
 	return 0;
 }
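
Reviewer note: read_pll_1() and read_pll_2() above recover the programmed frequency from the PLL control/coefficient registers with the usual two-stage formula: freq = ref * N1 / M1, multiplied by N2 / M2 when the second stage is engaged, then divided by 2^P; the nv50 read_pll() that follows uses the same math. A stand-alone version of the calculation, assuming a 27 MHz reference as in the code above:

#include <stdint.h>
#include <stdio.h>

/* freq = ref * N1 / M1 [* N2 / M2] >> P, all clocks in kHz */
static uint32_t
pll_freq(uint32_t ref, int N1, int M1, int N2, int M2, int P)
{
	uint32_t clk;

	if (!M1)
		return 0;
	clk = ref * N1 / M1;
	if (N2 && M2)		/* second stage engaged */
		clk = clk * N2 / M2;
	return clk >> P;
}

int
main(void)
{
	/* e.g. 27000 kHz crystal, N1=100, M1=6, no second stage, P=1 */
	printf("%u kHz\n", pll_freq(27000, 100, 6, 0, 0, 1));
	return 0;
}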
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index d09d3e7..250a6d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -22,40 +22,538 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
+#include "nv50.h"
 #include "pll.h"
+#include "seq.h"
 
-struct nv50_clock_priv {
-	struct nouveau_clock base;
-};
+static u32
+read_div(struct nv50_clock_priv *priv)
+{
+	switch (nv_device(priv)->chipset) {
+	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+	case 0x84:
+	case 0x86:
+	case 0x98:
+	case 0xa0:
+		return nv_rd32(priv, 0x004700);
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		return nv_rd32(priv, 0x004800);
+	default:
+		return 0x00000000;
+	}
+}
+
+static u32
+read_pll_src(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
+	u32 rsel = nv_rd32(priv, 0x00e18c);
+	int P, N, M, id;
+
+	switch (nv_device(priv)->chipset) {
+	case 0x50:
+	case 0xa0:
+		switch (base) {
+		case 0x4020:
+		case 0x4028: id = !!(rsel & 0x00000004); break;
+		case 0x4008: id = !!(rsel & 0x00000008); break;
+		case 0x4030: id = 0; break;
+		default:
+			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+		ref *=  (coef & 0x01000000) ? 2 : 4;
+		P    =  (coef & 0x00070000) >> 16;
+		N    = ((coef & 0x0000ff00) >> 8) + 1;
+		M    = ((coef & 0x000000ff) >> 0) + 1;
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x92:
+		coef = nv_rd32(priv, 0x00e81c);
+		P    = (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		rsel = nv_rd32(priv, 0x00c050);
+		switch (base) {
+		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+		case 0x4030: rsel = 3; break;
+		default:
+			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		switch (rsel) {
+		case 0: id = 1; break;
+		case 1: return clk->read(clk, nv_clk_src_crystal);
+		case 2: return clk->read(clk, nv_clk_src_href);
+		case 3: id = 0; break;
+		}
+
+		coef =  nv_rd32(priv, 0x00e81c + (id * 0x28));
+		P    = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		P   += (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	if (M)
+		return (ref * N / M) >> P;
+	return 0;
+}
+
+static u32
+read_pll_ref(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 src, mast = nv_rd32(priv, 0x00c040);
+
+	switch (base) {
+	case 0x004028:
+		src = !!(mast & 0x00200000);
+		break;
+	case 0x004020:
+		src = !!(mast & 0x00400000);
+		break;
+	case 0x004008:
+		src = !!(mast & 0x00010000);
+		break;
+	case 0x004030:
+		src = !!(mast & 0x02000000);
+		break;
+	case 0x00e810:
+		return clk->read(clk, nv_clk_src_crystal);
+	default:
+		nv_error(priv, "bad pll 0x%06x\n", base);
+		return 0;
+	}
+
+	if (src)
+		return clk->read(clk, nv_clk_src_href);
+	return read_pll_src(priv, base);
+}
+
+static u32
+read_pll(struct nv50_clock_priv *priv, u32 base)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 mast = nv_rd32(priv, 0x00c040);
+	u32 ctrl = nv_rd32(priv, base + 0);
+	u32 coef = nv_rd32(priv, base + 4);
+	u32 ref = read_pll_ref(priv, base);
+	u32 freq = 0;
+	int N1, N2, M1, M2;
+
+	if (base == 0x004028 && (mast & 0x00100000)) {
+		/* wtf, appears to only disable post-divider on nva0 */
+		if (nv_device(priv)->chipset != 0xa0)
+			return clk->read(clk, nv_clk_src_dom6);
+	}
+
+	N2 = (coef & 0xff000000) >> 24;
+	M2 = (coef & 0x00ff0000) >> 16;
+	N1 = (coef & 0x0000ff00) >> 8;
+	M1 = (coef & 0x000000ff);
+	if ((ctrl & 0x80000000) && M1) {
+		freq = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				freq = freq * N2 / M2;
+			else
+				freq = 0;
+		}
+	}
+
+	return freq;
+}
 
 static int
+nv50_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	u32 mast = nv_rd32(priv, 0x00c040);
+	u32 P = 0;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000; /* PCIE reference clock */
+	case nv_clk_src_hclk:
+		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+	case nv_clk_src_hclkm3:
+		return clk->read(clk, nv_clk_src_hclk) * 3;
+	case nv_clk_src_hclkm3d2:
+		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+	case nv_clk_src_host:
+		switch (mast & 0x30000000) {
+		case 0x00000000: return clk->read(clk, nv_clk_src_href);
+		case 0x10000000: break;
+		case 0x20000000: /* !0x50 */
+		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+		}
+		break;
+	case nv_clk_src_core:
+		if (!(mast & 0x00100000))
+			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+		switch (mast & 0x00000003) {
+		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
+		case 0x00000002: return read_pll(priv, 0x004020) >> P;
+		case 0x00000003: return read_pll(priv, 0x004028) >> P;
+		}
+		break;
+	case nv_clk_src_shader:
+		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+		switch (mast & 0x00000030) {
+		case 0x00000000:
+			if (mast & 0x00000080)
+				return clk->read(clk, nv_clk_src_host) >> P;
+			return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000010: break;
+		case 0x00000020: return read_pll(priv, 0x004028) >> P;
+		case 0x00000030: return read_pll(priv, 0x004020) >> P;
+		}
+		break;
+	case nv_clk_src_mem:
+		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
+		if (nv_rd32(priv, 0x004008) & 0x00000200) {
+			switch (mast & 0x0000c000) {
+			case 0x00000000:
+				return clk->read(clk, nv_clk_src_crystal) >> P;
+			case 0x00008000:
+			case 0x0000c000:
+				return clk->read(clk, nv_clk_src_href) >> P;
+			}
+		} else {
+			return read_pll(priv, 0x004008) >> P;
+		}
+		break;
+	case nv_clk_src_vdec:
+		P = (read_div(priv) & 0x00000700) >> 8;
+		switch (nv_device(priv)->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
+					return clk->read(clk, nv_clk_src_core) >> P;
+				return clk->read(clk, nv_clk_src_crystal) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				if (mast & 0x01000000)
+					return read_pll(priv, 0x004028) >> P;
+				return read_pll(priv, 0x004030) >> P;
+			case 0x00000c00:
+				return clk->read(clk, nv_clk_src_core) >> P;
+			}
+			break;
+		case 0x98:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				return clk->read(clk, nv_clk_src_core) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+			case 0x00000c00:
+				return clk->read(clk, nv_clk_src_mem) >> P;
+			}
+			break;
+		}
+		break;
+	case nv_clk_src_dom6:
+		switch (nv_device(priv)->chipset) {
+		case 0x50:
+		case 0xa0:
+			return read_pll(priv, 0x00e810) >> 2;
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+			P = (read_div(priv) & 0x00000007) >> 0;
+			switch (mast & 0x0c000000) {
+			case 0x00000000: return clk->read(clk, nv_clk_src_href);
+			case 0x04000000: break;
+			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+			case 0x0c000000:
+				return clk->read(clk, nv_clk_src_hclkm3) >> P;
+			}
+			break;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	return -EINVAL;
+}
+
+static u32
+calc_pll(struct nv50_clock_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll pll;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, &pll);
+	if (ret)
+		return 0;
+
+	pll.vco2.max_freq = 0;
+	pll.refclk = read_pll_ref(priv, reg);
+	if (!pll.refclk)
+		return 0;
+
+	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+	u32 clk0 = src, clk1 = src;
+	for (*div = 0; *div <= 7; (*div)++) {
+		if (clk0 <= target) {
+			clk1 = clk0 << (*div ? 1 : 0);
+			break;
+		}
+		clk0 >>= 1;
+	}
+
+	if (target - clk0 <= clk1 - target)
+		return clk0;
+	(*div)--;
+	return clk1;
+}
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+	return ((a / 1000) == (b / 1000));
+}
+
+static int
+nv50_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	struct nv50_clock_hwsq *hwsq = &priv->hwsq;
+	const int shader = cstate->domain[nv_clk_src_shader];
+	const int core = cstate->domain[nv_clk_src_core];
+	const int vdec = cstate->domain[nv_clk_src_vdec];
+	const int dom6 = cstate->domain[nv_clk_src_dom6];
+	u32 mastm = 0, mastv = 0;
+	u32 divsm = 0, divsv = 0;
+	int N, M, P1, P2;
+	int freq, out;
+
+	/* prepare a hwsq script from which we'll perform the reclock */
+	out = clk_init(hwsq, nv_subdev(clk));
+	if (out)
+		return out;
+
+	clk_wr32(hwsq, fifo, 0x00000001); /* block fifo */
+	clk_nsec(hwsq, 8000);
+	clk_setf(hwsq, 0x10, 0x00); /* disable fb */
+	clk_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+	/* vdec: avoid modifying xpll until we know exactly how the other
+	 * clock domains work; I suspect at least some of them can also be
+	 * tied to xpll...
+	 */
+	if (vdec) {
+		/* see how close we can get using nvclk as a source */
+		freq = calc_div(core, vdec, &P1);
+
+		/* see how close we can get using xpll/hclk as a source */
+		if (nv_device(priv)->chipset != 0x98)
+			out = read_pll(priv, 0x004030);
+		else
+			out = clk->read(clk, nv_clk_src_hclkm3d2);
+		out = calc_div(out, vdec, &P2);
+
+		/* select whichever gets us closest */
+		if (abs(vdec - freq) <= abs(vdec - out)) {
+			if (nv_device(priv)->chipset != 0x98)
+				mastv |= 0x00000c00;
+			divsv |= P1 << 8;
+		} else {
+			mastv |= 0x00000800;
+			divsv |= P2 << 8;
+		}
+
+		mastm |= 0x00000c00;
+		divsm |= 0x00000700;
+	}
+
+	/* dom6: exact purpose unknown, but we're limited to various
+	 * combinations of the host clock frequency
+	 */
+	if (dom6) {
+		if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+			mastv |= 0x00000000;
+		} else
+		if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+			mastv |= 0x08000000;
+		} else {
+			freq = clk->read(clk, nv_clk_src_hclk) * 3;
+			freq = calc_div(freq, dom6, &P1);
+
+			mastv |= 0x0c000000;
+			divsv |= P1;
+		}
+
+		mastm |= 0x0c000000;
+		divsm |= 0x00000007;
+	}
+
+	/* vdec/dom6: switch to "safe" clocks temporarily, update dividers
+	 * and then switch to target clocks
+	 */
+	clk_mask(hwsq, mast, mastm, 0x00000000);
+	clk_mask(hwsq, divs, divsm, divsv);
+	clk_mask(hwsq, mast, mastm, mastv);
+
+	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
+	 * sclk to hclk) before reprogramming
+	 */
+	if (nv_device(priv)->chipset < 0x92)
+		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
+	else
+		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
+
+	/* core: for the moment at least, always use nvpll */
+	freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+	if (freq == 0)
+		return -ERANGE;
+
+	clk_mask(hwsq, nvpll[0], 0xc03f0100,
+				 0x80000000 | (P1 << 19) | (P1 << 16));
+	clk_mask(hwsq, nvpll[1], 0x0000ffff, (N << 8) | M);
+
+	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
+	 * very careful that the shader clock is at least twice the core, or
+	 * some chipsets will be very unhappy.  I expect most or all of these
+	 * cases will be handled by tying to nvclk, but it's possible there
+	 * are corner cases
+	 */
+	if (P1-- && shader == (core << 1)) {
+		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
+		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
+	} else {
+		freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+		if (freq == 0)
+			return -ERANGE;
+
+		clk_mask(hwsq, spll[0], 0xc03f0100,
+					0x80000000 | (P1 << 19) | (P1 << 16));
+		clk_mask(hwsq, spll[1], 0x0000ffff, (N << 8) | M);
+		clk_mask(hwsq, mast, 0x00100033, 0x00000033);
+	}
+
+	/* restore normal operation */
+	clk_setf(hwsq, 0x10, 0x01); /* enable fb */
+	clk_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+	clk_wr32(hwsq, fifo, 0x00000000); /* un-block fifo */
+	return 0;
+}
+
+static int
+nv50_clock_prog(struct nouveau_clock *clk)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	return clk_exec(&priv->hwsq, true);
+}
+
+static void
+nv50_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	clk_exec(&priv->hwsq, false);
+}
+
+int
 nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		struct nouveau_oclass *oclass, void *data, u32 size,
 		struct nouveau_object **pobject)
 {
+	struct nv50_clock_oclass *pclass = (void *)oclass;
 	struct nv50_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
+				  &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->hwsq.r_fifo = hwsq_reg(0x002504);
+	priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
+	priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
+	priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+	priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+	switch (nv_device(priv)->chipset) {
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		priv->hwsq.r_divs = hwsq_reg(0x004800);
+		break;
+	default:
+		priv->hwsq.r_divs = hwsq_reg(0x004700);
+		break;
+	}
+	priv->hwsq.r_mast = hwsq_reg(0x00c040);
+
+	priv->base.read = nv50_clock_read;
+	priv->base.calc = nv50_clock_calc;
+	priv->base.prog = nv50_clock_prog;
+	priv->base.tidy = nv50_clock_tidy;
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_clock_oclass = {
-	.handle = NV_SUBDEV(CLOCK, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+static struct nouveau_clocks
+nv50_domains[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv50_clock_oclass = &(struct nv50_clock_oclass) {
+	.base.handle = NV_SUBDEV(CLOCK, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_clock_ctor,
 		.dtor = _nouveau_clock_dtor,
 		.init = _nouveau_clock_init,
 		.fini = _nouveau_clock_fini,
 	},
-};
+	.domains = nv50_domains,
+}.base;
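
Reviewer note: calc_div() above searches for the power-of-two divider (0..7 shifts) closest to the target: it halves the source until the result drops to or below the target, then returns whichever of that value or the one from one fewer halving is nearer, fixing *div up to match. A stand-alone copy of the helper plus a small probe; the source and target values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* nearest power-of-two division of src towards target; *div is the
 * shift count the caller programs into the divider field.
 */
static uint32_t
calc_div(uint32_t src, uint32_t target, int *div)
{
	uint32_t clk0 = src, clk1 = src;

	for (*div = 0; *div <= 7; (*div)++) {
		if (clk0 <= target) {
			clk1 = clk0 << (*div ? 1 : 0);
			break;
		}
		clk0 >>= 1;
	}

	if (target - clk0 <= clk1 - target)
		return clk0;
	(*div)--;
	return clk1;
}

int
main(void)
{
	int div;
	uint32_t out = calc_div(750000, 450000, &div);	/* kHz */

	printf("got %u kHz with div %d\n", out, div);
	return 0;
}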
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
new file mode 100644
index 0000000..f10917d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_CLK_NV50_H__
+#define __NVKM_CLK_NV50_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+#include <subdev/clock.h>
+
+struct nv50_clock_hwsq {
+	struct hwsq base;
+	struct hwsq_reg r_fifo;
+	struct hwsq_reg r_spll[2];
+	struct hwsq_reg r_nvpll[2];
+	struct hwsq_reg r_divs;
+	struct hwsq_reg r_mast;
+};
+
+struct nv50_clock_priv {
+	struct nouveau_clock base;
+	struct nv50_clock_hwsq hwsq;
+};
+
+int  nv50_clock_ctor(struct nouveau_object *, struct nouveau_object *,
+		     struct nouveau_oclass *, void *, u32,
+		     struct nouveau_object **);
+
+struct nv50_clock_oclass {
+	struct nouveau_oclass base;
+	struct nouveau_clocks *domains;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
new file mode 100644
index 0000000..b0b7c14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv84.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nv50.h"
+
+static struct nouveau_clocks
+nv84_domains[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0xff },
+	{ nv_clk_src_max }
+};
+
+struct nouveau_oclass *
+nv84_clock_oclass = &(struct nv50_clock_oclass) {
+	.base.handle = NV_SUBDEV(CLOCK, 0x84),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+	.domains = nv84_domains,
+}.base;
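
Reviewer note: each per-chipset domain list (nv40_domain, nv50_domains, nv84_domains) is an array of struct nouveau_clocks entries terminated by nv_clk_src_max. Judging from how clock/base.c consumes the fields, the initialisers read as { domain name, BIOS domain id (0xff meaning none), flags, printable name, divisor applied when logging MHz }. The stand-alone program below spells that reading out with a simplified stand-in type and the same sentinel-terminated walk; it is an interpretation, not new driver code.

#include <stdio.h>

/* Simplified stand-in for struct nouveau_clocks. */
enum clk_src { CLK_CORE, CLK_SHADER, CLK_MEM, CLK_MAX };

struct clk_domain {
	enum clk_src name;
	unsigned char bios;	/* 0xff: no BIOS domain mapping */
	unsigned flags;		/* e.g. "core" flag used for boost clamping */
	const char *mname;	/* NULL: not shown in log output */
	int mdiv;		/* divisor when printing (1000: kHz -> MHz) */
};

static const struct clk_domain domains[] = {
	{ CLK_CORE,   0xff, 0, "core",   1000 },
	{ CLK_SHADER, 0xff, 0, "shader", 1000 },
	{ CLK_MEM,    0xff, 0, "memory", 1000 },
	{ CLK_MAX }
};

int
main(void)
{
	/* walk until the nv_clk_src_max-style sentinel, as base.c does */
	for (const struct clk_domain *d = domains; d->name != CLK_MAX; d++)
		printf("%-6s (bios 0x%02x, /%d)\n", d->mname, d->bios, d->mdiv);
	return 0;
}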
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index f074cd2..4f5a137 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -22,33 +22,277 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/timer.h>
 
 #include "pll.h"
 
+#include "nva3.h"
+
 struct nva3_clock_priv {
 	struct nouveau_clock base;
+	struct nva3_clock_info eng[nv_clk_src_max];
 };
 
-int
-nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
-		    int clk, struct nouveau_pll_vals *pv)
+static u32 read_clk(struct nva3_clock_priv *, int, bool);
+static u32 read_pll(struct nva3_clock_priv *, int, u32);
+
+static u32
+read_vco(struct nva3_clock_priv *priv, int clk)
 {
-	int ret, N, M, P;
+	u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	if ((sctl & 0x00000030) != 0x00000030)
+		return read_pll(priv, 0x41, 0x00e820);
+	return read_pll(priv, 0x42, 0x00e8a0);
+}
 
-	ret = nva3_pll_calc(nv_subdev(clock), info, clk, &N, NULL, &M, &P);
+static u32
+read_clk(struct nva3_clock_priv *priv, int clk, bool ignore_en)
+{
+	u32 sctl, sdiv, sclk;
 
-	if (ret > 0) {
-		pv->refclk = info->refclk;
-		pv->N1 = N;
-		pv->M1 = M;
-		pv->log2P = P;
+	/* refclk for the 0xe8xx plls is a fixed frequency */
+	if (clk >= 0x40) {
+		if (nv_device(priv)->chipset == 0xaf) {
+			/* no joke.. seriously.. sigh.. */
+			return nv_rd32(priv, 0x00471c) * 1000;
+		}
+
+		return nv_device(priv)->crystal;
 	}
+
+	sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	if (!ignore_en && !(sctl & 0x00000100))
+		return 0;
+
+	switch (sctl & 0x00003000) {
+	case 0x00000000:
+		return nv_device(priv)->crystal;
+	case 0x00002000:
+		if (sctl & 0x00000040)
+			return 108000;
+		return 100000;
+	case 0x00003000:
+		sclk = read_vco(priv, clk);
+		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+		return (sclk * 2) / sdiv;
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_pll(struct nva3_clock_priv *priv, int clk, u32 pll)
+{
+	u32 ctrl = nv_rd32(priv, pll + 0);
+	u32 sclk = 0, P = 1, N = 1, M = 1;
+
+	if (!(ctrl & 0x00000008)) {
+		if (ctrl & 0x00000001) {
+			u32 coef = nv_rd32(priv, pll + 4);
+			M = (coef & 0x000000ff) >> 0;
+			N = (coef & 0x0000ff00) >> 8;
+			P = (coef & 0x003f0000) >> 16;
+
+			/* no post-divider on these.. */
+			if ((pll & 0x00ff00) == 0x00e800)
+				P = 1;
+
+			sclk = read_clk(priv, 0x00 + clk, false);
+		}
+	} else {
+		sclk = read_clk(priv, 0x10 + clk, false);
+	}
+
+	if (M * P)
+		return sclk * N / (M * P);
+	return 0;
+}
+
+static int
+nva3_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return nv_device(priv)->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_core:
+		return read_pll(priv, 0x00, 0x4200);
+	case nv_clk_src_shader:
+		return read_pll(priv, 0x01, 0x4220);
+	case nv_clk_src_mem:
+		return read_pll(priv, 0x02, 0x4000);
+	case nv_clk_src_disp:
+		return read_clk(priv, 0x20, false);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x21, false);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x25, false);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+int
+nva3_clock_info(struct nouveau_clock *clock, int clk, u32 pll, u32 khz,
+		struct nva3_clock_info *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(clock);
+	struct nva3_clock_priv *priv = (void *)clock;
+	struct nvbios_pll limits;
+	u32 oclk, sclk, sdiv;
+	int P, N, M, diff;
+	int ret;
+
+	info->pll = 0;
+	info->clk = 0;
+
+	switch (khz) {
+	case 27000:
+		info->clk = 0x00000100;
+		return khz;
+	case 100000:
+		info->clk = 0x00002100;
+		return khz;
+	case 108000:
+		info->clk = 0x00002140;
+		return khz;
+	default:
+		sclk = read_vco(priv, clk);
+		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+		/* if the clock has a PLL attached, and we can get to within
+		 * [-2, 3) MHz of the target with a divider, we'll disable
+		 * the PLL and use the divider instead.
+		 *
+		 * divider can go as low as 2, limited here because NVIDIA
+		 * and the VBIOS on my NVA8 seem to prefer using the PLL
+		 * for 810MHz - is there a good reason?
+		 */
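+		/* illustrative numbers (not from any real VBIOS): with a
+		 * 1000000 kHz VCO and a 250000 kHz target, sdiv becomes
+		 * min(2000000 / 247001, 65) = 8, oclk = 2000000 / 8 =
+		 * 250000 kHz, diff = 0, so the divider path is taken and
+		 * info->clk = ((8 - 2) << 16) | 0x00003100.
+		 */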
+		if (sdiv > 4) {
+			oclk = (sclk * 2) / sdiv;
+			diff = khz - oclk;
+			if (!pll || (diff >= -2000 && diff < 3000)) {
+				info->clk = (((sdiv - 2) << 16) | 0x00003100);
+				return oclk;
+			}
+		}
+
+		if (!pll)
+			return -ERANGE;
+		break;
+	}
+
+	ret = nvbios_pll_parse(bios, pll, &limits);
+	if (ret)
+		return ret;
+
+	limits.refclk = read_clk(priv, clk - 0x10, true);
+	if (!limits.refclk)
+		return -EINVAL;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+	if (ret >= 0) {
+		info->clk = nv_rd32(priv, 0x4120 + (clk * 4));
+		info->pll = (P << 16) | (N << 8) | M;
+	}
+
+	return ret ? ret : -ERANGE;
+}
+
+static int
+calc_clk(struct nva3_clock_priv *priv, struct nouveau_cstate *cstate,
+	 int clk, u32 pll, int idx)
+{
+	int ret = nva3_clock_info(&priv->base, clk, pll, cstate->domain[idx],
+				  &priv->eng[idx]);
+	if (ret >= 0)
+		return 0;
 	return ret;
 }
 
+static void
+prog_pll(struct nva3_clock_priv *priv, int clk, u32 pll, int idx)
+{
+	struct nva3_clock_info *info = &priv->eng[idx];
+	const u32 src0 = 0x004120 + (clk * 4);
+	const u32 src1 = 0x004160 + (clk * 4);
+	const u32 ctrl = pll + 0;
+	const u32 coef = pll + 4;
+
+	if (info->pll) {
+		nv_mask(priv, src0, 0x00000101, 0x00000101);
+		nv_wr32(priv, coef, info->pll);
+		nv_mask(priv, ctrl, 0x00000015, 0x00000015);
+		nv_mask(priv, ctrl, 0x00000010, 0x00000000);
+		nv_wait(priv, ctrl, 0x00020000, 0x00020000);
+		nv_mask(priv, ctrl, 0x00000010, 0x00000010);
+		nv_mask(priv, ctrl, 0x00000008, 0x00000000);
+		nv_mask(priv, src1, 0x00000100, 0x00000000);
+		nv_mask(priv, src1, 0x00000001, 0x00000000);
+	} else {
+		nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
+		nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+		udelay(20);
+		nv_mask(priv, ctrl, 0x00000001, 0x00000000);
+		nv_mask(priv, src0, 0x00000100, 0x00000000);
+		nv_mask(priv, src0, 0x00000001, 0x00000000);
+	}
+}
+
+static void
+prog_clk(struct nva3_clock_priv *priv, int clk, int idx)
+{
+	struct nva3_clock_info *info = &priv->eng[idx];
+	nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+}
+
+static int
+nva3_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+	    (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+	    (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+	    (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static int
+nva3_clock_prog(struct nouveau_clock *clk)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	prog_pll(priv, 0x00, 0x004200, nv_clk_src_core);
+	prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
+	prog_clk(priv, 0x20, nv_clk_src_disp);
+	prog_clk(priv, 0x21, nv_clk_src_vdec);
+	return 0;
+}
+
+static void
+nva3_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static struct nouveau_clocks
+nva3_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_core   , 0x00, 0, "core", 1000 },
+	{ nv_clk_src_shader , 0x01, 0, "shader", 1000 },
+	{ nv_clk_src_mem    , 0x02, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0x03 },
+	{ nv_clk_src_disp   , 0x04 },
+	{ nv_clk_src_max }
+};
 
 static int
 nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -58,12 +302,15 @@
 	struct nva3_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nva3_clock_pll_calc;
+	priv->base.read = nva3_clock_read;
+	priv->base.calc = nva3_clock_calc;
+	priv->base.prog = nva3_clock_prog;
+	priv->base.tidy = nva3_clock_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
new file mode 100644
index 0000000..6229a50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_CLK_NVA3_H__
+#define __NVKM_CLK_NVA3_H__
+
+#include <subdev/clock.h>
+
+struct nva3_clock_info {
+	u32 clk;
+	u32 pll;
+};
+
+int nva3_clock_info(struct nouveau_clock *, int, u32, u32,
+		    struct nva3_clock_info *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 439d81c2..c3105720 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -25,11 +25,408 @@
 #include <subdev/clock.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/timer.h>
 
 #include "pll.h"
 
+struct nvc0_clock_info {
+	u32 freq;
+	u32 ssel;
+	u32 mdiv;
+	u32 dsrc;
+	u32 ddiv;
+	u32 coef;
+};
+
 struct nvc0_clock_priv {
 	struct nouveau_clock base;
+	struct nvc0_clock_info eng[16];
+};
+
+static u32 read_div(struct nvc0_clock_priv *, int, u32, u32);
+
+static u32
+read_vco(struct nvc0_clock_priv *priv, u32 dsrc)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 ssrc = nv_rd32(priv, dsrc);
+	if (!(ssrc & 0x00000100))
+		return clk->read(clk, nv_clk_src_sppll0);
+	return clk->read(clk, nv_clk_src_sppll1);
+}
+
+static u32
+read_pll(struct nvc0_clock_priv *priv, u32 pll)
+{
+	struct nouveau_clock *clk = &priv->base;
+	u32 ctrl = nv_rd32(priv, pll + 0x00);
+	u32 coef = nv_rd32(priv, pll + 0x04);
+	u32 P = (coef & 0x003f0000) >> 16;
+	u32 N = (coef & 0x0000ff00) >> 8;
+	u32 M = (coef & 0x000000ff) >> 0;
+	u32 sclk;
+
+	if (!(ctrl & 0x00000001))
+		return 0;
+
+	switch (pll) {
+	case 0x00e800:
+	case 0x00e820:
+		sclk = nv_device(priv)->crystal;
+		P = 1;
+		break;
+	case 0x132000:
+		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		break;
+	case 0x132020:
+		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		break;
+	case 0x137000:
+	case 0x137020:
+	case 0x137040:
+	case 0x1370e0:
+		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		break;
+	default:
+		return 0;
+	}
+
+	return sclk * N / M / P;
+}
+
+static u32
+read_div(struct nvc0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+	switch (ssrc & 0x00000003) {
+	case 0:
+		if ((ssrc & 0x00030000) != 0x00030000)
+			return nv_device(priv)->crystal;
+		return 108000;
+	case 2:
+		return 100000;
+	case 3:
+		if (sctl & 0x80000000) {
+			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sdiv = (sctl & 0x0000003f) + 2;
+			return (sclk * 2) / sdiv;
+		}
+
+		return read_vco(priv, dsrc + (doff * 4));
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_clk(struct nvc0_clock_priv *priv, int clk)
+{
+	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	u32 ssel = nv_rd32(priv, 0x137100);
+	u32 sclk, sdiv;
+
+	if (ssel & (1 << clk)) {
+		if (clk < 7)
+			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+		else
+			sclk = read_pll(priv, 0x1370e0);
+		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+	} else {
+		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+	}
+
+	if (sctl & 0x80000000)
+		return (sclk * 2) / sdiv;
+
+	return sclk;
+}
+
+static int
+nvc0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nouveau_device *device = nv_device(clk);
+	struct nvc0_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return device->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_sppll0:
+		return read_pll(priv, 0x00e800);
+	case nv_clk_src_sppll1:
+		return read_pll(priv, 0x00e820);
+
+	case nv_clk_src_mpllsrcref:
+		return read_div(priv, 0, 0x137320, 0x137330);
+	case nv_clk_src_mpllsrc:
+		return read_pll(priv, 0x132020);
+	case nv_clk_src_mpll:
+		return read_pll(priv, 0x132000);
+	case nv_clk_src_mdiv:
+		return read_div(priv, 0, 0x137300, 0x137310);
+	case nv_clk_src_mem:
+		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
+			return clk->read(clk, nv_clk_src_mpll);
+		return clk->read(clk, nv_clk_src_mdiv);
+
+	case nv_clk_src_gpc:
+		return read_clk(priv, 0x00);
+	case nv_clk_src_rop:
+		return read_clk(priv, 0x01);
+	case nv_clk_src_hubk07:
+		return read_clk(priv, 0x02);
+	case nv_clk_src_hubk06:
+		return read_clk(priv, 0x07);
+	case nv_clk_src_hubk01:
+		return read_clk(priv, 0x08);
+	case nv_clk_src_copy:
+		return read_clk(priv, 0x09);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x0c);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x0e);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+static u32
+calc_div(struct nvc0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+	u32 div = min((ref * 2) / freq, (u32)65);
+	if (div < 2)
+		div = 2;
+
+	*ddiv = div - 2;
+	return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+	u32 sclk;
+
+	/* use one of the fixed frequencies if possible */
+	*ddiv = 0x00000000;
+	switch (freq) {
+	case  27000:
+	case 108000:
+		*dsrc = 0x00000000;
+		if (freq == 108000)
+			*dsrc |= 0x00030000;
+		return freq;
+	case 100000:
+		*dsrc = 0x00000002;
+		return freq;
+	default:
+		*dsrc = 0x00000003;
+		break;
+	}
+
+	/* otherwise, calculate the closest divider */
+	sclk = read_vco(priv, 0x137160 + (clk * 4));
+	if (clk < 7)
+		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	return sclk;
+}
+
+static u32
+calc_pll(struct nvc0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll limits;
+	int N, M, P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	if (ret)
+		return 0;
+
+	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	if (!limits.refclk)
+		return 0;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	if (ret <= 0)
+		return 0;
+
+	*coef = (P << 16) | (N << 8) | M;
+	return ret;
+}
+
+static int
+calc_clk(struct nvc0_clock_priv *priv,
+	 struct nouveau_cstate *cstate, int clk, int dom)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	u32 freq = cstate->domain[dom];
+	u32 src0, div0, div1D, div1P = 0;
+	u32 clk0, clk1 = 0;
+
+	/* invalid clock domain */
+	if (!freq)
+		return 0;
+
+	/* first possible path, using only dividers */
+	clk0 = calc_src(priv, clk, freq, &src0, &div0);
+	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+	/* see if we can get any closer using PLLs */
+	if (clk0 != freq && (0x00004387 & (1 << clk))) {
+		if (clk <= 7)
+			clk1 = calc_pll(priv, clk, freq, &info->coef);
+		else
+			clk1 = cstate->domain[nv_clk_src_hubk06];
+		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+	}
+
+	/* select the method which gets closest to target freq */
+	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+		info->dsrc = src0;
+		if (div0) {
+			info->ddiv |= 0x80000000;
+			info->ddiv |= div0 << 8;
+			info->ddiv |= div0;
+		}
+		if (div1D) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1D;
+		}
+		info->ssel = info->coef = 0;
+		info->freq = clk0;
+	} else {
+		if (div1P) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1P << 8;
+		}
+		info->ssel = (1 << clk);
+		info->freq = clk1;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static void
+nvc0_clock_prog_0(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	if (clk < 7 && !info->ssel) {
+		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	}
+}
+
+static void
+nvc0_clock_prog_1(struct nvc0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nvc0_clock_prog_2(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	const u32 addr = 0x137000 + (clk * 0x20);
+	if (clk <= 7) {
+		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+		if (info->coef) {
+			nv_wr32(priv, addr + 0x04, info->coef);
+			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+		}
+	}
+}
+
+static void
+nvc0_clock_prog_3(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+	}
+}
+
+static void
+nvc0_clock_prog_4(struct nvc0_clock_priv *priv, int clk)
+{
+	struct nvc0_clock_info *info = &priv->eng[clk];
+	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static int
+nvc0_clock_prog(struct nouveau_clock *clk)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	struct {
+		void (*exec)(struct nvc0_clock_priv *, int);
+	} stage[] = {
+		{ nvc0_clock_prog_0 }, /* div programming */
+		{ nvc0_clock_prog_1 }, /* select div mode */
+		{ nvc0_clock_prog_2 }, /* (maybe) program pll */
+		{ nvc0_clock_prog_3 }, /* (maybe) select pll mode */
+		{ nvc0_clock_prog_4 }, /* final divider */
+	};
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(stage); i++) {
+		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+			if (!priv->eng[j].freq)
+				continue;
+			stage[i].exec(priv, j);
+		}
+	}
+
+	return 0;
+}
+
+static void
+nvc0_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nvc0_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_hubk06 , 0x00 },
+	{ nv_clk_src_hubk01 , 0x01 },
+	{ nv_clk_src_copy   , 0x02 },
+	{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+	{ nv_clk_src_rop    , 0x04 },
+	{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
+	{ nv_clk_src_vdec   , 0x06 },
+	{ nv_clk_src_daemon , 0x0a },
+	{ nv_clk_src_hubk07 , 0x0b },
+	{ nv_clk_src_max }
 };
 
 static int
@@ -40,12 +437,15 @@
 	struct nvc0_clock_priv *priv;
 	int ret;
 
-	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.pll_calc = nva3_clock_pll_calc;
+	priv->base.read = nvc0_clock_read;
+	priv->base.calc = nvc0_clock_calc;
+	priv->base.prog = nvc0_clock_prog;
+	priv->base.tidy = nvc0_clock_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
new file mode 100644
index 0000000..4c62e84
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nve0_clock_info {
+	u32 freq;
+	u32 ssel;
+	u32 mdiv;
+	u32 dsrc;
+	u32 ddiv;
+	u32 coef;
+};
+
+struct nve0_clock_priv {
+	struct nouveau_clock base;
+	struct nve0_clock_info eng[16];
+};
+
+static u32 read_div(struct nve0_clock_priv *, int, u32, u32);
+static u32 read_pll(struct nve0_clock_priv *, u32);
+
+static u32
+read_vco(struct nve0_clock_priv *priv, u32 dsrc)
+{
+	u32 ssrc = nv_rd32(priv, dsrc);
+	if (!(ssrc & 0x00000100))
+		return read_pll(priv, 0x00e800);
+	return read_pll(priv, 0x00e820);
+}
+
+static u32
+read_pll(struct nve0_clock_priv *priv, u32 pll)
+{
+	u32 ctrl = nv_rd32(priv, pll + 0x00);
+	u32 coef = nv_rd32(priv, pll + 0x04);
+	u32 P = (coef & 0x003f0000) >> 16;
+	u32 N = (coef & 0x0000ff00) >> 8;
+	u32 M = (coef & 0x000000ff) >> 0;
+	u32 sclk;
+	u16 fN = 0xf000;
+
+	if (!(ctrl & 0x00000001))
+		return 0;
+
+	switch (pll) {
+	case 0x00e800:
+	case 0x00e820:
+		sclk = nv_device(priv)->crystal;
+		P = 1;
+		break;
+	case 0x132000:
+		sclk = read_pll(priv, 0x132020);
+		P = (coef & 0x10000000) ? 2 : 1;
+		break;
+	case 0x132020:
+		sclk = read_div(priv, 0, 0x137320, 0x137330);
+		fN   = nv_rd32(priv, pll + 0x10) >> 16;
+		break;
+	case 0x137000:
+	case 0x137020:
+	case 0x137040:
+	case 0x1370e0:
+		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		break;
+	default:
+		return 0;
+	}
+
+	if (P == 0)
+		P = 1;
+
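+	/* fN appears to extend N with a fractional part (only the 0x132020
+	 * PLL reads it); with the default fN of 0xf000, (u16)(fN + 4096)
+	 * wraps to zero, so PLLs without the fractional register add no
+	 * extra term.  Otherwise the addition works out to
+	 * ((u16)(fN + 4096) * sclk) / 8192, rounded down.
+	 */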
+	sclk = (sclk * N) + (((u16)(fN + 4096) * sclk) >> 13);
+	return sclk / (M * P);
+}
+
+static u32
+read_div(struct nve0_clock_priv *priv, int doff, u32 dsrc, u32 dctl)
+{
+	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+
+	switch (ssrc & 0x00000003) {
+	case 0:
+		if ((ssrc & 0x00030000) != 0x00030000)
+			return nv_device(priv)->crystal;
+		return 108000;
+	case 2:
+		return 100000;
+	case 3:
+		if (sctl & 0x80000000) {
+			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sdiv = (sctl & 0x0000003f) + 2;
+			return (sclk * 2) / sdiv;
+		}
+
+		return read_vco(priv, dsrc + (doff * 4));
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_mem(struct nve0_clock_priv *priv)
+{
+	switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
+	case 1: return read_pll(priv, 0x132020);
+	case 2: return read_pll(priv, 0x132000);
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_clk(struct nve0_clock_priv *priv, int clk)
+{
+	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	u32 sclk, sdiv;
+
+	if (clk < 7) {
+		u32 ssel = nv_rd32(priv, 0x137100);
+		if (ssel & (1 << clk)) {
+			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+			sdiv = 1;
+		} else {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sdiv = 0;
+		}
+	} else {
+		u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+		if ((ssrc & 0x00000003) == 0x00000003) {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			if (ssrc & 0x00000100) {
+				if (ssrc & 0x40000000)
+					sclk = read_pll(priv, 0x1370e0);
+				sdiv = 1;
+			} else {
+				sdiv = 0;
+			}
+		} else {
+			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sdiv = 0;
+		}
+	}
+
+	if (sctl & 0x80000000) {
+		if (sdiv)
+			sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+		else
+			sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+		return (sclk * 2) / sdiv;
+	}
+
+	return sclk;
+}
+
+static int
+nve0_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+	struct nouveau_device *device = nv_device(clk);
+	struct nve0_clock_priv *priv = (void *)clk;
+
+	switch (src) {
+	case nv_clk_src_crystal:
+		return device->crystal;
+	case nv_clk_src_href:
+		return 100000;
+	case nv_clk_src_mem:
+		return read_mem(priv);
+	case nv_clk_src_gpc:
+		return read_clk(priv, 0x00);
+	case nv_clk_src_rop:
+		return read_clk(priv, 0x01);
+	case nv_clk_src_hubk07:
+		return read_clk(priv, 0x02);
+	case nv_clk_src_hubk06:
+		return read_clk(priv, 0x07);
+	case nv_clk_src_hubk01:
+		return read_clk(priv, 0x08);
+	case nv_clk_src_daemon:
+		return read_clk(priv, 0x0c);
+	case nv_clk_src_vdec:
+		return read_clk(priv, 0x0e);
+	default:
+		nv_error(clk, "invalid clock source %d\n", src);
+		return -EINVAL;
+	}
+}
+
+static u32
+calc_div(struct nve0_clock_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+	u32 div = min((ref * 2) / freq, (u32)65);
+	if (div < 2)
+		div = 2;
+
+	*ddiv = div - 2;
+	return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+	u32 sclk;
+
+	/* use one of the fixed frequencies if possible */
+	*ddiv = 0x00000000;
+	switch (freq) {
+	case  27000:
+	case 108000:
+		*dsrc = 0x00000000;
+		if (freq == 108000)
+			*dsrc |= 0x00030000;
+		return freq;
+	case 100000:
+		*dsrc = 0x00000002;
+		return freq;
+	default:
+		*dsrc = 0x00000003;
+		break;
+	}
+
+	/* otherwise, calculate the closest divider */
+	sclk = read_vco(priv, 0x137160 + (clk * 4));
+	if (clk < 7)
+		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	return sclk;
+}
+
+static u32
+calc_pll(struct nve0_clock_priv *priv, int clk, u32 freq, u32 *coef)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll limits;
+	int N, M, P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	if (ret)
+		return 0;
+
+	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	if (!limits.refclk)
+		return 0;
+
+	ret = nva3_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	if (ret <= 0)
+		return 0;
+
+	*coef = (P << 16) | (N << 8) | M;
+	return ret;
+}
+
+static int
+calc_clk(struct nve0_clock_priv *priv,
+	 struct nouveau_cstate *cstate, int clk, int dom)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	u32 freq = cstate->domain[dom];
+	u32 src0, div0, div1D, div1P = 0;
+	u32 clk0, clk1 = 0;
+
+	/* invalid clock domain */
+	if (!freq)
+		return 0;
+
+	/* first possible path, using only dividers */
+	clk0 = calc_src(priv, clk, freq, &src0, &div0);
+	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+
+	/* see if we can get any closer using PLLs */
+	if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
+		if (clk <= 7)
+			clk1 = calc_pll(priv, clk, freq, &info->coef);
+		else
+			clk1 = cstate->domain[nv_clk_src_hubk06];
+		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+	}
+
+	/* select the method which gets closest to target freq */
+	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+		info->dsrc = src0;
+		if (div0) {
+			info->ddiv |= 0x80000000;
+			info->ddiv |= div0 << 8;
+			info->ddiv |= div0;
+		}
+		if (div1D) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1D;
+		}
+		info->ssel = 0;
+		info->freq = clk0;
+	} else {
+		if (div1P) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1P << 8;
+		}
+		info->ssel = (1 << clk);
+		info->dsrc = 0x40000100;
+		info->freq = clk1;
+	}
+
+	return 0;
+}
+
+static int
+nve0_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	int ret;
+
+	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+		return ret;
+
+	return 0;
+}
+
+static void
+nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (!info->ssel) {
+		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	}
+}
+
+static void
+nve0_clock_prog_1_0(struct nve0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+}
+
+static void
+nve0_clock_prog_1_1(struct nve0_clock_priv *priv, int clk)
+{
+	nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+}
+
+static void
+nve0_clock_prog_2(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	const u32 addr = 0x137000 + (clk * 0x20);
+	nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
+	nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	if (info->coef) {
+		nv_wr32(priv, addr + 0x04, info->coef);
+		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
+		nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
+		nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+	}
+}
+
+static void
+nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static void
+nve0_clock_prog_4_0(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
+		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+	}
+}
+
+static void
+nve0_clock_prog_4_1(struct nve0_clock_priv *priv, int clk)
+{
+	struct nve0_clock_info *info = &priv->eng[clk];
+	if (info->ssel) {
+		nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
+		nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+	}
+}
+
+static int
+nve0_clock_prog(struct nouveau_clock *clk)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	struct {
+		u32 mask;
+		void (*exec)(struct nve0_clock_priv *, int);
+	} stage[] = {
+		{ 0x007f, nve0_clock_prog_0   }, /* div programming */
+		{ 0x007f, nve0_clock_prog_1_0 }, /* select div mode */
+		{ 0xff80, nve0_clock_prog_1_1 },
+		{ 0x00ff, nve0_clock_prog_2   }, /* (maybe) program pll */
+		{ 0xff80, nve0_clock_prog_3   }, /* final divider */
+		{ 0x007f, nve0_clock_prog_4_0 }, /* (maybe) select pll mode */
+		{ 0xff80, nve0_clock_prog_4_1 },
+	};
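+	/* each stage's mask selects which clock indices it applies to:
+	 * 0x007f covers clocks 0..6, 0xff80 covers 7..15, and 0x00ff
+	 * covers 0..7, mirroring the clk < 7 split used in read_clk().
+	 */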
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(stage); i++) {
+		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+			if (!(stage[i].mask & (1 << j)))
+				continue;
+			if (!priv->eng[j].freq)
+				continue;
+			stage[i].exec(priv, j);
+		}
+	}
+
+	return 0;
+}
+
+static void
+nve0_clock_tidy(struct nouveau_clock *clk)
+{
+	struct nve0_clock_priv *priv = (void *)clk;
+	memset(priv->eng, 0x00, sizeof(priv->eng));
+}
+
+static struct nouveau_clocks
+nve0_domain[] = {
+	{ nv_clk_src_crystal, 0xff },
+	{ nv_clk_src_href   , 0xff },
+	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_mem    , 0x03, 0, "memory", 1000 },
+	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+	{ nv_clk_src_hubk01 , 0x05 },
+	{ nv_clk_src_vdec   , 0x06 },
+	{ nv_clk_src_daemon , 0x07 },
+	{ nv_clk_src_max }
+};
+
+static int
+nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.read = nve0_clock_read;
+	priv->base.calc = nve0_clock_calc;
+	priv->base.prog = nve0_clock_prog;
+	priv->base.tidy = nve0_clock_tidy;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
index cf1ed0d..b47d543 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -38,7 +38,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	int cv = nouveau_bios(subdev)->version.chip;
+	struct nouveau_bios *bios = nouveau_bios(subdev);
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -54,18 +54,21 @@
 
 	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
 	/* possibly correlated with introduction of 27MHz crystal */
-	if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
-		if (clk > 250000)
-			maxM = 6;
-		if (clk > 340000)
-			maxM = 2;
-	} else if (cv < 0x40) {
-		if (clk > 150000)
-			maxM = 6;
-		if (clk > 200000)
-			maxM = 4;
-		if (clk > 340000)
-			maxM = 2;
+	if (bios->version.major < 0x60) {
+		int cv = bios->version.chip;
+		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+			if (clk > 250000)
+				maxM = 6;
+			if (clk > 340000)
+				maxM = 2;
+		} else if (cv < 0x40) {
+			if (clk > 150000)
+				maxM = 6;
+			if (clk > 200000)
+				maxM = 4;
+			if (clk > 340000)
+				maxM = 2;
+		}
 	}
 
 	P = 1 << maxP;
@@ -227,10 +230,12 @@
 {
 	int ret;
 
-	if (!info->vco2.max_freq) {
+	if (!info->vco2.max_freq || !N2) {
 		ret = getMNP_single(subdev, info, freq, N1, M1, P);
-		*N2 = 1;
-		*M2 = 1;
+		if (N2) {
+			*N2 = 1;
+			*M2 = 1;
+		}
 	} else {
 		ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
 	}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 2fe1f71..8eca457 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -45,6 +45,7 @@
 	lM = max(lM, (int)info->vco1.min_m);
 	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
 	hM = min(hM, (int)info->vco1.max_m);
+	lM = min(lM, hM);
 
 	for (M = lM; M <= hM; M++) {
 		u32 tmp = freq * *P * M;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
new file mode 100644
index 0000000..fb33f06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/seq.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_CLK_SEQ_H__
+#define __NVKM_CLK_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define clk_init(s,p)       hwsq_init(&(s)->base, (p))
+#define clk_exec(s,e)       hwsq_exec(&(s)->base, (e))
+#define clk_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define clk_rd32(s,r)       hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define clk_wr32(s,r,d)     hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define clk_mask(s,r,m,d)   hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define clk_setf(s,f,d)     hwsq_setf(&(s)->base, (f), (d))
+#define clk_wait(s,f,d)     hwsq_wait(&(s)->base, (f), (d))
+#define clk_nsec(s,n)       hwsq_nsec(&(s)->base, (n))
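+
+/* illustrative expansion: with a clock-specific hwsq struct that declares
+ * "struct hwsq_reg r_mast;" (as nv50_clock_hwsq does), clk_mask(hwsq, mast,
+ * m, d) expands to hwsq_mask(&hwsq->base, &hwsq->r_mast, m, d); the hwsq
+ * helpers record the operation for later execution via clk_exec().
+ */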
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index b22357d..27c8235 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -168,7 +168,8 @@
 		/* downclock -- write new NM first */
 		nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
 
-	if (chip_version < 0x17 && chip_version != 0x11)
+	if ((chip_version < 0x17 || chip_version == 0x1a) &&
+	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
 	nv_rd32(devinit, reg);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 463b08f..8d274db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -38,12 +38,18 @@
 nv10_devinit_meminit(struct nouveau_devinit *devinit)
 {
 	struct nv10_devinit_priv *priv = (void *)devinit;
-	const int mem_width[] = { 0x10, 0x00, 0x20 };
-	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+	static const int mem_width[] = { 0x10, 0x00, 0x20 };
+	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
+	if (nv_device(priv)->card_type >= NV_11 &&
+	    nv_device(priv)->chipset >= 0x17)
+		mem_width_count = 3;
+	else
+		mem_width_count = 2;
+
 	/* Map the framebuffer aperture */
 	fb = fbmem_init(nv_device(priv)->pdev);
 	if (!fb) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index 821cd75..f009d8a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -22,9 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "subdev/fb.h"
-#include "subdev/bios.h"
-#include "subdev/bios/bit.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+
+#include "priv.h"
 
 int
 nouveau_fb_bios_memtype(struct nouveau_bios *bios)
@@ -106,9 +107,9 @@
 
 int
 nouveau_fb_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, struct nouveau_oclass *ramcls,
-		   int length, void **pobject)
+		   struct nouveau_oclass *oclass, int length, void **pobject)
 {
+	struct nouveau_fb_impl *impl = (void *)oclass;
 	static const char *name[] = {
 		[NV_MEM_TYPE_UNKNOWN] = "unknown",
 		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
@@ -132,8 +133,10 @@
 	if (ret)
 		return ret;
 
+	pfb->memtype_valid = impl->memtype;
+
 	ret = nouveau_object_ctor(nv_object(pfb), nv_object(pfb),
-				  ramcls, NULL, 0, &ram);
+				  impl->ram, NULL, 0, &ram);
 	if (ret) {
 		nv_fatal(pfb, "error detecting memory configuration!!\n");
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
new file mode 100644
index 0000000..34f9605
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr5.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+int
+nouveau_gddr5_calc(struct nouveau_ram *ram)
+{
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	int pd, lf, xd, vh, vr, vo;
+	int WL, CL, WR, at, dt, ds;
+	int rq = ram->freq < 1000000; /* XXX */
+
+	switch (!!ram->ramcfg.data * ram->ramcfg.version) {
+	case 0x11:
+		pd =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x80) >> 7;
+		lf =  (nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x40) >> 6;
+		xd = !(nv_ro08(bios, ram->ramcfg.data + 0x01) & 0x20);
+		vh =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x10) >> 4;
+		vr =  (nv_ro08(bios, ram->ramcfg.data + 0x02) & 0x04) >> 2;
+		vo =   nv_ro08(bios, ram->ramcfg.data + 0x06) & 0xff;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	switch (!!ram->timing.data * ram->timing.version) {
+	case 0x20:
+		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+		at = (nv_ro08(bios, ram->timing.data + 0x2e) & 0xc0) >> 6;
+		dt =  nv_ro08(bios, ram->timing.data + 0x2e) & 0x03;
+		ds =  nv_ro08(bios, ram->timing.data + 0x2f) & 0x03;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	if (WL < 1 || WL > 7 || CL < 5 || CL > 36 || WR < 4 || WR > 35)
+		return -EINVAL;
+	CL -= 5;
+	WR -= 4;
+
+	ram->mr[0] &= ~0xf7f;
+	ram->mr[0] |= (WR & 0x0f) << 8;
+	ram->mr[0] |= (CL & 0x0f) << 3;
+	ram->mr[0] |= (WL & 0x07) << 0;
+
+	ram->mr[1] &= ~0x0bf;
+	ram->mr[1] |= (xd & 0x01) << 7;
+	ram->mr[1] |= (at & 0x03) << 4;
+	ram->mr[1] |= (dt & 0x03) << 2;
+	ram->mr[1] |= (ds & 0x03) << 0;
+
+	ram->mr[3] &= ~0x020;
+	ram->mr[3] |= (rq & 0x01) << 5;
+
+	if (!vo)
+		vo = (ram->mr[6] & 0xff0) >> 4;
+	if (ram->mr[6] & 0x001)
+		pd = 1; /* binary driver does this.. bug? */
+	ram->mr[6] &= ~0xff1;
+	ram->mr[6] |= (vo & 0xff) << 4;
+	ram->mr[6] |= (pd & 0x01) << 0;
+
+	if (!(ram->mr[7] & 0x100))
+		vr = 0; /* binary driver does this.. bug? */
+	ram->mr[7] &= ~0x188;
+	ram->mr[7] |= (vr & 0x01) << 8;
+	ram->mr[7] |= (vh & 0x01) << 7;
+	ram->mr[7] |= (lf & 0x01) << 3;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index 1f103c7..8309fe3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -22,14 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv04.h"
 
 #define NV04_PFB_CFG0						0x00100200
 
-struct nv04_fb_priv {
-	struct nouveau_fb base;
-};
-
 bool
 nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
@@ -57,30 +53,37 @@
 	return 0;
 }
 
-static int
+int
 nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
+	struct nv04_fb_impl *impl = (void *)oclass;
 	struct nv04_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv04_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.tile.regions = impl->tile.regions;
+	priv->base.tile.init = impl->tile.init;
+	priv->base.tile.comp = impl->tile.comp;
+	priv->base.tile.fini = impl->tile.fini;
+	priv->base.tile.prog = impl->tile.prog;
 	return 0;
 }
 
-struct nouveau_oclass
-nv04_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv04_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x04),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv04_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv04_ram_oclass,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
new file mode 100644
index 0000000..06ce71f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.h
@@ -0,0 +1,55 @@
+#ifndef __NVKM_FB_NV04_H__
+#define __NVKM_FB_NV04_H__
+
+#include "priv.h"
+
+struct nv04_fb_priv {
+	struct nouveau_fb base;
+};
+
+int  nv04_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+
+struct nv04_fb_impl {
+	struct nouveau_fb_impl base;
+	struct {
+		int regions;
+		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+			     struct nouveau_fb_tile *);
+		void (*fini)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+		void (*prog)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+	} tile;
+};
+
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index be069b5..ffb7ec6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv10_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -57,34 +53,19 @@
 	nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
-static int
-nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv10_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv10_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv10_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x10),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv10_fb_ctor,
+struct nouveau_oclass *
+nv10_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x10),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 57a2af0..9159a5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv1a_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv1a_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv1a_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv10_fb_tile_init;
-	priv->base.tile.fini = nv10_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv1a_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x1a),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv1a_fb_ctor,
+struct nouveau_oclass *
+nv1a_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x1a),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv10_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv10_fb_tile_init,
+	.tile.fini = nv10_fb_tile_fini,
+	.tile.prog = nv10_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index b18c4e6..f003c1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv20_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -80,35 +76,20 @@
 	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-static int
-nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv20_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv20_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv20_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x20),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv20_fb_ctor,
+struct nouveau_oclass *
+nv20_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x20),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv20_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
index 32ccabf..f34f422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv25_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -46,35 +42,20 @@
 	}
 }
 
-static int
-nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv25_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv20_fb_tile_init;
-	priv->base.tile.comp = nv25_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv25_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x25),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv25_fb_ctor,
+struct nouveau_oclass *
+nv25_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x25),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = _nouveau_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv20_fb_tile_init,
+	.tile.comp = nv25_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index bef756d..69093f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv30_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -67,7 +63,7 @@
 }
 
 static int
-calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
 {
 	struct nouveau_device *device = nv_device(priv);
 	int b = (device->chipset > 0x30 ?
@@ -78,7 +74,7 @@
 }
 
 static int
-calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
 {
 	int j, x = 0;
 
@@ -95,7 +91,7 @@
 nv30_fb_init(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
-	struct nv30_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret, i, j;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -124,35 +120,20 @@
 	return 0;
 }
 
-static int
-nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv30_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv30_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv30_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x30),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv30_fb_ctor,
+struct nouveau_oclass *
+nv30_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x30),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv30_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
index 097d8e3..161b06e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv35_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@
 	}
 }
 
-static int
-nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv35_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv35_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv35_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x35),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv35_fb_ctor,
+struct nouveau_oclass *
+nv35_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x35),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv35_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
index 9d6d9df..2dd3d0a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv36_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -47,35 +43,20 @@
 	}
 }
 
-static int
-nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv36_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv20_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv36_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv36_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x36),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv36_fb_ctor,
+struct nouveau_oclass *
+nv36_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x36),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv30_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv20_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv36_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 33b4393..95a115a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv40_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
@@ -50,7 +46,7 @@
 static int
 nv40_fb_init(struct nouveau_object *object)
 {
-	struct nv40_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -61,36 +57,20 @@
 	return 0;
 }
 
-static int
-nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv40_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv40_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 8;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv20_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv40_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x40),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv40_fb_ctor,
+struct nouveau_oclass *
+nv40_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x40),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv40_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv40_ram_oclass,
+	.tile.regions = 8,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv20_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
new file mode 100644
index 0000000..581f808
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_FB_NV40_H__
+#define __NVKM_FB_NV40_H__
+
+#include "priv.h"
+
+struct nv40_ram {
+	struct nouveau_ram base;
+	u32 ctrl;
+	u32 coef;
+};
+
+
+int  nv40_ram_calc(struct nouveau_fb *, u32);
+int  nv40_ram_prog(struct nouveau_fb *);
+void nv40_ram_tidy(struct nouveau_fb *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
index 02cd837..b239a86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv41_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
@@ -43,7 +39,7 @@
 int
 nv41_fb_init(struct nouveau_object *object)
 {
-	struct nv41_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -54,36 +50,20 @@
 	return 0;
 }
 
-static int
-nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv41_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv41_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x41),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv41_fb_ctor,
+struct nouveau_oclass *
+nv41_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x41),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
index c5246c2..d847820 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv44_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 static void
 nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -52,7 +48,7 @@
 int
 nv44_fb_init(struct nouveau_object *object)
 {
-	struct nv44_fb_priv *priv = (void *)object;
+	struct nv04_fb_priv *priv = (void *)object;
 	int ret;
 
 	ret = nouveau_fb_init(&priv->base);
@@ -64,35 +60,19 @@
 	return 0;
 }
 
-static int
-nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv44_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv44_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv44_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x44),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv44_fb_ctor,
+struct nouveau_oclass *
+nv44_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x44),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv44_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
index e2b5790..a5b7751 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -24,11 +24,7 @@
  *
  */
 
-#include "priv.h"
-
-struct nv46_fb_priv {
-	struct nouveau_fb base;
-};
+#include "nv04.h"
 
 void
 nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
@@ -44,35 +40,19 @@
 	tile->pitch = pitch;
 }
 
-static int
-nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv46_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv44_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv46_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x46),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv46_fb_ctor,
+struct nouveau_oclass *
+nv46_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x46),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv44_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
index fe6a227..3bea142 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv47_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv47_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv41_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv47_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x47),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv47_fb_ctor,
+struct nouveau_oclass *
+nv47_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x47),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv41_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
index 5eca99b..666cbd5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -24,42 +24,22 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv49_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv49_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv49_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 15;
-	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.comp = nv40_fb_tile_comp;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv41_fb_tile_prog;
-	return 0;
-}
-
-
-struct nouveau_oclass
-nv49_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x49),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv49_fb_ctor,
+struct nouveau_oclass *
+nv49_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x49),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv41_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv49_ram_oclass,
+	.tile.regions = 15,
+	.tile.init = nv30_fb_tile_init,
+	.tile.comp = nv40_fb_tile_comp,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv41_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
index 1190b78..42e64f3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -24,40 +24,21 @@
  *
  */
 
-#include "priv.h"
+#include "nv04.h"
 
-struct nv4e_fb_priv {
-	struct nouveau_fb base;
-};
-
-static int
-nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv4e_fb_priv *priv;
-	int ret;
-
-	ret = nouveau_fb_create(parent, engine, oclass, &nv4e_ram_oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	priv->base.tile.regions = 12;
-	priv->base.tile.init = nv46_fb_tile_init;
-	priv->base.tile.fini = nv20_fb_tile_fini;
-	priv->base.tile.prog = nv44_fb_tile_prog;
-	return 0;
-}
-
-struct nouveau_oclass
-nv4e_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x4e),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv4e_fb_ctor,
+struct nouveau_oclass *
+nv4e_fb_oclass = &(struct nv04_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x4e),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
 		.dtor = _nouveau_fb_dtor,
 		.init = nv44_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv04_fb_memtype_valid,
+	.base.ram = &nv4e_ram_oclass,
+	.tile.regions = 12,
+	.tile.init = nv46_fb_tile_init,
+	.tile.fini = nv20_fb_tile_fini,
+	.tile.prog = nv44_fb_tile_prog,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index da614ec..cbc7f00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -27,14 +27,9 @@
 #include <core/engctx.h>
 #include <core/object.h>
 
-#include "priv.h"
 #include <subdev/bios.h>
 
-struct nv50_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c08_page;
-	dma_addr_t r100c08;
-};
+#include "nv50.h"
 
 int
 nv50_fb_memtype[0x80] = {
@@ -48,7 +43,7 @@
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-static bool
+bool
 nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
@@ -239,7 +234,7 @@
 		pr_cont("0x%08x\n", st1);
 }
 
-static int
+int
 nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -248,7 +243,7 @@
 	struct nv50_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nv50_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
@@ -264,12 +259,11 @@
 		nv_warn(priv, "failed 0x100c08 page alloc\n");
 	}
 
-	priv->base.memtype_valid = nv50_fb_memtype_valid;
 	nv_subdev(priv)->intr = nv50_fb_intr;
 	return 0;
 }
 
-static void
+void
 nv50_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -284,10 +278,10 @@
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nv50_fb_init(struct nouveau_object *object)
 {
-	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_impl *impl = (void *)object->oclass;
 	struct nv50_fb_priv *priv = (void *)object;
 	int ret;
 
@@ -303,33 +297,20 @@
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
-	switch (device->chipset) {
-	case 0x50:
-		nv_wr32(priv, 0x100c90, 0x000707ff);
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_wr32(priv, 0x100c90, 0x000d0fff);
-		break;
-	case 0xaf:
-		nv_wr32(priv, 0x100c90, 0x089d1fff);
-		break;
-	default:
-		nv_wr32(priv, 0x100c90, 0x001d07ff);
-		break;
-	}
-
+	nv_wr32(priv, 0x100c90, impl->trap);
 	return 0;
 }
 
-struct nouveau_oclass
-nv50_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nv50_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x50),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv50_fb_ctor,
 		.dtor = nv50_fb_dtor,
 		.init = nv50_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nv50_ram_oclass,
+	.trap = 0x000707ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
new file mode 100644
index 0000000..c5e5a88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FB_NV50_H__
+#define __NVKM_FB_NV50_H__
+
+#include "priv.h"
+
+struct nv50_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
+int  nv50_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+void nv50_fb_dtor(struct nouveau_object *);
+int  nv50_fb_init(struct nouveau_object *);
+
+struct nv50_fb_impl {
+	struct nouveau_fb_impl base;
+	u32 trap;
+};
+
+#define nv50_ram_create(p,e,o,d)                                               \
+	nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int  nv50_ram_create_(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, int, void **);
+int  nv50_ram_get(struct nouveau_fb *, u64 size, u32 align, u32 ncmin,
+		  u32 memtype, struct nouveau_mem **);
+void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
+extern int nv50_fb_memtype[0x80];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
new file mode 100644
index 0000000..cf0e767
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv84.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nv84_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0x84),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nv50_ram_oclass,
+	.trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
new file mode 100644
index 0000000..dab6e1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nva3.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nva3_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xa3),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nva3_ram_oclass,
+	.trap = 0x000d0fff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
new file mode 100644
index 0000000..cba8e68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaa.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaa_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xaa),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nvaa_ram_oclass,
+	.trap = 0x001d07ff,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
new file mode 100644
index 0000000..5423faa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvaf.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+nvaf_fb_oclass = &(struct nv50_fb_impl) {
+	.base.base.handle = NV_SUBDEV(FB, 0xaf),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.base.memtype = nv50_fb_memtype_valid,
+	.base.ram = &nvaa_ram_oclass,
+	.trap = 0x089d1fff,
+}.base.base;
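
The new nv84/nva3/nvaa/nvaf files above exist purely to carry data: the 0x100c90 trap value that nv50_fb_init used to pick with a chipset switch now lives in each oclass's .trap field, which nv50_fb_init reads back through the impl cast. The following standalone sanity sketch (not kernel code) checks that the values carried by the new files match what the removed switch produced, for the chipsets that gained files in this series.

/* Compare the old per-chipset 0x100c90 values against the .trap fields of the
 * new nv50/nv84/nva3/nvaa/nvaf oclasses.  Only chipsets with new files in this
 * series are checked here.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t old_trap_value(int chipset)
{
	switch (chipset) {
	case 0x50:
		return 0x000707ff;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		return 0x000d0fff;
	case 0xaf:
		return 0x089d1fff;
	default:
		return 0x001d07ff;
	}
}

int main(void)
{
	assert(old_trap_value(0x50) == 0x000707ff);	/* nv50_fb_oclass .trap */
	assert(old_trap_value(0x84) == 0x001d07ff);	/* nv84_fb_oclass .trap */
	assert(old_trap_value(0xa3) == 0x000d0fff);	/* nva3_fb_oclass .trap */
	assert(old_trap_value(0xaa) == 0x001d07ff);	/* nvaa_fb_oclass .trap */
	assert(old_trap_value(0xaf) == 0x089d1fff);	/* nvaf_fb_oclass .trap */
	return 0;
}
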
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index f35d76f..e5fc37c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -22,24 +22,18 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
-
-struct nvc0_fb_priv {
-	struct nouveau_fb base;
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
-};
+#include "nvc0.h"
 
 extern const u8 nvc0_pte_storage_type_map[256];
 
-static bool
+bool
 nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((nvc0_pte_storage_type_map[memtype] != 0xff));
 }
 
-static int
+int
 nvc0_fb_init(struct nouveau_object *object)
 {
 	struct nvc0_fb_priv *priv = (void *)object;
@@ -54,7 +48,7 @@
 	return 0;
 }
 
-static void
+void
 nvc0_fb_dtor(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -69,7 +63,7 @@
 	nouveau_fb_destroy(&priv->base);
 }
 
-static int
+int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
@@ -78,13 +72,11 @@
 	struct nvc0_fb_priv *priv;
 	int ret;
 
-	ret = nouveau_fb_create(parent, engine, oclass, &nvc0_ram_oclass, &priv);
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.memtype_valid = nvc0_fb_memtype_valid;
-
 	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (priv->r100c10_page) {
 		priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
@@ -97,14 +89,15 @@
 	return 0;
 }
 
-
-struct nouveau_oclass
-nvc0_fb_oclass = {
-	.handle = NV_SUBDEV(FB, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
+struct nouveau_oclass *
+nvc0_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_fb_ctor,
 		.dtor = nvc0_fb_dtor,
 		.init = nvc0_fb_init,
 		.fini = _nouveau_fb_fini,
 	},
-};
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &nvc0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
new file mode 100644
index 0000000..9e1931e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.h
@@ -0,0 +1,29 @@
+#ifndef __NVKM_RAM_NVC0_H__
+#define __NVKM_RAM_NVC0_H__
+
+#include "priv.h"
+#include "nv50.h"
+
+struct nvc0_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c10_page;
+	dma_addr_t r100c10;
+};
+
+int  nvc0_fb_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+void nvc0_fb_dtor(struct nouveau_object *);
+int  nvc0_fb_init(struct nouveau_object *);
+bool nvc0_fb_memtype_valid(struct nouveau_fb *, u32);
+
+
+#define nvc0_ram_create(p,e,o,d)                                               \
+	nvc0_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
+int  nvc0_ram_create_(struct nouveau_object *, struct nouveau_object *,
+		      struct nouveau_oclass *, int, void **);
+int  nvc0_ram_get(struct nouveau_fb *, u64, u32, u32, u32,
+		  struct nouveau_mem **);
+void nvc0_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
new file mode 100644
index 0000000..595db50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nve0.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+struct nouveau_oclass *
+nve0_fb_oclass = &(struct nouveau_fb_impl) {
+	.base.handle = NV_SUBDEV(FB, 0xe0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fb_ctor,
+		.dtor = nvc0_fb_dtor,
+		.init = nvc0_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+	.memtype = nvc0_fb_memtype_valid,
+	.ram = &nve0_ram_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index db9d6dd..4931252 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -12,6 +12,8 @@
 #define nouveau_ram_fini(p,s)                                                  \
 	nouveau_object_fini(&(p)->base, (s))
 
+#define nouveau_ram_create_(p,e,o,s,d)                                         \
+	nouveau_object_create_((p), (e), (o), 0, (s), (void **)d)
 #define _nouveau_ram_dtor nouveau_object_destroy
 #define _nouveau_ram_init nouveau_object_init
 #define _nouveau_ram_fini nouveau_object_fini
@@ -26,10 +28,16 @@
 extern struct nouveau_oclass nv49_ram_oclass;
 extern struct nouveau_oclass nv4e_ram_oclass;
 extern struct nouveau_oclass nv50_ram_oclass;
+extern struct nouveau_oclass nva3_ram_oclass;
+extern struct nouveau_oclass nvaa_ram_oclass;
 extern struct nouveau_oclass nvc0_ram_oclass;
+extern struct nouveau_oclass nve0_ram_oclass;
 
-#define nouveau_fb_create(p,e,c,r,d)                                           \
-	nouveau_fb_create_((p), (e), (c), (r), sizeof(**d), (void **)d)
+int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr5_calc(struct nouveau_ram *ram);
+
+#define nouveau_fb_create(p,e,c,d)                                             \
+	nouveau_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
 #define nouveau_fb_destroy(p) ({                                               \
 	struct nouveau_fb *pfb = (p);                                          \
 	_nouveau_fb_dtor(nv_object(pfb));                                      \
@@ -44,44 +52,21 @@
 })
 
 int nouveau_fb_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, struct nouveau_oclass *,
-		       int length, void **pobject);
+		       struct nouveau_oclass *, int, void **);
 void _nouveau_fb_dtor(struct nouveau_object *);
 int  _nouveau_fb_init(struct nouveau_object *);
 int  _nouveau_fb_fini(struct nouveau_object *, bool);
 
-struct nouveau_bios;
-int  nouveau_fb_bios_memtype(struct nouveau_bios *);
+struct nouveau_fb_impl {
+	struct nouveau_oclass base;
+	struct nouveau_oclass *ram;
+	bool (*memtype)(struct nouveau_fb *, u32);
+};
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+bool nv50_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
-void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
-void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int  nv30_fb_init(struct nouveau_object *);
-void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
-		       struct nouveau_fb_tile *);
-
-int  nv41_fb_init(struct nouveau_object *);
-void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-int  nv44_fb_init(struct nouveau_object *);
-void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
-
-void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-
-void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
-extern int nv50_fb_memtype[0x80];
+struct nouveau_bios;
+int  nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 #endif
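
The create macros in priv.h (and in the nv50/nvc0 headers above) all follow the same sizeof(**d) convention: the caller passes the address of its typed pointer, the macro derives the allocation size from the pointed-to type, and a single generic _create_ helper can then service every subclass without knowing its layout. Below is a minimal userspace sketch of the pattern, with a hypothetical generic_create_ standing in for nouveau_object_create_.

/* Minimal sketch (hypothetical helper names) of the sizeof(**d) convention:
 * one generic allocator, with the size taken from the caller's pointer type.
 */
#include <stdlib.h>
#include <string.h>

static int generic_create_(int length, void **pobject)
{
	*pobject = calloc(1, length);
	return *pobject ? 0 : -1;	/* the kernel code would return -ENOMEM */
}

#define generic_create(d) generic_create_(sizeof(**(d)), (void **)(d))

struct my_ram_priv {
	int regions;
	char name[32];
};

int main(void)
{
	struct my_ram_priv *priv;

	if (generic_create(&priv))
		return 1;
	priv->regions = 8;
	strcpy(priv->name, "example");
	free(priv);
	return 0;
}
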
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
new file mode 100644
index 0000000..0f57fcf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -0,0 +1,118 @@
+#ifndef __NVKM_FBRAM_FUC_H__
+#define __NVKM_FBRAM_FUC_H__
+
+#include <subdev/pwr.h>
+
+struct ramfuc {
+	struct nouveau_memx *memx;
+	struct nouveau_fb *pfb;
+	int sequence;
+};
+
+struct ramfuc_reg {
+	int sequence;
+	bool force;
+	u32 addr[2];
+	u32 data;
+};
+
+static inline struct ramfuc_reg
+ramfuc_reg2(u32 addr1, u32 addr2)
+{
+	return (struct ramfuc_reg) {
+		.sequence = 0,
+		.addr = { addr1, addr2 },
+		.data = 0xdeadbeef,
+	};
+}
+
+static inline struct ramfuc_reg
+ramfuc_reg(u32 addr)
+{
+	return ramfuc_reg2(addr, addr);
+}
+
+static inline int
+ramfuc_init(struct ramfuc *ram, struct nouveau_fb *pfb)
+{
+	struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+	int ret;
+
+	ret = nouveau_memx_init(ppwr, &ram->memx);
+	if (ret)
+		return ret;
+
+	ram->sequence++;
+	ram->pfb = pfb;
+	return 0;
+}
+
+static inline int
+ramfuc_exec(struct ramfuc *ram, bool exec)
+{
+	int ret = 0;
+	if (ram->pfb) {
+		ret = nouveau_memx_fini(&ram->memx, exec);
+		ram->pfb = NULL;
+	}
+	return ret;
+}
+
+static inline u32
+ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+	if (reg->sequence != ram->sequence)
+		reg->data = nv_rd32(ram->pfb, reg->addr[0]);
+	return reg->data;
+}
+
+static inline void
+ramfuc_wr32(struct ramfuc *ram, struct ramfuc_reg *reg, u32 data)
+{
+	reg->sequence = ram->sequence;
+	reg->data = data;
+	if (reg->addr[0] != reg->addr[1])
+		nouveau_memx_wr32(ram->memx, reg->addr[1], reg->data);
+	nouveau_memx_wr32(ram->memx, reg->addr[0], reg->data);
+}
+
+static inline void
+ramfuc_nuke(struct ramfuc *ram, struct ramfuc_reg *reg)
+{
+	reg->force = true;
+}
+
+static inline u32
+ramfuc_mask(struct ramfuc *ram, struct ramfuc_reg *reg, u32 mask, u32 data)
+{
+	u32 temp = ramfuc_rd32(ram, reg);
+	if (temp != ((temp & ~mask) | data) || reg->force) {
+		ramfuc_wr32(ram, reg, (temp & ~mask) | data);
+		reg->force = false;
+	}
+	return temp;
+}
+
+static inline void
+ramfuc_wait(struct ramfuc *ram, u32 addr, u32 mask, u32 data, u32 nsec)
+{
+	nouveau_memx_wait(ram->memx, addr, mask, data, nsec);
+}
+
+static inline void
+ramfuc_nsec(struct ramfuc *ram, u32 nsec)
+{
+	nouveau_memx_nsec(ram->memx, nsec);
+}
+
+#define ram_init(s,p)       ramfuc_init(&(s)->base, (p))
+#define ram_exec(s,e)       ramfuc_exec(&(s)->base, (e))
+#define ram_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define ram_rd32(s,r)       ramfuc_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d)     ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r)       ramfuc_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d)   ramfuc_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
+#define ram_nsec(s,n)       ramfuc_nsec(&(s)->base, (n))
+
+#endif
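
ramfuc.h mirrors the hwsq helpers used by the nv50 path further down: each register is shadowed in a ramfuc_reg, the per-script sequence number invalidates stale cached reads, ramfuc_nuke forces the next masked update to be emitted even if no bits change, and ramfuc_reg2 lets one shadow write two mirrored addresses (used for per-rank mode registers). The following is a self-contained sketch of the caching/force scheme only, with a fake MMIO array instead of the memx back end so it runs anywhere.

/* Standalone sketch of the shadowing scheme: a register value is cached per
 * reclock script, writes refresh the cache, and a masked update is emitted
 * only when bits actually change (or after a "nuke").
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_mmio[16];
static int writes;			/* counts emitted writes */

struct shadow_reg {
	int sequence;
	bool force;
	unsigned addr;
	uint32_t data;
};

struct script {
	int sequence;			/* bumped once per reclock script */
};

static uint32_t sh_rd32(struct script *s, struct shadow_reg *r)
{
	if (r->sequence != s->sequence)	/* cache is stale for this script */
		r->data = fake_mmio[r->addr];
	return r->data;
}

static void sh_wr32(struct script *s, struct shadow_reg *r, uint32_t data)
{
	r->sequence = s->sequence;
	r->data = data;
	fake_mmio[r->addr] = data;
	writes++;
}

static void sh_mask(struct script *s, struct shadow_reg *r,
		    uint32_t mask, uint32_t data)
{
	uint32_t temp = sh_rd32(s, r);
	if (temp != ((temp & ~mask) | data) || r->force) {
		sh_wr32(s, r, (temp & ~mask) | data);
		r->force = false;
	}
}

int main(void)
{
	struct script s = { .sequence = 1 };
	struct shadow_reg reg = { .addr = 3 };

	fake_mmio[3] = 0x00000100;
	sh_mask(&s, &reg, 0x00000100, 0x00000100);	/* no change: skipped */
	assert(writes == 0);
	reg.force = true;				/* "nuke": force next update */
	sh_mask(&s, &reg, 0x00000100, 0x00000100);
	assert(writes == 1);
	return 0;
}

The point of the scheme is that reclock scripts can be written as straight-line ram_mask()/ram_wr32() calls while only the registers that actually change get queued through the memx interface.
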
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
index ee49ac4..7648beb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv40.c
@@ -22,7 +22,154 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
+#include "nv40.h"
+
+int
+nv40_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nvbios_pll pll;
+	int N1, M1, N2, M2;
+	int log2P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x04, &pll);
+	if (ret) {
+		nv_error(pfb, "mclk pll data not found\n");
+		return ret;
+	}
+
+	ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
+			    &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		return ret;
+
+	ram->ctrl  = 0x80000000 | (log2P << 16);
+	ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20;
+	if (N2 == M2) {
+		ram->ctrl |= 0x00000100;
+		ram->coef  = (N1 << 8) | M1;
+	} else {
+		ram->ctrl |= 0x40000000;
+		ram->coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+	return 0;
+}
+
+int
+nv40_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv40_ram *ram = (void *)pfb->ram;
+	struct bit_entry M;
+	u32 crtc_mask = 0;
+	u8  sr1[2];
+	int i;
+
+	/* determine which CRTCs are active, fetch VGA_SR1 for each */
+	for (i = 0; i < 2; i++) {
+		u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+		u32 cnt = 0;
+		do {
+			if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
+				nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+				if (!(sr1[i] & 0x20))
+					crtc_mask |= (1 << i);
+				break;
+			}
+			udelay(1);
+		} while (cnt++ < 32);
+	}
+
+	/* wait for vblank start on active crtcs, disable memory access */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+	}
+
+	/* prepare ram for reclocking */
+	nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
+	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
+	nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+	/* change the PLL of each memory partition */
+	nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
+	switch (nv_device(pfb)->chipset) {
+	case 0x40:
+	case 0x45:
+	case 0x41:
+	case 0x42:
+	case 0x47:
+		nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
+		nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x004048, ram->coef);
+		nv_wr32(pfb, 0x004030, ram->coef);
+	case 0x43:
+	case 0x49:
+	case 0x4b:
+		nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x00403c, ram->coef);
+	default:
+		nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
+		nv_wr32(pfb, 0x004024, ram->coef);
+		break;
+	}
+	udelay(100);
+	nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+
+	/* re-enable normal operation of memory controller */
+	nv_wr32(pfb, 0x1002dc, 0x00000000);
+	nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+	udelay(100);
+
+	/* execute memory reset script from vbios */
+	if (!bit_entry(bios, 'M', &M)) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(pfb),
+			.bios = bios,
+			.offset = nv_ro16(bios, M.offset + 0x00),
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	/* make sure we're in vblank (hopefully the same one as before), and
+	 * then re-enable crtc memory access
+	 */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+	}
+
+	return 0;
+}
+
+void
+nv40_ram_tidy(struct nouveau_fb *pfb)
+{
+}
 
 static int
 nv40_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +177,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pbus1218 = nv_rd32(pfb, 0x001218);
 	int ret;
 
@@ -40,15 +187,18 @@
 		return ret;
 
 	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
 	}
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
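
nv40_ram_calc/prog/tidy split reclocking into three hooks wired up on the ram object: calc computes the new PLL ctrl/coef words for a target frequency, prog performs the actual sequence against the hardware (vblank-synchronised, with the memory held in self-refresh around the PLL change), and tidy is a no-op on nv40; nv41/nv44/nv49 below reuse the same triple. The code that drives these hooks is not part of this hunk, so the sketch below is purely illustrative of the intended calling order, with fake types and placeholder bodies.

/* Illustrative only: fake hook triple mimicking calc -> prog -> tidy. */
#include <stdio.h>

struct fake_ram {
	unsigned ctrl, coef;
	int  (*calc)(struct fake_ram *, unsigned freq);
	int  (*prog)(struct fake_ram *);
	void (*tidy)(struct fake_ram *);
};

static int fake_calc(struct fake_ram *ram, unsigned freq)
{
	ram->ctrl = 0x80000000;		/* stand-in for the PLL control word */
	ram->coef = freq / 27000;	/* stand-in for the N/M coefficients */
	return 0;
}

static int fake_prog(struct fake_ram *ram)
{
	printf("would write ctrl=%08x coef=%08x\n", ram->ctrl, ram->coef);
	return 0;
}

static void fake_tidy(struct fake_ram *ram) { (void)ram; }

int main(void)
{
	struct fake_ram ram = { .calc = fake_calc, .prog = fake_prog,
				.tidy = fake_tidy };
	int ret = ram.calc(&ram, 400000);
	if (ret == 0)
		ret = ram.prog(&ram);
	ram.tidy(&ram);
	return ret;
}
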
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
index 1dab7e1..d64498a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv41.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv41_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,15 +40,18 @@
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
index 25fff84..089acac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv44.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv44_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb474 = nv_rd32(pfb, 0x100474);
 	int ret;
 
@@ -40,13 +40,16 @@
 		return ret;
 
 	if (pfb474 & 0x00000004)
-		ram->type = NV_MEM_TYPE_GDDR3;
+		ram->base.type = NV_MEM_TYPE_GDDR3;
 	if (pfb474 & 0x00000002)
-		ram->type = NV_MEM_TYPE_DDR2;
+		ram->base.type = NV_MEM_TYPE_DDR2;
 	if (pfb474 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		ram->base.type = NV_MEM_TYPE_DDR1;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index ab7ef0a..baa013a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include "priv.h"
+#include "nv40.h"
 
 static int
 nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -30,7 +30,7 @@
 		struct nouveau_object **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_ram *ram;
+	struct nv40_ram *ram;
 	u32 pfb914 = nv_rd32(pfb, 0x100914);
 	int ret;
 
@@ -40,15 +40,18 @@
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->base.tags  =  nv_rd32(pfb, 0x100320);
+	ram->base.calc = nv40_ram_calc;
+	ram->base.prog = nv40_ram_prog;
+	ram->base.tidy = nv40_ram_tidy;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index 903baff..76762a1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -23,8 +23,215 @@
  */
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/timing.h>
+#include <subdev/clock/pll.h>
+#include <subdev/fb.h>
+
+#include <core/option.h>
 #include <core/mm.h>
-#include "priv.h"
+
+#include "ramseq.h"
+
+#include "nv50.h"
+
+struct nv50_ramseq {
+	struct hwsq base;
+	struct hwsq_reg r_0x002504;
+	struct hwsq_reg r_0x004008;
+	struct hwsq_reg r_0x00400c;
+	struct hwsq_reg r_0x00c040;
+	struct hwsq_reg r_0x100210;
+	struct hwsq_reg r_0x1002d0;
+	struct hwsq_reg r_0x1002d4;
+	struct hwsq_reg r_0x1002dc;
+	struct hwsq_reg r_0x100da0[8];
+	struct hwsq_reg r_0x100e20;
+	struct hwsq_reg r_0x100e24;
+	struct hwsq_reg r_0x611200;
+	struct hwsq_reg r_timing[9];
+	struct hwsq_reg r_mr[4];
+};
+
+struct nv50_ram {
+	struct nouveau_ram base;
+	struct nv50_ramseq hwsq;
+};
+
+#define QFX5800NVA0 1
+
+static int
+nv50_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+	struct nvbios_perfE perfE;
+	struct nvbios_pll mpll;
+	struct bit_entry M;
+	struct {
+		u32 data;
+		u8  size;
+	} ramcfg, timing;
+	u8  ver, hdr, cnt, strap;
+	u32 data;
+	int N1, M1, N2, M2, P;
+	int ret, i;
+
+	/* lookup closest matching performance table entry for frequency */
+	i = 0;
+	do {
+		ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+					   &ramcfg.size, &perfE);
+		if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
+		    (ramcfg.size < 2)) {
+			nv_error(pfb, "invalid/missing perftab entry\n");
+			return -EINVAL;
+		}
+	} while (perfE.memory < freq);
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 1 || M.length < 5) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 3);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data += hdr + (strap * ramcfg.size);
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver, &hdr);
+		if (!timing.data || ver != 0x10 || hdr < 0x12) {
+			nv_error(pfb, "invalid/missing timing entry "
+				 "%02x %04x %02x %02x\n",
+				 strap, timing.data, ver, hdr);
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = ram_init(hwsq, nv_subdev(pfb));
+	if (ret)
+		return ret;
+
+	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
+	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
+	ram_wr32(hwsq, 0x611200, 0x00003300);
+	ram_wr32(hwsq, 0x002504, 0x00000001); /* block fifo */
+	ram_nsec(hwsq, 8000);
+	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
+	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+
+	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
+	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
+	ram_wr32(hwsq, 0x100210, 0x00000000); /* disable auto-refresh */
+	ram_wr32(hwsq, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
+	mpll.vco2.max_freq = 0;
+	if (ret == 0) {
+		ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+				   &N1, &M1, &N2, &M2, &P);
+		if (ret == 0)
+			ret = -EINVAL;
+	}
+
+	if (ret < 0)
+		return ret;
+
+	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
+	ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
+	ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
+					     (P << 22) | (P << 16));
+#if QFX5800NVA0
+	for (i = 0; i < 8; i++)
+		ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
+#endif
+	ram_nsec(hwsq, 96000); /*XXX*/
+	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
+
+	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
+
+	ram_nsec(hwsq, 12000);
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+		ram_nuke(hwsq, mr[0]); /* force update */
+		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		break;
+	case NV_MEM_TYPE_GDDR3:
+		ram_mask(hwsq, mr[2], 0x000, 0x000);
+		ram_nuke(hwsq, mr[0]); /* force update */
+		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		break;
+	default:
+		break;
+	}
+
+	ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
+
+	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+
+#if QFX5800NVA0
+	ram_nuke(hwsq, 0x100e24);
+	ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
+	ram_nuke(hwsq, 0x100e20);
+	ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
+#endif
+
+	ram_mask(hwsq, mr[0], 0x100, 0x100);
+	ram_mask(hwsq, mr[0], 0x100, 0x000);
+
+	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
+	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
+	ram_wr32(hwsq, 0x611200, 0x00003330);
+	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+	return 0;
+}
+
+static int
+nv50_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+
+	ram_exec(hwsq, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nv50_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ramseq *hwsq = &ram->hwsq;
+	ram_exec(hwsq, false);
+}
 
 void
 __nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
@@ -57,7 +264,7 @@
 	kfree(mem);
 }
 
-static int
+int
 nv50_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nouveau_mem **pmem)
 {
@@ -160,77 +367,114 @@
 	return rblock_size;
 }
 
-static int
-nv50_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
-		struct nouveau_oclass *oclass, void *data, u32 datasize,
-		struct nouveau_object **pobject)
+int
+nv50_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, int length, void **pobject)
 {
-	struct nouveau_fb *pfb = nouveau_fb(parent);
-	struct nouveau_device *device = nv_device(pfb);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_ram *ram;
 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 size;
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_ram *ram;
 	int ret;
 
-	ret = nouveau_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nouveau_ram_create_(parent, engine, oclass, length, pobject);
+	ram = *pobject;
 	if (ret)
 		return ret;
 
 	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) |
-		       ((ram->size & 0x000000ff) << 32);
+	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
 
-	size = (ram->size >> 12) - rsvd_head - rsvd_tail;
-	switch (device->chipset) {
-	case 0xaa:
-	case 0xac:
-	case 0xaf: /* IGPs, no reordering, no real VRAM */
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
-		if (ret)
-			return ret;
-
-		ram->type   = NV_MEM_TYPE_STOLEN;
-		ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 1:
+		if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+			ram->type = NV_MEM_TYPE_DDR3;
+		else
+			ram->type = NV_MEM_TYPE_DDR2;
 		break;
+	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
+	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
 	default:
-		switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-		case 0: ram->type = NV_MEM_TYPE_DDR1; break;
-		case 1:
-			if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-				ram->type = NV_MEM_TYPE_DDR3;
-			else
-				ram->type = NV_MEM_TYPE_DDR2;
-			break;
-		case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-		case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-		case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
-		default:
-			break;
-		}
-
-		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
-				      nv50_fb_vram_rblock(pfb, ram) >> 12);
-		if (ret)
-			return ret;
-
-		ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-		ram->tags  =  nv_rd32(pfb, 0x100320);
 		break;
 	}
 
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+			      (rsvd_head + rsvd_tail),
+			      nv50_fb_vram_rblock(pfb, ram) >> 12);
+	if (ret)
+		return ret;
+
+	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+	ram->tags  =  nv_rd32(pfb, 0x100320);
 	ram->get = nv50_ram_get;
 	ram->put = nv50_ram_put;
 	return 0;
 }
 
+static int
+nv50_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_ram *ram;
+	int ret, i;
+
+	ret = nv50_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+	case NV_MEM_TYPE_GDDR3:
+		ram->base.calc = nv50_ram_calc;
+		ram->base.prog = nv50_ram_prog;
+		ram->base.tidy = nv50_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
+	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
+	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
+	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
+	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
+	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
+	for (i = 0; i < 8; i++)
+		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
+	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
+	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
+
+	for (i = 0; i < 9; i++)
+		ram->hwsq.r_timing[i] = hwsq_reg(0x100220 + (i * 0x04));
+
+	if (ram->base.ranks > 1) {
+		ram->hwsq.r_mr[0] = hwsq_reg2(0x1002c0, 0x1002c8);
+		ram->hwsq.r_mr[1] = hwsq_reg2(0x1002c4, 0x1002cc);
+		ram->hwsq.r_mr[2] = hwsq_reg2(0x1002e0, 0x1002e8);
+		ram->hwsq.r_mr[3] = hwsq_reg2(0x1002e4, 0x1002ec);
+	} else {
+		ram->hwsq.r_mr[0] = hwsq_reg(0x1002c0);
+		ram->hwsq.r_mr[1] = hwsq_reg(0x1002c4);
+		ram->hwsq.r_mr[2] = hwsq_reg(0x1002e0);
+		ram->hwsq.r_mr[3] = hwsq_reg(0x1002e4);
+	}
+
+	return 0;
+}
+
 struct nouveau_oclass
 nv50_ram_oclass = {
-	.handle = 0,
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_ram_create,
+		.ctor = nv50_ram_ctor,
 		.dtor = _nouveau_ram_dtor,
 		.init = _nouveau_ram_init,
 		.fini = _nouveau_ram_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
new file mode 100644
index 0000000..f6292cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock/nva3.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nv50.h"
+
+struct nva3_ramfuc {
+	struct ramfuc base;
+	struct ramfuc_reg r_0x004000;
+	struct ramfuc_reg r_0x004004;
+	struct ramfuc_reg r_0x004018;
+	struct ramfuc_reg r_0x004128;
+	struct ramfuc_reg r_0x004168;
+	struct ramfuc_reg r_0x100200;
+	struct ramfuc_reg r_0x100210;
+	struct ramfuc_reg r_0x100220[9];
+	struct ramfuc_reg r_0x1002d0;
+	struct ramfuc_reg r_0x1002d4;
+	struct ramfuc_reg r_0x1002dc;
+	struct ramfuc_reg r_0x10053c;
+	struct ramfuc_reg r_0x1005a0;
+	struct ramfuc_reg r_0x1005a4;
+	struct ramfuc_reg r_0x100714;
+	struct ramfuc_reg r_0x100718;
+	struct ramfuc_reg r_0x10071c;
+	struct ramfuc_reg r_0x100760;
+	struct ramfuc_reg r_0x1007a0;
+	struct ramfuc_reg r_0x1007e0;
+	struct ramfuc_reg r_0x10f804;
+	struct ramfuc_reg r_0x1110e0;
+	struct ramfuc_reg r_0x111100;
+	struct ramfuc_reg r_0x111104;
+	struct ramfuc_reg r_0x611200;
+	struct ramfuc_reg r_mr[4];
+};
+
+struct nva3_ram {
+	struct nouveau_ram base;
+	struct nva3_ramfuc fuc;
+};
+
+static int
+nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	struct nva3_clock_info mclk;
+	struct bit_entry M;
+	u8  ver, cnt, strap;
+	u32 data;
+	struct {
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	u32 r004018, r100760, ctrl;
+	u32 unk714, unk718, unk71c;
+	int ret;
+
+	/* lookup memory config data relevant to the target frequency */
+	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+					 &cnt, &ramcfg.size);
+	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver,
+						 &timing.size);
+		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = nva3_clock_info(nouveau_clock(pfb), 0x12, 0x4000, freq, &mclk);
+	if (ret < 0) {
+		nv_error(pfb, "failed mclk calculation\n");
+		return ret;
+	}
+
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
+	/* XXX: unclear where the 750MHz threshold comes from */
+	if (freq <= 750000) {
+		r004018 = 0x10000000;
+		r100760 = 0x22222222;
+	} else {
+		r004018 = 0x00000000;
+		r100760 = 0x00000000;
+	}
+
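+	/* 0x004000 bit 3 appears to indicate whether mclk is PLL-driven;
+	 * reprogram either the PLL or the source select accordingly
+	 */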
+	ctrl = ram_rd32(fuc, 0x004000);
+	if (ctrl & 0x00000008) {
+		if (mclk.pll) {
+			ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
+			ram_wr32(fuc, 0x004004, mclk.pll);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
+			ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
+			ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
+			ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
+		}
+	} else {
+		u32 ssel = 0x00000101;
+		if (mclk.clk)
+			ssel |= mclk.clk;
+		else
+			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+		ram_mask(fuc, 0x004168, 0x003f3141, ssel);
+	}
+
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+		ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
+		ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
+	}
+
+	if (!(nv_ro08(bios, rammap.data + 0x04) & 0x02))
+		ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+	ram_wr32(fuc, 0x611200, 0x00003300);
+	if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x10))
+		ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+
+	ram_wr32(fuc, 0x1002d4, 0x00000001);
+	ram_wr32(fuc, 0x1002d0, 0x00000001);
+	ram_wr32(fuc, 0x1002d0, 0x00000001);
+	ram_wr32(fuc, 0x100210, 0x00000000);
+	ram_wr32(fuc, 0x1002dc, 0x00000001);
+	ram_nsec(fuc, 2000);
+
+	ctrl = ram_rd32(fuc, 0x004000);
+	if (!(ctrl & 0x00000008) && mclk.pll) {
+		ram_wr32(fuc, 0x004000, (ctrl |=  0x00000008));
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+		ram_wr32(fuc, 0x004018, 0x00001000);
+		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
+		ram_wr32(fuc, 0x004004, mclk.pll);
+		ram_wr32(fuc, 0x004000, (ctrl |=  0x00000001));
+		udelay(64);
+		ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
+		udelay(20);
+	} else
+	if (!mclk.pll) {
+		ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
+		ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
+		ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+	}
+
+	if ( (nv_ro08(bios, rammap.data + 0x04) & 0x08)) {
+		u32 unk5a0 = (nv_ro16(bios, ramcfg.data + 0x05) << 8) |
+			      nv_ro08(bios, ramcfg.data + 0x05);
+		u32 unk5a4 = (nv_ro16(bios, ramcfg.data + 0x07));
+		u32 unk804 = (nv_ro08(bios, ramcfg.data + 0x09) & 0xf0) << 16 |
+			     (nv_ro08(bios, ramcfg.data + 0x03) & 0x0f) << 16 |
+			     (nv_ro08(bios, ramcfg.data + 0x09) & 0x0f) |
+			     0x80000000;
+		ram_wr32(fuc, 0x1005a0, unk5a0);
+		ram_wr32(fuc, 0x1005a4, unk5a4);
+		ram_wr32(fuc, 0x10f804, unk804);
+		ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
+		ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
+		ram_mask(fuc, 0x100760, 0x22222222, r100760);
+		ram_mask(fuc, 0x1007a0, 0x22222222, r100760);
+		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
+	}
+
+	if (mclk.pll) {
+		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
+		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+	}
+
+	/*XXX: LEAVE */
+	ram_wr32(fuc, 0x1002dc, 0x00000000);
+	ram_wr32(fuc, 0x1002d4, 0x00000001);
+	ram_wr32(fuc, 0x100210, 0x80000000);
+	ram_nsec(fuc, 1000);
+	ram_nsec(fuc, 1000);
+
+	ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
+	ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+
+	data = (nv_ro08(bios, ramcfg.data + 0x02) & 0x08) ? 0x00000000 : 0x00001000;
+	ram_mask(fuc, 0x100200, 0x00001000, data);
+
+	unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
+	unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+	unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x20))
+		unk714 |= 0xf0000000;
+	if (!(nv_ro08(bios, ramcfg.data + 0x02) & 0x04))
+		unk714 |= 0x00000010;
+	ram_wr32(fuc, 0x100714, unk714);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x01)
+		unk71c |= 0x00000100;
+	ram_wr32(fuc, 0x10071c, unk71c);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x02)
+		unk718 |= 0x00000100;
+	ram_wr32(fuc, 0x100718, unk718);
+
+	if (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)
+		ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_nsec(fuc, 1000);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+	ram_nsec(fuc, 1000);
+
+	ram_nsec(fuc, 2000);
+	ram_nsec(fuc, 12000);
+
+	ram_wr32(fuc, 0x611200, 0x00003330);
+	if ( (nv_ro08(bios, rammap.data + 0x04) & 0x02))
+		ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
+	if ( (nv_ro08(bios, ramcfg.data + 0x02) & 0x10)) {
+		ram_mask(fuc, 0x111104, 0x00000180, 0x00000180);
+		ram_mask(fuc, 0x111100, 0x40000000, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x111104, 0x00000600, 0x00000600);
+	}
+
+	if (mclk.pll) {
+		ram_mask(fuc, 0x004168, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004168, 0x00000100, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x004000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004128, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x004128, 0x00000100, 0x00000000);
+	}
+
+	return 0;
+}
+
+static int
+nva3_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nva3_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nva3_ram *ram = (void *)pfb->ram;
+	struct nva3_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
+
+static int
+nva3_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nva3_ram   *ram = (void *)object;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* prepare for ddr link training, and load training patterns */
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3: {
+		static const u32 pattern[16] = {
+			0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
+			0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
+			0x33333333, 0x55555555, 0x77777777, 0x66666666,
+			0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
+		};
+
+		nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/
+		nv_wr32(pfb, 0x1005a8, 0x0000ffff);
+		nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
+			nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
+			nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+			nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+		}
+	}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	struct nva3_ram *ram;
+	int ret, i;
+
+	ret = nv50_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+		ram->base.calc = nva3_ram_calc;
+		ram->base.prog = nva3_ram_prog;
+		ram->base.tidy = nva3_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
+	ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
+	ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
+	ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
+	ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
+	ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
+	ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
+	for (i = 0; i < 9; i++)
+		ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
+	ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
+	ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
+	ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
+	ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
+	ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
+	ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
+	ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
+	ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
+	ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
+	ram->fuc.r_0x100760 = ramfuc_reg(0x100760);
+	ram->fuc.r_0x1007a0 = ramfuc_reg(0x1007a0);
+	ram->fuc.r_0x1007e0 = ramfuc_reg(0x1007e0);
+	ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
+	ram->fuc.r_0x1110e0 = ramfuc_reg(0x1110e0);
+	ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
+	ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
+	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+	if (ram->base.ranks > 1) {
+		ram->fuc.r_mr[0] = ramfuc_reg2(0x1002c0, 0x1002c8);
+		ram->fuc.r_mr[1] = ramfuc_reg2(0x1002c4, 0x1002cc);
+		ram->fuc.r_mr[2] = ramfuc_reg2(0x1002e0, 0x1002e8);
+		ram->fuc.r_mr[3] = ramfuc_reg2(0x1002e4, 0x1002ec);
+	} else {
+		ram->fuc.r_mr[0] = ramfuc_reg(0x1002c0);
+		ram->fuc.r_mr[1] = ramfuc_reg(0x1002c4);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x1002e0);
+		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_ram_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = nva3_ram_init,
+		.fini = _nouveau_ram_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
new file mode 100644
index 0000000..00f2ca7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+static int
+nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 datasize,
+	      struct nouveau_object **pobject)
+{
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_ram *ram;
+	int ret;
+
+	ret = nouveau_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	ram->size = nv_rd32(pfb, 0x10020c);
+	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
+			      (rsvd_head + rsvd_tail), 1);
+	if (ret)
+		return ret;
+
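+	/* IGPs: no real VRAM, memory is stolen from the host (no reordering) */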
+	ram->type   = NV_MEM_TYPE_STOLEN;
+	ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+	ram->get = nv50_ram_get;
+	ram->put = nv50_ram_put;
+	return 0;
+}
+
+struct nouveau_oclass
+nvaa_ram_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvaa_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = _nouveau_ram_init,
+		.fini = _nouveau_ram_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index cf97c4d..f464547 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -23,9 +23,414 @@
  */
 
 #include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
 #include <subdev/ltcg.h>
 
-#include "priv.h"
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <core/option.h>
+
+#include "ramfuc.h"
+
+#include "nvc0.h"
+
+struct nvc0_ramfuc {
+	struct ramfuc base;
+
+	struct ramfuc_reg r_0x10fe20;
+	struct ramfuc_reg r_0x10fe24;
+	struct ramfuc_reg r_0x137320;
+	struct ramfuc_reg r_0x137330;
+
+	struct ramfuc_reg r_0x132000;
+	struct ramfuc_reg r_0x132004;
+	struct ramfuc_reg r_0x132100;
+
+	struct ramfuc_reg r_0x137390;
+
+	struct ramfuc_reg r_0x10f290;
+	struct ramfuc_reg r_0x10f294;
+	struct ramfuc_reg r_0x10f298;
+	struct ramfuc_reg r_0x10f29c;
+	struct ramfuc_reg r_0x10f2a0;
+
+	struct ramfuc_reg r_0x10f300;
+	struct ramfuc_reg r_0x10f338;
+	struct ramfuc_reg r_0x10f340;
+	struct ramfuc_reg r_0x10f344;
+	struct ramfuc_reg r_0x10f348;
+
+	struct ramfuc_reg r_0x10f910;
+	struct ramfuc_reg r_0x10f914;
+
+	struct ramfuc_reg r_0x100b0c;
+	struct ramfuc_reg r_0x10f050;
+	struct ramfuc_reg r_0x10f090;
+	struct ramfuc_reg r_0x10f200;
+	struct ramfuc_reg r_0x10f210;
+	struct ramfuc_reg r_0x10f310;
+	struct ramfuc_reg r_0x10f314;
+	struct ramfuc_reg r_0x10f610;
+	struct ramfuc_reg r_0x10f614;
+	struct ramfuc_reg r_0x10f800;
+	struct ramfuc_reg r_0x10f808;
+	struct ramfuc_reg r_0x10f824;
+	struct ramfuc_reg r_0x10f830;
+	struct ramfuc_reg r_0x10f988;
+	struct ramfuc_reg r_0x10f98c;
+	struct ramfuc_reg r_0x10f990;
+	struct ramfuc_reg r_0x10f998;
+	struct ramfuc_reg r_0x10f9b0;
+	struct ramfuc_reg r_0x10f9b4;
+	struct ramfuc_reg r_0x10fb04;
+	struct ramfuc_reg r_0x10fb08;
+	struct ramfuc_reg r_0x137300;
+	struct ramfuc_reg r_0x137310;
+	struct ramfuc_reg r_0x137360;
+	struct ramfuc_reg r_0x1373ec;
+	struct ramfuc_reg r_0x1373f0;
+	struct ramfuc_reg r_0x1373f8;
+
+	struct ramfuc_reg r_0x61c140;
+	struct ramfuc_reg r_0x611200;
+
+	struct ramfuc_reg r_0x13d8f4;
+};
+
+struct nvc0_ram {
+	struct nouveau_ram base;
+	struct nvc0_ramfuc fuc;
+	struct nvbios_pll refpll;
+	struct nvbios_pll mempll;
+};
+
+static void
+nvc0_ram_train(struct nvc0_ramfuc *fuc, u32 magic)
+{
+	struct nvc0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_fb *pfb = nouveau_fb(ram);
+	u32 part = nv_rd32(pfb, 0x022438), i;
+	u32 mask = nv_rd32(pfb, 0x022554);
+	u32 addr = 0x110974;
+
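+	/* when bit 31 of the command is set, wait for every unmasked
+	 * partition to report idle before continuing
+	 */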
+	ram_wr32(fuc, 0x10f910, magic);
+	ram_wr32(fuc, 0x10f914, magic);
+
+	for (i = 0; (magic & 0x80000000) && i < part; addr += 0x1000, i++) {
+		if (mask & (1 << i))
+			continue;
+		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+	}
+}
+
+static int
+nvc0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_clock *clk = nouveau_clock(pfb);
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	struct bit_entry M;
+	u8  ver, cnt, strap;
+	u32 data;
+	struct {
+		u32 data;
+		u8  size;
+	} rammap, ramcfg, timing;
+	int ref, div, out;
+	int from, mode;
+	int N1, M1, P;
+	int ret;
+
+	/* lookup memory config data relevant to the target frequency */
+	rammap.data = nvbios_rammap_match(bios, freq / 1000, &ver, &rammap.size,
+					 &cnt, &ramcfg.size);
+	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
+	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	if (strap != 0xff) {
+		timing.data = nvbios_timing_entry(bios, strap, &ver,
+						 &timing.size);
+		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		timing.data = 0;
+	}
+
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
+	/* determine current mclk configuration */
+	from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? */
+
+	/* determine target mclk configuration */
+	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
+		ref = clk->read(clk, nv_clk_src_sppll0);
+	else
+		ref = clk->read(clk, nv_clk_src_sppll1);
+	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
+	out = (ref * 2) / (div + 2);
+	mode = freq != out;
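+	/* mode 1: the divider alone can't hit the target, so the mempll
+	 * will need to be (re)programmed below
+	 */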
+
+	ram_mask(fuc, 0x137360, 0x00000002, 0x00000000);
+
+	if ((ram_rd32(fuc, 0x132000) & 0x00000002) || 0 /*XXX*/) {
+		ram_nuke(fuc, 0x132000);
+		ram_mask(fuc, 0x132000, 0x00000002, 0x00000002);
+		ram_mask(fuc, 0x132000, 0x00000002, 0x00000000);
+	}
+
+	if (mode == 1) {
+		ram_nuke(fuc, 0x10fe20);
+		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000002);
+		ram_mask(fuc, 0x10fe20, 0x00000002, 0x00000000);
+	}
+
+// 0x00020034 // 0x0000000a
+	ram_wr32(fuc, 0x132100, 0x00000001);
+
+	if (mode == 1 && from == 0) {
+		/* calculate refpll */
+		ret = nva3_pll_calc(nv_subdev(pfb), &ram->refpll,
+				    ram->mempll.refclk, &N1, NULL, &M1, &P);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc refpll\n");
+			return ret ? ret : -ERANGE;
+		}
+
+		ram_wr32(fuc, 0x10fe20, 0x20010000);
+		ram_wr32(fuc, 0x137320, 0x00000003);
+		ram_wr32(fuc, 0x137330, 0x81200006);
+		ram_wr32(fuc, 0x10fe24, (P << 16) | (N1 << 8) | M1);
+		ram_wr32(fuc, 0x10fe20, 0x20010001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+
+		/* calculate mempll */
+		ret = nva3_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+				   &N1, NULL, &M1, &P);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc mempll\n");
+			return ret ? ret : -ERANGE;
+		}
+
+		ram_wr32(fuc, 0x10fe20, 0x20010005);
+		ram_wr32(fuc, 0x132004, (P << 16) | (N1 << 8) | M1);
+		ram_wr32(fuc, 0x132000, 0x18010101);
+		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+	} else
+	if (mode == 0) {
+		ram_wr32(fuc, 0x137300, 0x00000003);
+	}
+
+	if (from == 0) {
+		ram_nuke(fuc, 0x10fb04);
+		ram_mask(fuc, 0x10fb04, 0x0000ffff, 0x00000000);
+		ram_nuke(fuc, 0x10fb08);
+		ram_mask(fuc, 0x10fb08, 0x0000ffff, 0x00000000);
+		ram_wr32(fuc, 0x10f988, 0x2004ff00);
+		ram_wr32(fuc, 0x10f98c, 0x003fc040);
+		ram_wr32(fuc, 0x10f990, 0x20012001);
+		ram_wr32(fuc, 0x10f998, 0x00011a00);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+	} else {
+		ram_wr32(fuc, 0x10f988, 0x20010000);
+		ram_wr32(fuc, 0x10f98c, 0x00000000);
+		ram_wr32(fuc, 0x10f990, 0x20012001);
+		ram_wr32(fuc, 0x10f998, 0x00010a00);
+	}
+
+	if (from == 0) {
+// 0x00020039 // 0x000000ba
+	}
+
+// 0x0002003a // 0x00000002
+	ram_wr32(fuc, 0x100b0c, 0x00080012);
+// 0x00030014 // 0x00000000 // 0x02b5f070
+// 0x00030014 // 0x00010000 // 0x02b5f070
+	ram_wr32(fuc, 0x611200, 0x00003300);
+// 0x00020034 // 0x0000000a
+// 0x00030020 // 0x00000001 // 0x00000000
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+	ram_wr32(fuc, 0x10f210, 0x00000000);
+	ram_nsec(fuc, 1000);
+	if (mode == 0)
+		nvc0_ram_train(fuc, 0x000c1001);
+	ram_wr32(fuc, 0x10f310, 0x00000001);
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f090, 0x00000061);
+	ram_wr32(fuc, 0x10f090, 0xc000007f);
+	ram_nsec(fuc, 1000);
+
+	if (from == 0) {
+		ram_wr32(fuc, 0x10f824, 0x00007fd4);
+	} else {
+		ram_wr32(fuc, 0x1373ec, 0x00020404);
+	}
+
+	if (mode == 0) {
+		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+		ram_wr32(fuc, 0x10f830, 0x41500010);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+		ram_mask(fuc, 0x132100, 0x00000100, 0x00000100);
+		ram_wr32(fuc, 0x10f050, 0xff000090);
+		ram_wr32(fuc, 0x1373ec, 0x00020f0f);
+		ram_wr32(fuc, 0x1373f0, 0x00000003);
+		ram_wr32(fuc, 0x137310, 0x81201616);
+		ram_wr32(fuc, 0x132100, 0x00000001);
+// 0x00020039 // 0x000000ba
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+		ram_wr32(fuc, 0x1373f0, 0x00000001);
+		ram_wr32(fuc, 0x10f824, 0x00007e77);
+		ram_wr32(fuc, 0x132000, 0x18030001);
+		ram_wr32(fuc, 0x10f090, 0x4000007e);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f314, 0x00000001);
+		ram_wr32(fuc, 0x10f210, 0x80000000);
+		ram_wr32(fuc, 0x10f338, 0x00300220);
+		ram_wr32(fuc, 0x10f300, 0x0000011d);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f290, 0x02060505);
+		ram_wr32(fuc, 0x10f294, 0x34208288);
+		ram_wr32(fuc, 0x10f298, 0x44050411);
+		ram_wr32(fuc, 0x10f29c, 0x0000114c);
+		ram_wr32(fuc, 0x10f2a0, 0x42e10069);
+		ram_wr32(fuc, 0x10f614, 0x40044f77);
+		ram_wr32(fuc, 0x10f610, 0x40044f77);
+		ram_wr32(fuc, 0x10f344, 0x00600009);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f348, 0x00700008);
+		ram_wr32(fuc, 0x61c140, 0x19240000);
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+		nvc0_ram_train(fuc, 0x80021001);
+		nvc0_ram_train(fuc, 0x80081001);
+		ram_wr32(fuc, 0x10f340, 0x00500004);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f830, 0x01300017);
+		ram_wr32(fuc, 0x10f830, 0x00300017);
+// 0x00030020 // 0x00000000 // 0x00000000
+// 0x00020034 // 0x0000000b
+		ram_wr32(fuc, 0x100b0c, 0x00080028);
+		ram_wr32(fuc, 0x611200, 0x00003330);
+	} else {
+		ram_wr32(fuc, 0x10f800, 0x00001800);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x1373ec, 0x00020404);
+		ram_wr32(fuc, 0x1373f0, 0x00000003);
+		ram_wr32(fuc, 0x10f830, 0x40700010);
+		ram_wr32(fuc, 0x10f830, 0x40500010);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x1373f8, 0x00000000);
+		ram_wr32(fuc, 0x132100, 0x00000101);
+		ram_wr32(fuc, 0x137310, 0x89201616);
+		ram_wr32(fuc, 0x10f050, 0xff000090);
+		ram_wr32(fuc, 0x1373ec, 0x00030404);
+		ram_wr32(fuc, 0x1373f0, 0x00000002);
+	// 0x00020039 // 0x00000011
+		ram_wr32(fuc, 0x132100, 0x00000001);
+		ram_wr32(fuc, 0x1373f8, 0x00002000);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f808, 0x7aaa0050);
+		ram_wr32(fuc, 0x10f830, 0x00500010);
+		ram_wr32(fuc, 0x10f200, 0x00ce1000);
+		ram_wr32(fuc, 0x10f090, 0x4000007e);
+		ram_nsec(fuc, 2000);
+		ram_wr32(fuc, 0x10f314, 0x00000001);
+		ram_wr32(fuc, 0x10f210, 0x80000000);
+		ram_wr32(fuc, 0x10f338, 0x00300200);
+		ram_wr32(fuc, 0x10f300, 0x0000084d);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f290, 0x0b343825);
+		ram_wr32(fuc, 0x10f294, 0x3483028e);
+		ram_wr32(fuc, 0x10f298, 0x440c0600);
+		ram_wr32(fuc, 0x10f29c, 0x0000214c);
+		ram_wr32(fuc, 0x10f2a0, 0x42e20069);
+		ram_wr32(fuc, 0x10f200, 0x00ce0000);
+		ram_wr32(fuc, 0x10f614, 0x60044e77);
+		ram_wr32(fuc, 0x10f610, 0x60044e77);
+		ram_wr32(fuc, 0x10f340, 0x00500000);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f344, 0x00600228);
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f348, 0x00700000);
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x61c140, 0x09a40000);
+
+		nvc0_ram_train(fuc, 0x800e1008);
+
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f800, 0x00001804);
+	// 0x00030020 // 0x00000000 // 0x00000000
+	// 0x00020034 // 0x0000000b
+		ram_wr32(fuc, 0x13d8f4, 0x00000000);
+		ram_wr32(fuc, 0x100b0c, 0x00080028);
+		ram_wr32(fuc, 0x611200, 0x00003330);
+		ram_nsec(fuc, 100000);
+		ram_wr32(fuc, 0x10f9b0, 0x05313f41);
+		ram_wr32(fuc, 0x10f9b4, 0x00002f50);
+
+		nvc0_ram_train(fuc, 0x010c1001);
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000800);
+// 0x00020016 // 0x00000000
+
+	if (mode == 0)
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+	return 0;
+}
+
+static int
+nvc0_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nvc0_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nvc0_ram *ram = (void *)pfb->ram;
+	struct nvc0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
 
 extern const u8 nvc0_pte_storage_type_map[256];
 
@@ -110,10 +515,9 @@
 	return 0;
 }
 
-static int
-nvc0_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
-		struct nouveau_oclass *oclass, void *data, u32 size,
-		struct nouveau_object **pobject)
+int
+nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, int size, void **pobject)
 {
 	struct nouveau_fb *pfb = nouveau_fb(parent);
 	struct nouveau_bios *bios = nouveau_bios(pfb);
@@ -127,8 +531,8 @@
 	bool uniform = true;
 	int ret, part;
 
-	ret = nouveau_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	ret = nouveau_ram_create_(parent, engine, oclass, size, pobject);
+	ram = *pobject;
 	if (ret)
 		return ret;
 
@@ -182,13 +586,158 @@
 	return 0;
 }
 
+static int
+nvc0_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nvc0_ram   *ram = (void *)object;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* prepare for ddr link training, and load training patterns */
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5: {
+		static const u8  train0[] = {
+			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+		};
+		static const u32 train1[] = {
+			0x00000000, 0xffffffff,
+			0x55555555, 0xaaaaaaaa,
+			0x33333333, 0xcccccccc,
+			0xf0f0f0f0, 0x0f0f0f0f,
+			0x00ff00ff, 0xff00ff00,
+			0x0000ffff, 0xffff0000,
+		};
+
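+		/* 0x10f968/0x10f96c appear to select the pattern slot being loaded */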
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+		}
+	}	break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nvc0_ram *ram;
+	int ret;
+
+	ret = nvc0_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
+	if (ret) {
+		nv_error(ram, "mclk refpll data not found\n");
+		return ret;
+	}
+
+	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
+	if (ret) {
+		nv_error(ram, "mclk pll data not found\n");
+		return ret;
+	}
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		ram->base.calc = nvc0_ram_calc;
+		ram->base.prog = nvc0_ram_prog;
+		ram->base.tidy = nvc0_ram_tidy;
+		break;
+	default:
+		nv_warn(ram, "reclocking of this ram type unsupported\n");
+		return 0;
+	}
+
+	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
+	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
+	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+	ram->fuc.r_0x137330 = ramfuc_reg(0x137330);
+
+	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+	ram->fuc.r_0x132100 = ramfuc_reg(0x132100);
+
+	ram->fuc.r_0x137390 = ramfuc_reg(0x137390);
+
+	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+
+	ram->fuc.r_0x10f300 = ramfuc_reg(0x10f300);
+	ram->fuc.r_0x10f338 = ramfuc_reg(0x10f338);
+	ram->fuc.r_0x10f340 = ramfuc_reg(0x10f340);
+	ram->fuc.r_0x10f344 = ramfuc_reg(0x10f344);
+	ram->fuc.r_0x10f348 = ramfuc_reg(0x10f348);
+
+	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+	ram->fuc.r_0x100b0c = ramfuc_reg(0x100b0c);
+	ram->fuc.r_0x10f050 = ramfuc_reg(0x10f050);
+	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+	ram->fuc.r_0x10f988 = ramfuc_reg(0x10f988);
+	ram->fuc.r_0x10f98c = ramfuc_reg(0x10f98c);
+	ram->fuc.r_0x10f990 = ramfuc_reg(0x10f990);
+	ram->fuc.r_0x10f998 = ramfuc_reg(0x10f998);
+	ram->fuc.r_0x10f9b0 = ramfuc_reg(0x10f9b0);
+	ram->fuc.r_0x10f9b4 = ramfuc_reg(0x10f9b4);
+	ram->fuc.r_0x10fb04 = ramfuc_reg(0x10fb04);
+	ram->fuc.r_0x10fb08 = ramfuc_reg(0x10fb08);
+	ram->fuc.r_0x137300 = ramfuc_reg(0x137300);
+	ram->fuc.r_0x137310 = ramfuc_reg(0x137310);
+	ram->fuc.r_0x137360 = ramfuc_reg(0x137360);
+	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+	ram->fuc.r_0x1373f8 = ramfuc_reg(0x1373f8);
+
+	ram->fuc.r_0x61c140 = ramfuc_reg(0x61c140);
+	ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
+
+	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ram_oclass = {
 	.handle = 0,
 	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_ram_create,
+		.ctor = nvc0_ram_ctor,
 		.dtor = _nouveau_ram_dtor,
-		.init = _nouveau_ram_init,
+		.init = nvc0_ram_init,
 		.fini = _nouveau_ram_fini,
 	}
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
new file mode 100644
index 0000000..bc86cfd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+#include <subdev/bios/timing.h>
+
+#include <subdev/clock.h>
+#include <subdev/clock/pll.h>
+
+#include <subdev/timer.h>
+
+#include <core/option.h>
+
+#include "nvc0.h"
+
+#include "ramfuc.h"
+
+struct nve0_ramfuc {
+	struct ramfuc base;
+
+	struct nvbios_pll refpll;
+	struct nvbios_pll mempll;
+
+	struct ramfuc_reg r_gpioMV;
+	u32 r_funcMV[2];
+	struct ramfuc_reg r_gpio2E;
+	u32 r_func2E[2];
+	struct ramfuc_reg r_gpiotrig;
+
+	struct ramfuc_reg r_0x132020;
+	struct ramfuc_reg r_0x132028;
+	struct ramfuc_reg r_0x132024;
+	struct ramfuc_reg r_0x132030;
+	struct ramfuc_reg r_0x132034;
+	struct ramfuc_reg r_0x132000;
+	struct ramfuc_reg r_0x132004;
+	struct ramfuc_reg r_0x132040;
+
+	struct ramfuc_reg r_0x10f248;
+	struct ramfuc_reg r_0x10f290;
+	struct ramfuc_reg r_0x10f294;
+	struct ramfuc_reg r_0x10f298;
+	struct ramfuc_reg r_0x10f29c;
+	struct ramfuc_reg r_0x10f2a0;
+	struct ramfuc_reg r_0x10f2a4;
+	struct ramfuc_reg r_0x10f2a8;
+	struct ramfuc_reg r_0x10f2ac;
+	struct ramfuc_reg r_0x10f2cc;
+	struct ramfuc_reg r_0x10f2e8;
+	struct ramfuc_reg r_0x10f250;
+	struct ramfuc_reg r_0x10f24c;
+	struct ramfuc_reg r_0x10fec4;
+	struct ramfuc_reg r_0x10fec8;
+	struct ramfuc_reg r_0x10f604;
+	struct ramfuc_reg r_0x10f614;
+	struct ramfuc_reg r_0x10f610;
+	struct ramfuc_reg r_0x100770;
+	struct ramfuc_reg r_0x100778;
+	struct ramfuc_reg r_0x10f224;
+
+	struct ramfuc_reg r_0x10f870;
+	struct ramfuc_reg r_0x10f698;
+	struct ramfuc_reg r_0x10f694;
+	struct ramfuc_reg r_0x10f6b8;
+	struct ramfuc_reg r_0x10f808;
+	struct ramfuc_reg r_0x10f670;
+	struct ramfuc_reg r_0x10f60c;
+	struct ramfuc_reg r_0x10f830;
+	struct ramfuc_reg r_0x1373ec;
+	struct ramfuc_reg r_0x10f800;
+	struct ramfuc_reg r_0x10f82c;
+
+	struct ramfuc_reg r_0x10f978;
+	struct ramfuc_reg r_0x10f910;
+	struct ramfuc_reg r_0x10f914;
+
+	struct ramfuc_reg r_mr[16]; /* MR0 - MR8, MR15 */
+
+	struct ramfuc_reg r_0x62c000;
+	struct ramfuc_reg r_0x10f200;
+	struct ramfuc_reg r_0x10f210;
+	struct ramfuc_reg r_0x10f310;
+	struct ramfuc_reg r_0x10f314;
+	struct ramfuc_reg r_0x10f318;
+	struct ramfuc_reg r_0x10f090;
+	struct ramfuc_reg r_0x10f69c;
+	struct ramfuc_reg r_0x10f824;
+	struct ramfuc_reg r_0x1373f0;
+	struct ramfuc_reg r_0x1373f4;
+	struct ramfuc_reg r_0x137320;
+	struct ramfuc_reg r_0x10f65c;
+	struct ramfuc_reg r_0x10f6bc;
+	struct ramfuc_reg r_0x100710;
+	struct ramfuc_reg r_0x10f750;
+};
+
+struct nve0_ram {
+	struct nouveau_ram base;
+	struct nve0_ramfuc fuc;
+	int from;
+	int mode;
+	int N1, fN1, M1, P1;
+	int N2, M2, P2;
+};
+
+/*******************************************************************************
+ * GDDR5
+ ******************************************************************************/
+static void
+train(struct nve0_ramfuc *fuc, u32 magic)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_fb *pfb = nouveau_fb(ram);
+	const int mc = nv_rd32(pfb, 0x02243c);
+	int i;
+
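+	/* issue the training command, then wait for each unit to go idle */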
+	ram_mask(fuc, 0x10f910, 0xbc0e0000, magic);
+	ram_mask(fuc, 0x10f914, 0xbc0e0000, magic);
+	for (i = 0; i < mc; i++) {
+		const u32 addr = 0x110974 + (i * 0x1000);
+		ram_wait(fuc, addr, 0x0000000f, 0x00000000, 500000);
+	}
+}
+
+static void
+r1373f4_init(struct nve0_ramfuc *fuc)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
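+	/* pack mempll/refpll coefficients into their register layouts (P, N, M) */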
+	const u32 mcoef = ((--ram->P2 << 28) | (ram->N2 << 8) | ram->M2);
+	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+	const u32 runk0 = ram->fN1 << 16;
+	const u32 runk1 = ram->fN1;
+
+	if (ram->from == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+	/* (re)program refpll, if required */
+	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+		ram_wr32(fuc, 0x137320, 0x00000000);
+		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+		ram_wr32(fuc, 0x132024, rcoef);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+	}
+
+	/* (re)program mempll, if required */
+	if (ram->mode == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00000002, 0x00000002, 64000);
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00001100);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010100);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00000010);
+}
+
+static void
+r1373f4_fini(struct nve0_ramfuc *fuc, u32 ramcfg)
+{
+	struct nve0_ram *ram = container_of(fuc, typeof(*ram), fuc);
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	u8 v0 = (nv_ro08(bios, ramcfg + 0x03) & 0xc0) >> 6;
+	u8 v1 = (nv_ro08(bios, ramcfg + 0x03) & 0x30) >> 4;
+	u32 tmp;
+
+	tmp = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+	ram_wr32(fuc, 0x1373ec, tmp | (v1 << 16));
+	ram_mask(fuc, 0x1373f0, (~ram->mode & 3), 0x00000000);
+	if (ram->mode == 2) {
+		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000002);
+		ram_mask(fuc, 0x1373f4, 0x00001100, 0x00000000);
+	} else {
+		ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+		ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+	}
+	ram_mask(fuc, 0x10f800, 0x00000030, (v0 ^ v1) << 4);
+}
+
+static int
+nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	const u32 rammap = ram->base.rammap.data;
+	const u32 ramcfg = ram->base.ramcfg.data;
+	const u32 timing = ram->base.timing.data;
+	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+	int mv = 1; /*XXX*/
+	u32 mask, data;
+
+	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	/* MR1: turn termination on early, for some reason.. */
+	if ((ram->base.mr[1] & 0x03c) != 0x030)
+		ram_mask(fuc, mr[1], 0x03c, ram->base.mr[1] & 0x03c);
+
+	if (vc == 1 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+
+	ram_mask(fuc, 0x10f914, 0x01020000, 0x000c0000);
+	ram_mask(fuc, 0x10f910, 0x01020000, 0x000c0000);
+
+	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_nsec(fuc, 1000);
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_wr32(fuc, 0x10f090, 0x00000061);
+	ram_wr32(fuc, 0x10f090, 0xc000007f);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f698, 0x00000000);
+	ram_wr32(fuc, 0x10f69c, 0x00000000);
+
+	/*XXX: there does appear to be some kind of condition here, simply
+	 *     modifying these bits in the vbios from the default pl0
+	 *     entries shows no change.  however, the data does appear to
+	 *     be correct and may be required for the transition back
+	 */
+	mask = 0x800f07e0;
+	data = 0x00030000;
+	if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+		data |= 0x00040000;
+
+	if (1) {
+		data |= 0x800807e0;
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+		case 0xc0: data &= ~0x00000040; break;
+		case 0x80: data &= ~0x00000100; break;
+		case 0x40: data &= ~0x80000000; break;
+		case 0x00: data &= ~0x00000400; break;
+		}
+
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+		case 0x30: data &= ~0x00000020; break;
+		case 0x20: data &= ~0x00000080; break;
+		case 0x10: data &= ~0x00080000; break;
+		case 0x00: data &= ~0x00000200; break;
+		}
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+		mask |= 0x03000000;
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+		mask |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+		mask |= 0x00004000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+		mask |= 0x00000003;
+	else {
+		mask |= 0x34000000;
+		if (ram_rd32(fuc, 0x10f978) & 0x00800000)
+			mask |= 0x40000000;
+	}
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+	if (ram->from == 2 && ram->mode != 2) {
+		ram_mask(fuc, 0x10f808, 0x00080000, 0x00000000);
+		ram_mask(fuc, 0x10f200, 0x00008000, 0x00008000);
+		ram_mask(fuc, 0x10f800, 0x00000000, 0x00000004);
+		ram_mask(fuc, 0x10f830, 0x00008000, 0x01040010);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+		r1373f4_init(fuc);
+		ram_mask(fuc, 0x1373f0, 0x00000002, 0x00000001);
+		r1373f4_fini(fuc, ramcfg);
+		ram_mask(fuc, 0x10f830, 0x00c00000, 0x00240001);
+	} else
+	if (ram->from != 2 && ram->mode != 2) {
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+	}
+
+	if (ram_have(fuc, gpioMV)) {
+		u32 temp  = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+		if (temp != ram_rd32(fuc, gpioMV)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 64000);
+		}
+	}
+
+	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+		ram_nsec(fuc, 20000);
+	}
+
+	if (ram->from != 2 && ram->mode == 2) {
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+		ram_mask(fuc, 0x1373f0, 0x00000000, 0x00000002);
+		ram_mask(fuc, 0x10f830, 0x00800001, 0x00408010);
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+		ram_mask(fuc, 0x10f808, 0x00000000, 0x00080000);
+		ram_mask(fuc, 0x10f200, 0x00808000, 0x00800000);
+	} else
+	if (ram->from == 2 && ram->mode == 2) {
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000000);
+		r1373f4_init(fuc);
+		r1373f4_fini(fuc, ramcfg);
+	}
+
+	if (ram->mode != 2) /*XXX*/ {
+		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+	}
+
+	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+	data = nv_ro08(bios, ramcfg + 0x04);
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		ram_wr32(fuc, 0x10f698, 0x01010101 * data);
+		ram_wr32(fuc, 0x10f69c, 0x01010101 * data);
+	}
+
+	if (ram->mode != 2) {
+		u32 temp = ram_rd32(fuc, 0x10f694) & ~0xff00ff00;
+		ram_wr32(fuc, 0x10f694, temp | (0x01000100 * data));
+	}
+
+	if (ram->mode == 2 && (nv_ro08(bios, ramcfg + 0x08) & 0x10))
+		data = 0x00000080;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f60c, 0x00000080, data);
+
+	mask = 0x00070000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+		data |= 0x03000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+		data |= 0x00002000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+		data |= 0x00004000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+		data |= 0x00000003;
+	else
+		data |= 0x74000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x08)
+		data = 0x00000000;
+	else
+		data = 0x00001000;
+	ram_mask(fuc, 0x10f200, 0x00001000, data);
+
+	if (ram_rd32(fuc, 0x10f670) & 0x80000000) {
+		ram_nsec(fuc, 10000);
+		ram_mask(fuc, 0x10f670, 0x80000000, 0x00000000);
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+		data = 0x00100000;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+	data = 0x00000000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x08)
+		data |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x04)
+		data |= 0x00001000;
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x02)
+		data |= 0x00004000;
+	ram_mask(fuc, 0x10f830, 0x00007000, data);
+
+	/* PFB timing */
+	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+	data = (nv_ro08(bios, ramcfg + 0x02) & 0x03) << 8;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+		data |= 0x70000000;
+	ram_mask(fuc, 0x10f604, 0x70000300, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+		data |= 0x00000100;
+	ram_mask(fuc, 0x10f614, 0x70000000, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 28;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x02)
+		data |= 0x00000100;
+	ram_mask(fuc, 0x10f610, 0x70000000, data);
+
+	mask = 0x33f00000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+		data |= 0x20200000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+		data |= 0x12800000;
+	/*XXX: see note above about there probably being some condition
+	 *     for the 10f824 stuff that uses ramcfg 3...
+	 */
+	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+				mask |= 0x00000020;
+			else
+				data |= 0x00000020;
+			mask |= 0x00000004;
+		}
+	} else {
+		mask |= 0x40000020;
+		data |= 0x00000004;
+	}
+
+	ram_mask(fuc, 0x10f808, mask, data);
+
+	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+	data = nv_ro08(bios, ramcfg + 0x02) & 0x03;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x10)
+		data |= 0x00000004;
+	if ((nv_rd32(bios, 0x100770) & 0x00000004) != (data & 0x00000004)) {
+		ram_wr32(fuc, 0x10f750, 0x04000009);
+		ram_wr32(fuc, 0x100710, 0x00000000);
+		ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
+	}
+	ram_mask(fuc, 0x100770, 0x00000007, data);
+
+	data = (nv_ro08(bios, timing + 0x30) & 0x07) << 8;
+	if (nv_ro08(bios, ramcfg + 0x01) & 0x01)
+		data |= 0x80000000;
+	ram_mask(fuc, 0x100778, 0x00000700, data);
+
+	data = nv_ro16(bios, timing + 0x2c);
+	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+	ram_mask(fuc, 0x10f24c, 0x7f000000, (data & 0x1fc0) << 18);
+
+	data = nv_ro08(bios, timing + 0x30);
+	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+	data = nv_ro16(bios, timing + 0x31);
+	ram_mask(fuc, 0x10fec4, 0x041e0f07, (data & 0x0800) << 15 |
+					    (data & 0x0780) << 10 |
+					    (data & 0x0078) <<  5 |
+					    (data & 0x0007));
+	ram_mask(fuc, 0x10fec8, 0x00000027, (data & 0x8000) >> 10 |
+					    (data & 0x7000) >> 12);
+
+	ram_wr32(fuc, 0x10f090, 0x4000007e);
+	ram_nsec(fuc, 1000);
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_nsec(fuc, 2000);
+	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+
+	if ((nv_ro08(bios, ramcfg + 0x08) & 0x10) && (ram->mode == 2) /*XXX*/) {
+		u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
+		train(fuc, 0xa4010000); /*XXX*/
+		ram_nsec(fuc, 1000);
+		ram_wr32(fuc, 0x10f294, temp);
+	}
+
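+	/* program the GDDR5 mode registers for the new configuration */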
+	ram_mask(fuc, mr[3], 0xfff, ram->base.mr[3]);
+	ram_wr32(fuc, mr[0], ram->base.mr[0]);
+	ram_mask(fuc, mr[8], 0xfff, ram->base.mr[8]);
+	ram_nsec(fuc, 1000);
+	ram_mask(fuc, mr[1], 0xfff, ram->base.mr[1]);
+	ram_mask(fuc, mr[5], 0xfff, ram->base.mr[5]);
+	ram_mask(fuc, mr[6], 0xfff, ram->base.mr[6]);
+	ram_mask(fuc, mr[7], 0xfff, ram->base.mr[7]);
+
+	if (vc == 0 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	data  = ram_rd32(fuc, 0x10f978);
+	data &= ~0x00046144;
+	data |=  0x0000000b;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		if (!(nv_ro08(bios, ramcfg + 0x07) & 0x04))
+			data |= 0x0000200c;
+		else
+			data |= 0x00000000;
+	} else {
+		data |= 0x00040044;
+	}
+	ram_wr32(fuc, 0x10f978, data);
+
+	if (ram->mode == 1) {
+		data = ram_rd32(fuc, 0x10f830) | 0x00000001;
+		ram_wr32(fuc, 0x10f830, data);
+	}
+
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08)) {
+		data = 0x88020000;
+		if ( (nv_ro08(bios, ramcfg + 0x07) & 0x04))
+			data |= 0x10000000;
+		if (!(nv_ro08(bios, rammap + 0x08) & 0x10))
+			data |= 0x00080000;
+	} else {
+		data = 0xa40e0000;
+	}
+	train(fuc, data);
+	ram_nsec(fuc, 1000);
+
+	if (ram->mode == 2) { /*XXX*/
+		ram_mask(fuc, 0x10f800, 0x00000004, 0x00000004);
+	}
+
+	/* MR5: (re)enable LP3 if necessary
+	 * XXX: need to find the switch, keeping off for now
+	 */
+	ram_mask(fuc, mr[5], 0x00000004, 0x00000000);
+
+	if (ram->mode != 2) {
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x02) {
+		ram_mask(fuc, 0x10f910, 0x80020000, 0x01000000);
+		ram_mask(fuc, 0x10f914, 0x80020000, 0x01000000);
+	}
+
+	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+		data = 0x00000800;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f200, 0x00000800, data);
+	return 0;
+}
+
+/*******************************************************************************
+ * DDR3
+ ******************************************************************************/
+
+static int
+nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
+	const u32 runk0 = ram->fN1 << 16;
+	const u32 runk1 = ram->fN1;
+	const u32 rammap = ram->base.rammap.data;
+	const u32 ramcfg = ram->base.ramcfg.data;
+	const u32 timing = ram->base.timing.data;
+	int vc = !(nv_ro08(bios, ramcfg + 0x02) & 0x08);
+	int mv = 1; /*XXX*/
+	u32 mask, data;
+
+	ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
+	ram_wr32(fuc, 0x62c000, 0x0f0f0000);
+
+	if (vc == 1 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[1]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
+	if ((nv_ro08(bios, ramcfg + 0x03) & 0xf0))
+		ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
+
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f210, 0x00000000); /* REFRESH_AUTO = 0 */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f090, 0x00000060);
+	ram_wr32(fuc, 0x10f090, 0xc000007e);
+
+	/*XXX: there does appear to be some kind of condition here; simply
+	 *     modifying these bits in the vbios from the default pl0
+	 *     entries shows no change.  However, the data does appear to
+	 *     be correct and may be required for the transition back.
+	 */
+	mask = 0x00010000;
+	data = 0x00010000;
+
+	if (1) {
+		mask |= 0x800807e0;
+		data |= 0x800807e0;
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0xc0) {
+		case 0xc0: data &= ~0x00000040; break;
+		case 0x80: data &= ~0x00000100; break;
+		case 0x40: data &= ~0x80000000; break;
+		case 0x00: data &= ~0x00000400; break;
+		}
+
+		switch (nv_ro08(bios, ramcfg + 0x03) & 0x30) {
+		case 0x30: data &= ~0x00000020; break;
+		case 0x20: data &= ~0x00000080; break;
+		case 0x10: data &= ~0x00080000; break;
+		case 0x00: data &= ~0x00000200; break;
+		}
+	}
+
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x80)
+		mask |= 0x03000000;
+	if (nv_ro08(bios, ramcfg + 0x02) & 0x40)
+		mask |= 0x00002000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x10)
+		mask |= 0x00004000;
+	if (nv_ro08(bios, ramcfg + 0x07) & 0x08)
+		mask |= 0x00000003;
+	else
+		mask |= 0x14000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+
+	ram_mask(fuc, 0x132040, 0x00010000, 0x00000000);
+
+	ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
+	data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
+	data |= (nv_ro08(bios, ramcfg + 0x03) & 0x30) << 12;
+	ram_wr32(fuc, 0x1373ec, data);
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
+
+	/* (re)program refpll, if required */
+	if ((ram_rd32(fuc, 0x132024) & 0xffffffff) != rcoef ||
+	    (ram_rd32(fuc, 0x132034) & 0x0000ffff) != runk1) {
+		ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000000);
+		ram_wr32(fuc, 0x137320, 0x00000000);
+		ram_mask(fuc, 0x132030, 0xffff0000, runk0);
+		ram_mask(fuc, 0x132034, 0x0000ffff, runk1);
+		ram_wr32(fuc, 0x132024, rcoef);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00080000);
+		ram_mask(fuc, 0x132020, 0x00000001, 0x00000001);
+		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
+		ram_mask(fuc, 0x132028, 0x00080000, 0x00000000);
+	}
+
+	ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000010);
+	ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000001);
+	ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+
+	if (ram_have(fuc, gpioMV)) {
+		u32 temp  = ram_mask(fuc, gpioMV, 0x3000, fuc->r_funcMV[mv]);
+		if (temp != ram_rd32(fuc, gpioMV)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 64000);
+		}
+	}
+
+	if ( (nv_ro08(bios, ramcfg + 0x02) & 0x40) ||
+	     (nv_ro08(bios, ramcfg + 0x07) & 0x10)) {
+		ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
+		ram_nsec(fuc, 20000);
+	}
+
+	if (ram->mode != 2) /*XXX*/ {
+		if (nv_ro08(bios, ramcfg + 0x07) & 0x40)
+			ram_mask(fuc, 0x10f670, 0x80000000, 0x80000000);
+	}
+
+	data = (nv_ro08(bios, rammap + 0x11) & 0x0c) >> 2;
+	ram_wr32(fuc, 0x10f65c, 0x00000011 * data);
+	ram_wr32(fuc, 0x10f6b8, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+	ram_wr32(fuc, 0x10f6bc, 0x01010101 * nv_ro08(bios, ramcfg + 0x09));
+
+	mask = 0x00010000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x80))
+		data |= 0x03000000;
+	if (!(nv_ro08(bios, ramcfg + 0x02) & 0x40))
+		data |= 0x00002000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x10))
+		data |= 0x00004000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x08))
+		data |= 0x00000003;
+	else
+		data |= 0x14000000;
+	ram_mask(fuc, 0x10f824, mask, data);
+	ram_nsec(fuc, 1000);
+
+	if (nv_ro08(bios, ramcfg + 0x08) & 0x01)
+		data = 0x00100000;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f82c, 0x00100000, data);
+
+	/* PFB timing */
+	ram_mask(fuc, 0x10f248, 0xffffffff, nv_ro32(bios, timing + 0x28));
+	ram_mask(fuc, 0x10f290, 0xffffffff, nv_ro32(bios, timing + 0x00));
+	ram_mask(fuc, 0x10f294, 0xffffffff, nv_ro32(bios, timing + 0x04));
+	ram_mask(fuc, 0x10f298, 0xffffffff, nv_ro32(bios, timing + 0x08));
+	ram_mask(fuc, 0x10f29c, 0xffffffff, nv_ro32(bios, timing + 0x0c));
+	ram_mask(fuc, 0x10f2a0, 0xffffffff, nv_ro32(bios, timing + 0x10));
+	ram_mask(fuc, 0x10f2a4, 0xffffffff, nv_ro32(bios, timing + 0x14));
+	ram_mask(fuc, 0x10f2a8, 0xffffffff, nv_ro32(bios, timing + 0x18));
+	ram_mask(fuc, 0x10f2ac, 0xffffffff, nv_ro32(bios, timing + 0x1c));
+	ram_mask(fuc, 0x10f2cc, 0xffffffff, nv_ro32(bios, timing + 0x20));
+	ram_mask(fuc, 0x10f2e8, 0xffffffff, nv_ro32(bios, timing + 0x24));
+
+	mask = 0x33f00000;
+	data = 0x00000000;
+	if (!(nv_ro08(bios, ramcfg + 0x01) & 0x04))
+		data |= 0x20200000;
+	if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+		data |= 0x12800000;
+	/*XXX: see note above about there probably being some condition
+	 *     for the 10f824 stuff that uses ramcfg 3...
+	 */
+	if ( (nv_ro08(bios, ramcfg + 0x03) & 0xf0)) {
+		if (nv_ro08(bios, rammap + 0x08) & 0x0c) {
+			if (!(nv_ro08(bios, ramcfg + 0x07) & 0x80))
+				mask |= 0x00000020;
+			else
+				data |= 0x00000020;
+			mask |= 0x08000004;
+		}
+		data |= 0x04000000;
+	} else {
+		mask |= 0x44000020;
+		data |= 0x08000004;
+	}
+
+	ram_mask(fuc, 0x10f808, mask, data);
+
+	data = nv_ro08(bios, ramcfg + 0x03) & 0x0f;
+	ram_wr32(fuc, 0x10f870, 0x11111111 * data);
+
+	data = nv_ro16(bios, timing + 0x2c);
+	ram_mask(fuc, 0x10f250, 0x000003f0, (data & 0x003f) <<  4);
+
+	if (((nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6) >
+	    ((nv_ro32(bios, timing + 0x28) & 0x7f000000) >> 24))
+		data = (nv_ro32(bios, timing + 0x2c) & 0x00001fc0) >>  6;
+	else
+		data = (nv_ro32(bios, timing + 0x28) & 0x1f000000) >> 24;
+	ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
+
+	data = nv_ro08(bios, timing + 0x30);
+	ram_mask(fuc, 0x10f224, 0x001f0000, (data & 0xf8) << 13);
+
+	ram_wr32(fuc, 0x10f090, 0x4000007f);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
+	ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
+	ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
+	ram_nsec(fuc, 1000);
+
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+
+	ram_mask(fuc, mr[2], 0xfff, ram->base.mr[2]);
+	ram_wr32(fuc, mr[0], ram->base.mr[0]);
+	ram_nsec(fuc, 1000);
+
+	ram_nuke(fuc, mr[0]);
+	ram_mask(fuc, mr[0], 0x100, 0x100);
+	ram_mask(fuc, mr[0], 0x100, 0x000);
+
+	if (vc == 0 && ram_have(fuc, gpio2E)) {
+		u32 temp  = ram_mask(fuc, gpio2E, 0x3000, fuc->r_func2E[0]);
+		if (temp != ram_rd32(fuc, gpio2E)) {
+			ram_wr32(fuc, gpiotrig, 1);
+			ram_nsec(fuc, 20000);
+		}
+	}
+
+	if (ram->mode != 2) {
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x01000000);
+		ram_mask(fuc, 0x10f830, 0x01000000, 0x00000000);
+	}
+
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x80000000);
+	ram_wr32(fuc, 0x10f318, 0x00000001); /* NOP? */
+	ram_mask(fuc, 0x10f200, 0x80000000, 0x00000000);
+	ram_nsec(fuc, 1000);
+
+	ram_wr32(fuc, 0x62c000, 0x0f0f0f00);
+
+	if (nv_ro08(bios, rammap + 0x08) & 0x01)
+		data = 0x00000800;
+	else
+		data = 0x00000000;
+	ram_mask(fuc, 0x10f200, 0x00000800, data);
+	return 0;
+}
+
+/*******************************************************************************
+ * main hooks
+ ******************************************************************************/
+
+static int
+nve0_ram_calc(struct nouveau_fb *pfb, u32 freq)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	struct bit_entry M;
+	int ret, refclk, strap, i;
+	u32 data;
+	u8  cnt;
+
+	/* lookup memory config data relevant to the target frequency */
+	ram->base.rammap.data = nvbios_rammap_match(bios, freq / 1000,
+						   &ram->base.rammap.version,
+						   &ram->base.rammap.size, &cnt,
+						   &ram->base.ramcfg.size);
+	if (!ram->base.rammap.data || ram->base.rammap.version != 0x11 ||
+	     ram->base.rammap.size < 0x09) {
+		nv_error(pfb, "invalid/missing rammap entry\n");
+		return -EINVAL;
+	}
+
+	/* locate specific data set for the attached memory */
+	if (bit_entry(bios, 'M', &M) || M.version != 2 || M.length < 3) {
+		nv_error(pfb, "invalid/missing memory table\n");
+		return -EINVAL;
+	}
+
+	strap = (nv_rd32(pfb, 0x101000) & 0x0000003c) >> 2;
+	data = nv_ro16(bios, M.offset + 1);
+	if (data)
+		strap = nv_ro08(bios, data + strap);
+
+	if (strap >= cnt) {
+		nv_error(pfb, "invalid ramcfg strap\n");
+		return -EINVAL;
+	}
+
+	ram->base.ramcfg.version = ram->base.rammap.version;
+	ram->base.ramcfg.data = ram->base.rammap.data + ram->base.rammap.size +
+			       (ram->base.ramcfg.size * strap);
+	if (!ram->base.ramcfg.data || ram->base.ramcfg.version != 0x11 ||
+	     ram->base.ramcfg.size < 0x08) {
+		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		return -EINVAL;
+	}
+
+	/* lookup memory timings, if bios says they're present */
+	strap = nv_ro08(bios, ram->base.ramcfg.data + 0x00);
+	if (strap != 0xff) {
+		ram->base.timing.data =
+			nvbios_timing_entry(bios, strap,
+					   &ram->base.timing.version,
+					   &ram->base.timing.size);
+		if (!ram->base.timing.data ||
+		     ram->base.timing.version != 0x20 ||
+		     ram->base.timing.size < 0x33) {
+			nv_error(pfb, "invalid/missing timing entry\n");
+			return -EINVAL;
+		}
+	} else {
+		ram->base.timing.data = 0;
+	}
+
+	ret = ram_init(fuc, pfb);
+	if (ret)
+		return ret;
+
+	ram->mode = (freq > fuc->refpll.vco1.max_freq) ? 2 : 1;
+	ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f;
+
+	/* XXX: this is *not* what nvidia do.  on fermi nvidia generally
+	 * select, based on some unknown condition, one of the two possible
+	 * reference frequencies listed in the vbios table for mempll and
+	 * program refpll to that frequency.
+	 *
+	 * so far, i've seen very weird values being chosen by nvidia on
+	 * kepler boards, no idea how/why they're chosen.
+	 */
+	refclk = freq;
+	if (ram->mode == 2)
+		refclk = fuc->mempll.refclk;
+
+	/* calculate refpll coefficients */
+	ret = nva3_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+			   &ram->fN1, &ram->M1, &ram->P1);
+	fuc->mempll.refclk = ret;
+	if (ret <= 0) {
+		nv_error(pfb, "unable to calc refpll\n");
+		return -EINVAL;
+	}
+
+	/* calculate mempll coefficients, if we're using it */
+	if (ram->mode == 2) {
+		/* post-divider doesn't work... the reg accepts the value but
+		 * appears to completely ignore it.  there *is* a bit (bit 28)
+		 * that appears to divide the clock by 2 if set.
+		 */
+		fuc->mempll.min_p = 1;
+		fuc->mempll.max_p = 2;
+
+		ret = nva3_pll_calc(nv_subdev(pfb), &fuc->mempll, freq,
+				   &ram->N2, NULL, &ram->M2, &ram->P2);
+		if (ret <= 0) {
+			nv_error(pfb, "unable to calc mempll\n");
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fuc->r_mr); i++) {
+		if (ram_have(fuc, mr[i]))
+			ram->base.mr[i] = ram_rd32(fuc, mr[i]);
+	}
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+		ret = nouveau_sddr3_calc(&ram->base);
+		if (ret == 0)
+			ret = nve0_ram_calc_sddr3(pfb, freq);
+		break;
+	case NV_MEM_TYPE_GDDR5:
+		ret = nouveau_gddr5_calc(&ram->base);
+		if (ret == 0)
+			ret = nve0_ram_calc_gddr5(pfb, freq);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nve0_ram_prog(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", false));
+	return 0;
+}
+
+static void
+nve0_ram_tidy(struct nouveau_fb *pfb)
+{
+	struct nve0_ram *ram = (void *)pfb->ram;
+	struct nve0_ramfuc *fuc = &ram->fuc;
+	ram_exec(fuc, false);
+}
+
+static int
+nve0_ram_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object->parent;
+	struct nve0_ram *ram   = (void *)object;
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	static const u8  train0[] = {
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	u8  ver, hdr, cnt, len, snr, ssz;
+	u32 data, save;
+	int ret, i;
+
+	ret = nouveau_ram_init(&ram->base);
+	if (ret)
+		return ret;
+
+	/* run a bunch of scripts from the rammap table.  there are actually
+	 * individual pointers for each rammap entry too, but nvidia seem
+	 * to just run the last two entries' scripts early on in their
+	 * init, and never again.  we'll just run them all once for now.
+	 *
+	 * i strongly suspect that each script is for a separate mode
+	 * (likely selected by 0x10f65c's lower bits?), and the binary
+	 * driver skips the one that's already been set up by the init
+	 * tables.
+	 */
+	data = nvbios_rammap_table(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+	if (!data || hdr < 0x15)
+		return -EINVAL;
+
+	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
+	data = nv_ro32(bios, data + 0x10); /* guess u32... */
+	save = nv_rd32(pfb, 0x10f65c);
+	for (i = 0; i < cnt; i++) {
+		nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+		nvbios_exec(&(struct nvbios_init) {
+				.subdev = nv_subdev(pfb),
+				.bios = bios,
+				.offset = nv_ro32(bios, data), /* guess u32 */
+				.execute = 1,
+			    });
+		data += 4;
+	}
+	nv_wr32(pfb, 0x10f65c, save);
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		for (i = 0; i < 0x30; i++) {
+			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
+
+			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
+			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
+			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
+		}
+
+		for (i = 0; i < 0x100; i++) {
+			nv_wr32(pfb, 0x10f968, i);
+			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+		}
+
+		for (i = 0; i < 0x100; i++) {
+			nv_wr32(pfb, 0x10f96c, i);
+			nv_wr32(pfb, 0x10f900, train1[2 + (i & 1)]);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nve0_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	struct nouveau_gpio *gpio = nouveau_gpio(pfb);
+	struct dcb_gpio_func func;
+	struct nve0_ram *ram;
+	int ret;
+
+	ret = nvc0_ram_create(parent, engine, oclass, &ram);
+	*pobject = nv_object(ram);
+	if (ret)
+		return ret;
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+	case NV_MEM_TYPE_GDDR5:
+		ram->base.calc = nve0_ram_calc;
+		ram->base.prog = nve0_ram_prog;
+		ram->base.tidy = nve0_ram_tidy;
+		break;
+	default:
+		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
+		break;
+	}
+
+	/* parse bios data for both PLLs */
+	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
+	if (ret) {
+		nv_error(pfb, "mclk refpll data not found\n");
+		return ret;
+	}
+
+	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
+	if (ret) {
+		nv_error(pfb, "mclk pll data not found\n");
+		return ret;
+	}
+
+	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	if (ret == 0) {
+		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
+		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
+		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+	}
+
+	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	if (ret == 0) {
+		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
+		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
+		ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+	}
+
+	ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
+
+	ram->fuc.r_0x132020 = ramfuc_reg(0x132020);
+	ram->fuc.r_0x132028 = ramfuc_reg(0x132028);
+	ram->fuc.r_0x132024 = ramfuc_reg(0x132024);
+	ram->fuc.r_0x132030 = ramfuc_reg(0x132030);
+	ram->fuc.r_0x132034 = ramfuc_reg(0x132034);
+	ram->fuc.r_0x132000 = ramfuc_reg(0x132000);
+	ram->fuc.r_0x132004 = ramfuc_reg(0x132004);
+	ram->fuc.r_0x132040 = ramfuc_reg(0x132040);
+
+	ram->fuc.r_0x10f248 = ramfuc_reg(0x10f248);
+	ram->fuc.r_0x10f290 = ramfuc_reg(0x10f290);
+	ram->fuc.r_0x10f294 = ramfuc_reg(0x10f294);
+	ram->fuc.r_0x10f298 = ramfuc_reg(0x10f298);
+	ram->fuc.r_0x10f29c = ramfuc_reg(0x10f29c);
+	ram->fuc.r_0x10f2a0 = ramfuc_reg(0x10f2a0);
+	ram->fuc.r_0x10f2a4 = ramfuc_reg(0x10f2a4);
+	ram->fuc.r_0x10f2a8 = ramfuc_reg(0x10f2a8);
+	ram->fuc.r_0x10f2ac = ramfuc_reg(0x10f2ac);
+	ram->fuc.r_0x10f2cc = ramfuc_reg(0x10f2cc);
+	ram->fuc.r_0x10f2e8 = ramfuc_reg(0x10f2e8);
+	ram->fuc.r_0x10f250 = ramfuc_reg(0x10f250);
+	ram->fuc.r_0x10f24c = ramfuc_reg(0x10f24c);
+	ram->fuc.r_0x10fec4 = ramfuc_reg(0x10fec4);
+	ram->fuc.r_0x10fec8 = ramfuc_reg(0x10fec8);
+	ram->fuc.r_0x10f604 = ramfuc_reg(0x10f604);
+	ram->fuc.r_0x10f614 = ramfuc_reg(0x10f614);
+	ram->fuc.r_0x10f610 = ramfuc_reg(0x10f610);
+	ram->fuc.r_0x100770 = ramfuc_reg(0x100770);
+	ram->fuc.r_0x100778 = ramfuc_reg(0x100778);
+	ram->fuc.r_0x10f224 = ramfuc_reg(0x10f224);
+
+	ram->fuc.r_0x10f870 = ramfuc_reg(0x10f870);
+	ram->fuc.r_0x10f698 = ramfuc_reg(0x10f698);
+	ram->fuc.r_0x10f694 = ramfuc_reg(0x10f694);
+	ram->fuc.r_0x10f6b8 = ramfuc_reg(0x10f6b8);
+	ram->fuc.r_0x10f808 = ramfuc_reg(0x10f808);
+	ram->fuc.r_0x10f670 = ramfuc_reg(0x10f670);
+	ram->fuc.r_0x10f60c = ramfuc_reg(0x10f60c);
+	ram->fuc.r_0x10f830 = ramfuc_reg(0x10f830);
+	ram->fuc.r_0x1373ec = ramfuc_reg(0x1373ec);
+	ram->fuc.r_0x10f800 = ramfuc_reg(0x10f800);
+	ram->fuc.r_0x10f82c = ramfuc_reg(0x10f82c);
+
+	ram->fuc.r_0x10f978 = ramfuc_reg(0x10f978);
+	ram->fuc.r_0x10f910 = ramfuc_reg(0x10f910);
+	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_GDDR5:
+		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
+		ram->fuc.r_mr[3] = ramfuc_reg(0x10f338);
+		ram->fuc.r_mr[4] = ramfuc_reg(0x10f33c);
+		ram->fuc.r_mr[5] = ramfuc_reg(0x10f340);
+		ram->fuc.r_mr[6] = ramfuc_reg(0x10f344);
+		ram->fuc.r_mr[7] = ramfuc_reg(0x10f348);
+		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
+		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
+		break;
+	case NV_MEM_TYPE_DDR3:
+		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
+		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
+		break;
+	default:
+		break;
+	}
+
+	ram->fuc.r_0x62c000 = ramfuc_reg(0x62c000);
+	ram->fuc.r_0x10f200 = ramfuc_reg(0x10f200);
+	ram->fuc.r_0x10f210 = ramfuc_reg(0x10f210);
+	ram->fuc.r_0x10f310 = ramfuc_reg(0x10f310);
+	ram->fuc.r_0x10f314 = ramfuc_reg(0x10f314);
+	ram->fuc.r_0x10f318 = ramfuc_reg(0x10f318);
+	ram->fuc.r_0x10f090 = ramfuc_reg(0x10f090);
+	ram->fuc.r_0x10f69c = ramfuc_reg(0x10f69c);
+	ram->fuc.r_0x10f824 = ramfuc_reg(0x10f824);
+	ram->fuc.r_0x1373f0 = ramfuc_reg(0x1373f0);
+	ram->fuc.r_0x1373f4 = ramfuc_reg(0x1373f4);
+	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
+	ram->fuc.r_0x10f65c = ramfuc_reg(0x10f65c);
+	ram->fuc.r_0x10f6bc = ramfuc_reg(0x10f6bc);
+	ram->fuc.r_0x100710 = ramfuc_reg(0x100710);
+	ram->fuc.r_0x10f750 = ramfuc_reg(0x10f750);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_ram_oclass = {
+	.handle = 0,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_ram_ctor,
+		.dtor = _nouveau_ram_dtor,
+		.init = nve0_ram_init,
+		.fini = _nouveau_ram_fini,
+	}
+};
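
A note on the hooks installed above: calc() builds the ramfuc script for the
target frequency, prog() executes it (only when the "NvMemExec" config option
is enabled, per nve0_ram_prog()), and tidy() discards the script state.  A
minimal sketch of how a caller might drive them, assuming it already holds
the struct nouveau_fb pointer (illustrative only, not part of the patch):

	static int example_reclock(struct nouveau_fb *pfb, u32 khz)
	{
		struct nouveau_ram *ram = pfb->ram;
		int ret;

		ret = ram->calc(pfb, khz);	/* build the register script */
		if (ret == 0)
			ret = ram->prog(pfb);	/* run it (gated by NvMemExec) */
		ram->tidy(pfb);			/* drop script resources */
		return ret;
	}
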
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
new file mode 100644
index 0000000..571077e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramseq.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_FBRAM_SEQ_H__
+#define __NVKM_FBRAM_SEQ_H__
+
+#include <subdev/bus.h>
+#include <subdev/bus/hwsq.h>
+
+#define ram_init(s,p)       hwsq_init(&(s)->base, (p))
+#define ram_exec(s,e)       hwsq_exec(&(s)->base, (e))
+#define ram_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define ram_rd32(s,r)       hwsq_rd32(&(s)->base, &(s)->r_##r)
+#define ram_wr32(s,r,d)     hwsq_wr32(&(s)->base, &(s)->r_##r, (d))
+#define ram_nuke(s,r)       hwsq_nuke(&(s)->base, &(s)->r_##r)
+#define ram_mask(s,r,m,d)   hwsq_mask(&(s)->base, &(s)->r_##r, (m), (d))
+#define ram_setf(s,f,d)     hwsq_setf(&(s)->base, (f), (d))
+#define ram_wait(s,f,d)     hwsq_wait(&(s)->base, (f), (d))
+#define ram_nsec(s,n)       hwsq_nsec(&(s)->base, (n))
+
+#endif
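
The ram_*() wrappers above rely on token pasting: ram_rd32(fuc, gpio2E), for
example, expands to an access through the sequencer state's r_gpio2E handle,
and ram_have() just checks whether that handle was ever initialised.  A
minimal sketch of the pattern with hypothetical names (not driver code):

	struct reg_handle { unsigned int addr; };

	struct seq {
		struct reg_handle r_gpio2E;	/* hypothetical handle */
	};

	#define seq_have(s, r)	((s)->r_##r.addr != 0x000000)

	static void example(struct seq *s)
	{
		if (seq_have(s, gpio2E)) {
			/* handle was set up; safe to access the register */
		}
	}
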
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
new file mode 100644
index 0000000..ebd4cd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/bios.h>
+#include "priv.h"
+
+struct ramxlat {
+	int id;
+	u8 enc;
+};
+
+static inline int
+ramxlat(const struct ramxlat *xlat, int id)
+{
+	while (xlat->id >= 0) {
+		if (xlat->id == id)
+			return xlat->enc;
+		xlat++;
+	}
+	return -EINVAL;
+}
+
+static const struct ramxlat
+ramddr3_cl[] = {
+	{ 5, 2 }, { 6, 4 }, { 7, 6 }, { 8, 8 }, { 9, 10 }, { 10, 12 },
+	{ 11, 14 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 12, 1 }, { 13, 3 }, { 14, 5 },
+	{ -1 }
+};
+
+static const struct ramxlat
+ramddr3_wr[] = {
+	{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 14, 7 }, { 16, 0 },
+	{ -1 }
+};
+
+static const struct ramxlat
+ramddr3_cwl[] = {
+	{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
+	/* the below are mentioned in some, but not all, ddr3 docs */
+	{ 9, 4 },
+	{ -1 }
+};
+
+int
+nouveau_sddr3_calc(struct nouveau_ram *ram)
+{
+	struct nouveau_bios *bios = nouveau_bios(ram);
+	int WL, CL, WR;
+
+	switch (!!ram->timing.data * ram->timing.version) {
+	case 0x20:
+		WL = (nv_ro16(bios, ram->timing.data + 0x04) & 0x0f80) >> 7;
+		CL =  nv_ro08(bios, ram->timing.data + 0x04) & 0x1f;
+		WR =  nv_ro08(bios, ram->timing.data + 0x0a) & 0x7f;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	WL = ramxlat(ramddr3_cwl, WL);
+	CL = ramxlat(ramddr3_cl, CL);
+	WR = ramxlat(ramddr3_wr, WR);
+	if (WL < 0 || CL < 0 || WR < 0)
+		return -EINVAL;
+
+	ram->mr[0] &= ~0xe74;
+	ram->mr[0] |= (WR & 0x07) << 9;
+	ram->mr[0] |= (CL & 0x0e) << 3;
+	ram->mr[0] |= (CL & 0x01) << 2;
+
+	ram->mr[2] &= ~0x038;
+	ram->mr[2] |= (WL & 0x07) << 3;
+	return 0;
+}
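
A worked example of the packing in nouveau_sddr3_calc(), with encodings read
straight from the tables above (illustration only): CL=7, WR=8 and CWL=6
translate to the encodings 6, 4 and 1 respectively, so

	mr[0] |= (4 & 0x07) << 9;	/* WR encoding  -> 0x800 */
	mr[0] |= (6 & 0x0e) << 3;	/* CL high bits -> 0x030 */
	mr[0] |= (6 & 0x01) << 2;	/* CL low bit   -> 0x000 */
	mr[2] |= (1 & 0x07) << 3;	/* CWL encoding -> 0x008 */

i.e. 0x830 lands in the MR0 bits cleared by ~0xe74, and 0x008 in the MR2
bits cleared by ~0x038.
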
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index d422acc..f572c28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -67,7 +67,7 @@
 		}
 	}
 
-	return -EINVAL;
+	return -ENOENT;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2895c19..041fd5e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -195,7 +195,7 @@
 
 static int
 nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
-		     struct i2c_board_info *info,
+		     struct nouveau_i2c_board_info *info,
 		     bool (*match)(struct nouveau_i2c_port *,
 				   struct i2c_board_info *))
 {
@@ -208,12 +208,29 @@
 	}
 
 	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
-	for (i = 0; info[i].addr; i++) {
-		if (nv_probe_i2c(port, info[i].addr) &&
-		    (!match || match(port, &info[i]))) {
-			nv_info(i2c, "detected %s: %s\n", what, info[i].type);
+	for (i = 0; info[i].dev.addr; i++) {
+		u8 orig_udelay = 0;
+
+		if ((port->adapter.algo == &i2c_bit_algo) &&
+		    (info[i].udelay != 0)) {
+			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+			nv_debug(i2c, "using custom udelay %d instead of %d\n",
+			         info[i].udelay, algo->udelay);
+			orig_udelay = algo->udelay;
+			algo->udelay = info[i].udelay;
+		}
+
+		if (nv_probe_i2c(port, info[i].dev.addr) &&
+		    (!match || match(port, &info[i].dev))) {
+			nv_info(i2c, "detected %s: %s\n", what,
+				info[i].dev.type);
 			return i;
 		}
+
+		if (orig_udelay) {
+			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
+			algo->udelay = orig_udelay;
+		}
 	}
 
 	nv_debug(i2c, "no devices found.\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index e290cfa..b4b9943 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -25,38 +25,48 @@
 #include <subdev/mc.h>
 #include <core/option.h>
 
+static inline u32
+nouveau_mc_intr_mask(struct nouveau_mc *pmc)
+{
+	u32 intr = nv_rd32(pmc, 0x000100);
+	if (intr == 0xffffffff) /* likely fallen off the bus */
+		intr = 0x00000000;
+	return intr;
+}
+
 static irqreturn_t
 nouveau_mc_intr(int irq, void *arg)
 {
 	struct nouveau_mc *pmc = arg;
-	const struct nouveau_mc_intr *map = pmc->intr_map;
-	struct nouveau_device *device = nv_device(pmc);
+	const struct nouveau_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
+	const struct nouveau_mc_intr *map = oclass->intr;
 	struct nouveau_subdev *unit;
-	u32 stat, intr;
+	u32 intr;
 
-	intr = stat = nv_rd32(pmc, 0x000100);
-	if (intr == 0xffffffff)
-		return IRQ_NONE;
-	while (stat && map->stat) {
-		if (stat & map->stat) {
-			unit = nouveau_subdev(pmc, map->unit);
-			if (unit && unit->intr)
-				unit->intr(unit);
-			intr &= ~map->stat;
-		}
-		map++;
-	}
-
+	nv_wr32(pmc, 0x000140, 0x00000000);
+	nv_rd32(pmc, 0x000140);
+	intr = nouveau_mc_intr_mask(pmc);
 	if (pmc->use_msi)
-		nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
+		oclass->msi_rearm(pmc);
 
 	if (intr) {
-		nv_error(pmc, "unknown intr 0x%08x\n", stat);
+		u32 stat = intr = nouveau_mc_intr_mask(pmc);
+		while (map->stat) {
+			if (intr & map->stat) {
+				unit = nouveau_subdev(pmc, map->unit);
+				if (unit && unit->intr)
+					unit->intr(unit);
+				stat &= ~map->stat;
+			}
+			map++;
+		}
+
+		if (stat)
+			nv_error(pmc, "unknown intr 0x%08x\n", stat);
 	}
 
-	if (stat == IRQ_HANDLED)
-		pm_runtime_mark_last_busy(&device->pdev->dev);
-	return stat ? IRQ_HANDLED : IRQ_NONE;
+	nv_wr32(pmc, 0x000140, 0x00000001);
+	return intr ? IRQ_HANDLED : IRQ_NONE;
 }
 
 int
@@ -91,37 +101,42 @@
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass,
-		   const struct nouveau_mc_intr *intr_map,
-		   int length, void **pobject)
+		   struct nouveau_oclass *bclass, int length, void **pobject)
 {
+	const struct nouveau_mc_oclass *oclass = (void *)bclass;
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
 	int ret;
 
-	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+	ret = nouveau_subdev_create_(parent, engine, bclass, 0, "PMC",
 				     "master", length, pobject);
 	pmc = *pobject;
 	if (ret)
 		return ret;
 
-	pmc->intr_map = intr_map;
-
 	switch (device->pdev->device & 0x0ff0) {
-	case 0x00f0: /* BR02? */
-	case 0x02e0: /* BR02? */
-		pmc->use_msi = false;
+	case 0x00f0:
+	case 0x02e0:
+		/* BR02? it's not yet clear exactly how these would be handled */
 		break;
 	default:
-		pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
-		if (pmc->use_msi) {
-			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
-			if (pmc->use_msi) {
-				nv_info(pmc, "MSI interrupts enabled\n");
-				nv_wr08(device, 0x00088068, 0xff);
-			}
+		switch (device->chipset) {
+		case 0xaa: break; /* reported broken, nvidia also disables it */
+		default:
+			pmc->use_msi = true;
+			break;
 		}
-		break;
+	}
+
+	pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", pmc->use_msi);
+	if (pmc->use_msi && oclass->msi_rearm) {
+		pmc->use_msi = pci_enable_msi(device->pdev) == 0;
+		if (pmc->use_msi) {
+			nv_info(pmc, "MSI interrupts enabled\n");
+			oclass->msi_rearm(pmc);
+		}
+	} else {
+		pmc->use_msi = false;
 	}
 
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 64aa4ed..2d787e4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -22,17 +22,14 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv04_mc_priv {
-	struct nouveau_mc base;
-};
+#include "nv04.h"
 
 const struct nouveau_mc_intr
 nv04_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
 	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00010000, NVDEV_ENGINE_DISP },
 	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
@@ -42,22 +39,6 @@
 	{}
 };
 
-static int
-nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv04_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 int
 nv04_mc_init(struct nouveau_object *object)
 {
@@ -69,13 +50,30 @@
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv04_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x04),
-	.ofuncs = &(struct nouveau_ofuncs) {
+int
+nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass *
+nv04_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x04),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv04_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv04_mc_intr,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
new file mode 100644
index 0000000..b0d5c31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_MC_NV04_H__
+#define __NVKM_MC_NV04_H__
+
+#include <subdev/mc.h>
+
+struct nv04_mc_priv {
+	struct nouveau_mc base;
+};
+
+int  nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
+		  struct nouveau_oclass *, void *, u32,
+		  struct nouveau_object **);
+
+extern const struct nouveau_mc_intr nv04_mc_intr[];
+int  nv04_mc_init(struct nouveau_object *);
+void nv40_mc_msi_rearm(struct nouveau_mc *);
+int  nv50_mc_init(struct nouveau_object *);
+extern const struct nouveau_mc_intr nv50_mc_intr[];
+extern const struct nouveau_mc_intr nvc0_mc_intr[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
new file mode 100644
index 0000000..5b1faec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv40.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+void
+nv40_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+	struct nv04_mc_priv *priv = (void *)pmc;
+	nv_wr08(priv, 0x088068, 0xff);
+}
+
+struct nouveau_oclass *
+nv40_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x40),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv04_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nv04_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index d989178..3bfee5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -22,32 +22,12 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv44_mc_priv {
-	struct nouveau_mc base;
-};
-
-static int
-nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv44_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
+#include "nv04.h"
 
 static int
 nv44_mc_init(struct nouveau_object *object)
 {
-	struct nv44_mc_priv *priv = (void *)object;
+	struct nv04_mc_priv *priv = (void *)object;
 	u32 tmp = nv_rd32(priv, 0x10020c);
 
 	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
@@ -60,13 +40,15 @@
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv44_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x44),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv44_mc_ctor,
+struct nouveau_oclass *
+nv44_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x44),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv44_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv04_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 2b1afe2..e8822a93 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -22,13 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
+#include "nv04.h"
 
-struct nv50_mc_priv {
-	struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
 nv50_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_MPEG },
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
@@ -45,37 +41,30 @@
 	{},
 };
 
-static int
-nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
+static void
+nv50_mc_msi_rearm(struct nouveau_mc *pmc)
 {
-	struct nv50_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nouveau_device *device = nv_device(pmc);
+	pci_write_config_byte(device->pdev, 0x68, 0xff);
 }
 
 int
 nv50_mc_init(struct nouveau_object *object)
 {
-	struct nv50_mc_priv *priv = (void *)object;
+	struct nv04_mc_priv *priv = (void *)object;
 	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
 	return nouveau_mc_init(&priv->base);
 }
 
-struct nouveau_oclass
-nv50_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x50),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv50_mc_ctor,
+struct nouveau_oclass *
+nv50_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x50),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv50_mc_intr,
+	.msi_rearm = nv50_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
new file mode 100644
index 0000000..5f45411
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv94.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nv94_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x94),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nv50_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index 0671041..f8a6f18 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -22,11 +22,7 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
-
-struct nv98_mc_priv {
-	struct nouveau_mc base;
-};
+#include "nv04.h"
 
 static const struct nouveau_mc_intr
 nv98_mc_intr[] = {
@@ -36,6 +32,7 @@
 	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84:NVA3 */
 	{ 0x00008000, NVDEV_ENGINE_BSP },
 	{ 0x00020000, NVDEV_ENGINE_VP },
+	{ 0x00040000, NVDEV_SUBDEV_PWR },	/* NVA3:NVC0 */
 	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -47,29 +44,15 @@
 	{},
 };
 
-static int
-nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
-{
-	struct nv98_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nouveau_oclass
-nv98_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0x98),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nv98_mc_ctor,
+struct nouveau_oclass *
+nv98_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0x98),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nv98_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 104175c..c02b476 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -22,13 +22,9 @@
  * Authors: Ben Skeggs
  */
 
-#include <subdev/mc.h>
+#include "nv04.h"
 
-struct nvc0_mc_priv {
-	struct nouveau_mc base;
-};
-
-static const struct nouveau_mc_intr
+const struct nouveau_mc_intr
 nvc0_mc_intr[] = {
 	{ 0x00000001, NVDEV_ENGINE_PPP },
 	{ 0x00000020, NVDEV_ENGINE_COPY0 },
@@ -41,6 +37,7 @@
 	{ 0x00020000, NVDEV_ENGINE_VP },
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x01000000, NVDEV_SUBDEV_PWR },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
 	{ 0x10000000, NVDEV_SUBDEV_BUS },
@@ -49,29 +46,22 @@
 	{},
 };
 
-static int
-nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-	     struct nouveau_oclass *oclass, void *data, u32 size,
-	     struct nouveau_object **pobject)
+static void
+nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
 {
-	struct nvc0_mc_priv *priv;
-	int ret;
-
-	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nv04_mc_priv *priv = (void *)pmc;
+	nv_wr32(priv, 0x088704, 0x00000000);
 }
 
-struct nouveau_oclass
-nvc0_mc_oclass = {
-	.handle = NV_SUBDEV(MC, 0xc0),
-	.ofuncs = &(struct nouveau_ofuncs) {
-		.ctor = nvc0_mc_ctor,
+struct nouveau_oclass *
+nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0xc0),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
 		.dtor = _nouveau_mc_dtor,
 		.init = nv50_mc_init,
 		.fini = _nouveau_mc_fini,
 	},
-};
+	.intr = nvc0_mc_intr,
+	.msi_rearm = nvc0_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
new file mode 100644
index 0000000..837e545
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
+	.base.handle = NV_SUBDEV(MC, 0xc3),
+	.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+	.intr = nvc0_mc_intr,
+	.msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
new file mode 100644
index 0000000..9908f1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+#include <subdev/timer.h>
+
+static int
+nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
+		 u32 process, u32 message, u32 data0, u32 data1)
+{
+	struct nouveau_subdev *subdev = nv_subdev(ppwr);
+	u32 addr;
+
+	/* we currently only support a single process at a time waiting
+	 * on a synchronous reply, so take the PPWR mutex and tell the
+	 * receive handler what we're waiting for
+	 */
+	if (reply) {
+		mutex_lock(&subdev->mutex);
+		ppwr->recv.message = message;
+		ppwr->recv.process = process;
+	}
+
+	/* wait for a free slot in the fifo */
+	addr  = nv_rd32(ppwr, 0x10a4a0);
+	if (!nv_wait_ne(ppwr, 0x10a4b0, 0xffffffff, addr ^ 8))
+		return -EBUSY;
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000001);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000001);
+
+	/* write the packet */
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+				ppwr->send.base));
+	nv_wr32(ppwr, 0x10a1c4, process);
+	nv_wr32(ppwr, 0x10a1c4, message);
+	nv_wr32(ppwr, 0x10a1c4, data0);
+	nv_wr32(ppwr, 0x10a1c4, data1);
+	nv_wr32(ppwr, 0x10a4a0, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* wait for reply, if requested */
+	if (reply) {
+		wait_event(ppwr->recv.wait, (ppwr->recv.process == 0));
+		reply[0] = ppwr->recv.data[0];
+		reply[1] = ppwr->recv.data[1];
+		mutex_unlock(&subdev->mutex);
+	}
+
+	return 0;
+}
+
+static void
+nouveau_pwr_recv(struct work_struct *work)
+{
+	struct nouveau_pwr *ppwr =
+		container_of(work, struct nouveau_pwr, recv.work);
+	u32 process, message, data0, data1;
+
+	/* nothing to do if GET == PUT */
+	u32 addr =  nv_rd32(ppwr, 0x10a4cc);
+	if (addr == nv_rd32(ppwr, 0x10a4c8))
+		return;
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000002);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000002);
+
+	/* read the packet */
+	nv_wr32(ppwr, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+				ppwr->recv.base));
+	process = nv_rd32(ppwr, 0x10a1c4);
+	message = nv_rd32(ppwr, 0x10a1c4);
+	data0   = nv_rd32(ppwr, 0x10a1c4);
+	data1   = nv_rd32(ppwr, 0x10a1c4);
+	nv_wr32(ppwr, 0x10a4cc, (addr + 1) & 0x0f);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* wake process if it's waiting on a synchronous reply */
+	if (ppwr->recv.process) {
+		if (process == ppwr->recv.process &&
+		    message == ppwr->recv.message) {
+			ppwr->recv.data[0] = data0;
+			ppwr->recv.data[1] = data1;
+			ppwr->recv.process = 0;
+			wake_up(&ppwr->recv.wait);
+			return;
+		}
+	}
+
+	/* right now there are no other expected responses from the engine,
+	 * so assume that any unexpected message is an error.
+	 */
+	nv_warn(ppwr, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
+		(char)((process & 0x000000ff) >>  0),
+		(char)((process & 0x0000ff00) >>  8),
+		(char)((process & 0x00ff0000) >> 16),
+		(char)((process & 0xff000000) >> 24),
+		process, message, data0, data1);
+}
+
+static void
+nouveau_pwr_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_pwr *ppwr = (void *)subdev;
+	u32 disp = nv_rd32(ppwr, 0x10a01c);
+	u32 intr = nv_rd32(ppwr, 0x10a008) & disp & ~(disp >> 16);
+
+	if (intr & 0x00000020) {
+		u32 stat = nv_rd32(ppwr, 0x10a16c);
+		if (stat & 0x80000000) {
+			nv_error(ppwr, "UAS fault at 0x%06x addr 0x%08x\n",
+				 stat & 0x00ffffff, nv_rd32(ppwr, 0x10a168));
+			nv_wr32(ppwr, 0x10a16c, 0x00000000);
+			intr &= ~0x00000020;
+		}
+	}
+
+	if (intr & 0x00000040) {
+		schedule_work(&ppwr->recv.work);
+		nv_wr32(ppwr, 0x10a004, 0x00000040);
+		intr &= ~0x00000040;
+	}
+
+	if (intr & 0x00000080) {
+		nv_info(ppwr, "wr32 0x%06x 0x%08x\n", nv_rd32(ppwr, 0x10a7a0),
+						      nv_rd32(ppwr, 0x10a7a4));
+		nv_wr32(ppwr, 0x10a004, 0x00000080);
+		intr &= ~0x00000080;
+	}
+
+	if (intr) {
+		nv_error(ppwr, "intr 0x%08x\n", intr);
+		nv_wr32(ppwr, 0x10a004, intr);
+	}
+}
+
+int
+_nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_pwr *ppwr = (void *)object;
+
+	nv_wr32(ppwr, 0x10a014, 0x00000060);
+	flush_work(&ppwr->recv.work);
+
+	return nouveau_subdev_fini(&ppwr->base, suspend);
+}
+
+int
+_nouveau_pwr_init(struct nouveau_object *object)
+{
+	struct nouveau_pwr *ppwr = (void *)object;
+	int ret, i;
+
+	ret = nouveau_subdev_init(&ppwr->base);
+	if (ret)
+		return ret;
+
+	nv_subdev(ppwr)->intr = nouveau_pwr_intr;
+	ppwr->message = nouveau_pwr_send;
+
+	/* prevent previous ucode from running, wait for idle, reset */
+	nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nv_wait(ppwr, 0x10a04c, 0xffffffff, 0x00000000);
+	nv_mask(ppwr, 0x000200, 0x00002000, 0x00000000);
+	nv_mask(ppwr, 0x000200, 0x00002000, 0x00002000);
+
+	/* upload data segment */
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000);
+	for (i = 0; i < ppwr->data.size / 4; i++)
+		nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
+
+	/* upload code segment */
+	nv_wr32(ppwr, 0x10a180, 0x01000000);
+	for (i = 0; i < ppwr->code.size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(ppwr, 0x10a188, i >> 6);
+		nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
+	}
+
+	/* start it running */
+	nv_wr32(ppwr, 0x10a10c, 0x00000000);
+	nv_wr32(ppwr, 0x10a104, 0x00000000);
+	nv_wr32(ppwr, 0x10a100, 0x00000002);
+
+	/* wait for valid host->pwr ring configuration */
+	if (!nv_wait_ne(ppwr, 0x10a4d0, 0xffffffff, 0x00000000))
+		return -EBUSY;
+	ppwr->send.base = nv_rd32(ppwr, 0x10a4d0) & 0x0000ffff;
+	ppwr->send.size = nv_rd32(ppwr, 0x10a4d0) >> 16;
+
+	/* wait for valid pwr->host ring configuration */
+	if (!nv_wait_ne(ppwr, 0x10a4dc, 0xffffffff, 0x00000000))
+		return -EBUSY;
+	ppwr->recv.base = nv_rd32(ppwr, 0x10a4dc) & 0x0000ffff;
+	ppwr->recv.size = nv_rd32(ppwr, 0x10a4dc) >> 16;
+
+	nv_wr32(ppwr, 0x10a010, 0x000000e0);
+	return 0;
+}
+
+int
+nouveau_pwr_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_pwr *ppwr;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PPWR",
+				     "pwr", length, pobject);
+	ppwr = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_WORK(&ppwr->recv.work, nouveau_pwr_recv);
+	init_waitqueue_head(&ppwr->recv.wait);
+	return 0;
+}
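
nouveau_pwr_send() is exposed to the rest of the driver as ppwr->message;
passing a non-NULL reply pointer makes the call synchronous, with
nouveau_pwr_recv() copying the firmware's data back and waking the waiter.
A minimal sketch of a synchronous caller -- PROC_EXAMPLE and MSG_EXAMPLE are
made-up identifiers, not constants from this patch:

	static int example_query(struct nouveau_pwr *ppwr, u32 *value)
	{
		u32 reply[2];
		int ret;

		ret = ppwr->message(ppwr, reply, PROC_EXAMPLE, MSG_EXAMPLE, 0, 0);
		if (ret)
			return ret;

		*value = reply[0];	/* data0, copied by nouveau_pwr_recv() */
		return 0;
	}
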
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
new file mode 100644
index 0000000..2284ecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_HOST, #host_init, #host_recv)
+#endif
+
+/******************************************************************************
+ * HOST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+// HOST (R)FIFO packet format
+.equ #fifo_process 0x00
+.equ #fifo_message 0x04
+.equ #fifo_data0   0x08
+.equ #fifo_data1   0x0c
+
+// HOST HOST->PWR queue description
+.equ #fifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #fifo_qnum 3 // log2(max number of entries in queue)
+.equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
+.equ #fifo_qmaskp (#fifo_qmaskb - 1)
+.equ #fifo_qmaskf ((#fifo_qmaskb << 1) - 1)
+.equ #fifo_qsize  (1 << (#fifo_qlen + #fifo_qnum))
+fifo_queue: .skip 128 // #fifo_qsize
+
+// HOST PWR->HOST queue description
+.equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
+.equ #rfifo_qnum 3 // log2(max number of entries in queue)
+.equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
+.equ #rfifo_qmaskp (#rfifo_qmaskb - 1)
+.equ #rfifo_qmaskf ((#rfifo_qmaskb << 1) - 1)
+.equ #rfifo_qsize  (1 << (#rfifo_qlen + #rfifo_qnum))
+rfifo_queue: .skip 128 // #rfifo_qsize
+#endif
+
+/******************************************************************************
+ * HOST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// HOST->PWR comms - dequeue message(s) for process(es) from FIFO
+//
+// $r15 - current (host)
+// $r0  - zero
+host_send:
+	nv_iord($r1, NV_PPWR_FIFO_GET(0))
+	nv_iord($r2, NV_PPWR_FIFO_PUT(0))
+	cmp b32 $r1 $r2
+	bra e #host_send_done
+		// calculate address of message
+		and $r14 $r1 #fifo_qmaskp
+		shl b32 $r14 $r14 #fifo_qlen
+		add b32 $r14 #fifo_queue
+
+		// read message data, and pass to appropriate process
+		ld b32 $r11 D[$r14 + #fifo_data1]
+		ld b32 $r12 D[$r14 + #fifo_data0]
+		ld b32 $r13 D[$r14 + #fifo_message]
+		ld b32 $r14 D[$r14 + #fifo_process]
+		call(send)
+
+		// increment GET
+		add b32 $r1 0x1
+		and $r14 $r1 #fifo_qmaskf
+		nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+		bra #host_send
+	host_send_done:
+	ret
+
+// PWR->HOST comms - enqueue message for HOST to RFIFO
+//
+// $r15 - current (host)
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+host_recv:
+	// message from intr handler == HOST->PWR comms pending
+	mov $r1 (PROC_KERN & 0x0000ffff)
+	sethi $r1 (PROC_KERN & 0xffff0000)
+	cmp b32 $r14 $r1
+	bra e #host_send
+
+	// wait for space in RFIFO
+	host_recv_wait:
+	nv_iord($r1, NV_PPWR_RFIFO_GET)
+	nv_iord($r2, NV_PPWR_RFIFO_PUT)
+	xor $r1 #rfifo_qmaskb
+	cmp b32 $r1 $r2
+	bra e #host_recv_wait
+
+	and $r3 $r2 #rfifo_qmaskp
+	shl b32 $r3 #rfifo_qlen
+	add b32 $r3 #rfifo_queue
+
+	// enqueue message
+	st b32 D[$r3 + #fifo_data1] $r11
+	st b32 D[$r3 + #fifo_data0] $r12
+	st b32 D[$r3 + #fifo_message] $r13
+	st b32 D[$r3 + #fifo_process] $r14
+
+	add b32 $r2 0x1
+	and $r2 #rfifo_qmaskf
+	nv_iowr(NV_PPWR_RFIFO_PUT, $r2)
+
+	// notify host of pending message
+	mov $r2 NV_PPWR_INTR_TRIGGER_USER0
+	nv_iowr(NV_PPWR_INTR_TRIGGER, $r2)
+	ret
+
+// $r15 - current (host)
+// $r0  - zero
+host_init:
+	// store each fifo's base/size in H2D/D2H scratch regs
+	mov $r1 #fifo_qsize
+	shl b32 $r1 16
+	or $r1 #fifo_queue
+	nv_iowr(NV_PPWR_H2D, $r1);
+
+	mov $r1 #rfifo_qsize
+	shl b32 $r1 16
+	or $r1 #rfifo_queue
+	nv_iowr(NV_PPWR_D2H, $r1);
+
+	// enable fifo subintr for first fifo
+	mov $r1 1
+	nv_iowr(NV_PPWR_FIFO_INTR_EN, $r1)
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
new file mode 100644
index 0000000..98f1c37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/idle.fuc
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_IDLE, #idle, #idle_recv)
+#endif
+
+/******************************************************************************
+ * IDLE data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * IDLE code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// message handler (nothing to do)
+//
+// $r15 - current (idle)
+// $r14 - message
+// $r0  - zero
+idle_recv:
+	ret
+
+// background loop: drain every process' pending messages, then sleep until
+// the next interrupt
+//
+// $r15 - current (idle)
+// $r0  - zero
+idle:
+	// set our "no interrupt has occurred during our execution" flag
+	bset $flags $p0
+
+	// count IDLE invocations for debugging purposes
+	nv_iord($r1, NV_PPWR_DSCRATCH(1))
+	add b32 $r1 1
+	nv_iowr(NV_PPWR_DSCRATCH(1), $r1)
+
+	// keep looping while there's pending messages for any process
+	idle_loop:
+	mov $r1 #proc_list_head
+	bclr $flags $p2
+	idle_proc:
+		// process the process' messages until there's none left
+		idle_proc_exec:
+			push $r1
+			mov b32 $r14 $r1
+			call(recv)
+			pop $r1
+			bra not $p1 #idle_proc_next
+			bset $flags $p2
+			bra #idle_proc_exec
+		// next process!
+		idle_proc_next:
+		add b32 $r1 #proc_size
+		cmp b32 $r1 $r15
+		bra ne #idle_proc
+	bra $p2 #idle_loop
+
+	// sleep if no interrupts have occurred
+	sleep $p0
+	bra #idle
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
new file mode 100644
index 0000000..0a7b05f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/kernel.fuc
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/******************************************************************************
+ * kernel data segment
+ *****************************************************************************/
+#ifdef INCLUDE_PROC
+proc_kern:
+process(PROC_KERN, 0, 0)
+proc_list_head:
+#endif
+
+#ifdef INCLUDE_DATA
+proc_list_tail:
+time_prev: .b32 0
+time_next: .b32 0
+#endif
+
+/******************************************************************************
+ * kernel code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
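+// entry point: jump over the library routines below to #init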
+	bra #init
+
+// read nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data (return)
+// $r0  - zero
+rd32:
+	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+	mov $r14 NV_PPWR_MMIO_CTRL_OP_RD
+	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	rd32_wait:
+		nv_iord($r14, NV_PPWR_MMIO_CTRL)
+		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		bra nz #rd32_wait
+	nv_iord($r13, NV_PPWR_MMIO_DATA)
+	ret
+
+// write nv register
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - data
+// $r0  - zero
+wr32:
+	nv_iowr(NV_PPWR_MMIO_ADDR, $r14)
+	nv_iowr(NV_PPWR_MMIO_DATA, $r13)
+	mov $r14 NV_PPWR_MMIO_CTRL_OP_WR
+	or $r14 NV_PPWR_MMIO_CTRL_MASK_B32_0
+	sethi $r14 NV_PPWR_MMIO_CTRL_TRIGGER
+
+#ifdef NVKM_FALCON_MMIO_TRAP
+	mov $r8 NV_PPWR_INTR_TRIGGER_USER1
+	nv_iowr(NV_PPWR_INTR_TRIGGER, $r8)
+	wr32_host:
+		nv_iord($r8, NV_PPWR_INTR)
+		and $r8 NV_PPWR_INTR_USER1
+		bra nz #wr32_host
+#endif
+
+	nv_iowr(NV_PPWR_MMIO_CTRL, $r14)
+	wr32_wait:
+		nv_iord($r14, NV_PPWR_MMIO_CTRL)
+		and $r14 NV_PPWR_MMIO_CTRL_STATUS
+		bra nz #wr32_wait
+	ret
+
+// busy-wait for a period of time
+//
+// $r15 - current
+// $r14 - ns
+// $r0  - zero
+nsec:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	nsec_loop:
+		nv_iord($r9, NV_PPWR_TIMER_LOW)
+		sub b32 $r9 $r8
+		cmp b32 $r9 $r14
+		bra l #nsec_loop
+	ret
+
+// poll a register until (value & mask) == data, or a timeout (ns) expires
+//
+// $r15 - current
+// $r14 - addr
+// $r13 - mask
+// $r12 - data
+// $r11 - timeout (ns)
+// $r0  - zero
+wait:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	wait_loop:
+		nv_rd32($r10, $r14)
+		and $r10 $r13
+		cmp b32 $r10 $r12
+		bra e #wait_done
+		nv_iord($r9, NV_PPWR_TIMER_LOW)
+		sub b32 $r9 $r8
+		cmp b32 $r9 $r11
+		bra l #wait_loop
+	wait_done:
+	ret
+
+// $r15 - current (kern)
+// $r14 - process
+// $r8  - NV_PPWR_INTR
+intr_watchdog:
+	// read process' timer status, skip if not enabled
+	ld b32 $r9 D[$r14 + #proc_time]
+	cmp b32 $r9 0
+	bra z #intr_watchdog_next_proc
+
+	// subtract last timer's value from process' timer,
+	// if it's <= 0 then the timer has expired
+	ld b32 $r10 D[$r0 + #time_prev]
+	sub b32 $r9 $r10
+	bra g #intr_watchdog_next_time
+		mov $r13 KMSG_ALARM
+		call(send_proc)
+		clear b32 $r9
+		bra #intr_watchdog_next_proc
+
+	// otherwise, update the next timer's value if this
+	// process' timer is the soonest
+	intr_watchdog_next_time:
+		// ... or if there's no next timer yet
+		ld b32 $r10 D[$r0 + #time_next]
+		cmp b32 $r10 0
+		bra z #intr_watchdog_next_time_set
+
+		cmp b32 $r9 $r10
+		bra g #intr_watchdog_next_proc
+		intr_watchdog_next_time_set:
+		st b32 D[$r0 + #time_next] $r9
+
+	// update process' timer status, and advance
+	intr_watchdog_next_proc:
+	st b32 D[$r14 + #proc_time] $r9
+	add b32 $r14 #proc_size
+	cmp b32 $r14 #proc_list_tail
+	bra ne #intr_watchdog
+	ret
+
+intr:
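+	// save the interrupted context and run the rest of the handler as the
+	// kernel process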
+	push $r0
+	clear b32 $r0
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r12
+	push $r13
+	push $r14
+	push $r15
+	mov $r15 #proc_kern
+	mov $r8 $flags
+	push $r8
+
+	nv_iord($r8, NV_PPWR_DSCRATCH(0))
+	add b32 $r8 1
+	nv_iowr(NV_PPWR_DSCRATCH(0), $r8)
+
+	nv_iord($r8, NV_PPWR_INTR)
+	and $r9 $r8 NV_PPWR_INTR_WATCHDOG
+	bra z #intr_skip_watchdog
+		st b32 D[$r0 + #time_next] $r0
+		mov $r14 #proc_list_head
+		call(intr_watchdog)
+		ld b32 $r9 D[$r0 + #time_next]
+		cmp b32 $r9 0
+		bra z #intr_skip_watchdog
+			nv_iowr(NV_PPWR_WATCHDOG_TIME, $r9)
+			st b32 D[$r0 + #time_prev] $r9
+
+	intr_skip_watchdog:
+	and $r9 $r8 NV_PPWR_INTR_SUBINTR
+	bra z #intr_skip_subintr
+		nv_iord($r9, NV_PPWR_SUBINTR)
+		and $r10 $r9 NV_PPWR_SUBINTR_FIFO
+		bra z #intr_subintr_skip_fifo
+			nv_iord($r12, NV_PPWR_FIFO_INTR)
+			push $r12
+			mov $r14 (PROC_HOST & 0x0000ffff)
+			sethi $r14 (PROC_HOST & 0xffff0000)
+			mov $r13 KMSG_FIFO
+			call(send)
+			pop $r12
+			nv_iowr(NV_PPWR_FIFO_INTR, $r12)
+		intr_subintr_skip_fifo:
+		nv_iowr(NV_PPWR_SUBINTR, $r9)
+
+	intr_skip_subintr:
+	and $r9 $r8 NV_PPWR_INTR_PAUSE
+	bra z #intr_skip_pause
+		and $r10 0xffbf
+
+	intr_skip_pause:
+	and $r9 $r8 NV_PPWR_INTR_USER0
+	bra z #intr_skip_user0
+		and $r10 0xffbf
+
+	intr_skip_user0:
+	nv_iowr(NV_PPWR_INTR_ACK, $r8)
+	pop $r8
+	mov $flags $r8
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r12
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	pop $r0
+	bclr $flags $p0
+	iret
+
+// request the current process be sent a message after a timeout expires
+//
+// $r15 - current
+// $r14 - ticks
+// $r0  - zero
+timer:
+	// interrupts off to prevent racing with timer isr
+	bclr $flags ie0
+
+	// if current process already has a timer set, bail
+	ld b32 $r8 D[$r15 + #proc_time]
+	cmp b32 $r8 0
+	bra g #timer_done
+	st b32 D[$r15 + #proc_time] $r14
+
+	// halt watchdog timer temporarily and check for a pending
+	// interrupt.  if there's one already pending, we can just
+	// bail since the timer isr will queue the next soonest
+	// right after it's done
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+	nv_iord($r8, NV_PPWR_INTR)
+	and $r8 NV_PPWR_INTR_WATCHDOG
+	bra nz #timer_enable
+
+	// update the watchdog if this timer should expire first,
+	// or if there's no timeout already set
+	nv_iord($r8, NV_PPWR_WATCHDOG_TIME)
+	cmp b32 $r14 $r0
+	bra e #timer_reset
+	cmp b32 $r14 $r8
+	bra l #timer_done
+	timer_reset:
+	nv_iowr(NV_PPWR_WATCHDOG_TIME, $r14)
+	st b32 D[$r0 + #time_prev] $r14
+
+	// re-enable the watchdog timer
+	timer_enable:
+	mov $r8 1
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r8)
+
+	// interrupts back on
+	timer_done:
+	bset $flags ie0
+	ret
+
+// send message to another process
+//
+// $r15 - current
+// $r14 - process
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+send_proc:
+	push $r8
+	push $r9
+	// check for space in queue
+	ld b32 $r8 D[$r14 + #proc_qget]
+	ld b32 $r9 D[$r14 + #proc_qput]
+	xor $r8 #proc_qmaskb
+	cmp b32 $r8 $r9
+	bra e #send_done
+
+	// enqueue message
+	and $r8 $r9 #proc_qmaskp
+	shl b32 $r8 $r8 #proc_qlen
+	add b32 $r8 #proc_queue
+	add b32 $r8 $r14
+
+	ld b32 $r10 D[$r15 + #proc_id]
+	st b32 D[$r8 + #msg_process] $r10
+	st b32 D[$r8 + #msg_message] $r13
+	st b32 D[$r8 + #msg_data0] $r12
+	st b32 D[$r8 + #msg_data1] $r11
+
+	// increment PUT
+	add b32 $r9 1
+	and $r9 #proc_qmaskf
+	st b32 D[$r14 + #proc_qput] $r9
+	bset $flags $p2
+	send_done:
+	pop $r9
+	pop $r8
+	ret
+
+// lookup process structure by its name
+//
+// $r15 - current
+// $r14 - process name
+// $r0  - zero
+//
+// $r14 - process
+// $p1  - success
+find:
+	push $r8
+	mov $r8 #proc_list_head
+	bset $flags $p1
+	find_loop:
+		ld b32 $r10 D[$r8 + #proc_id]
+		cmp b32 $r10 $r14
+		bra e #find_done
+		add b32 $r8 #proc_size
+		cmp b32 $r8 #proc_list_tail
+		bra ne #find_loop
+		bclr $flags $p1
+	find_done:
+	mov b32 $r14 $r8
+	pop $r8
+	ret
+
+// send a message to another process, looked up by its name/id
+//
+// $r15 - current
+// $r14 - process id
+// $r13 - message
+// $r12 - message data 0
+// $r11 - message data 1
+// $r0  - zero
+send:
+	call(find)
+	bra $p1 #send_proc
+	ret
+
+// process single message for a given process
+//
+// $r15 - current
+// $r14 - process
+// $r0  - zero
+recv:
+	ld b32 $r8 D[$r14 + #proc_qget]
+	ld b32 $r9 D[$r14 + #proc_qput]
+	bclr $flags $p1
+	cmp b32 $r8 $r9
+	bra e #recv_done
+		// dequeue message
+		and $r9 $r8 #proc_qmaskp
+		add b32 $r8 1
+		and $r8 #proc_qmaskf
+		st b32 D[$r14 + #proc_qget] $r8
+		ld b32 $r10 D[$r14 + #proc_recv]
+
+		push $r15
+		mov $r15 $flags
+		push $r15
+		mov b32 $r15 $r14
+
+		shl b32 $r9 $r9 #proc_qlen
+		add b32 $r14 $r9
+		add b32 $r14 #proc_queue
+		ld b32 $r11 D[$r14 + #msg_data1]
+		ld b32 $r12 D[$r14 + #msg_data0]
+		ld b32 $r13 D[$r14 + #msg_message]
+		ld b32 $r14 D[$r14 + #msg_process]
+
+		// process it
+		call $r10
+		pop $r15
+		mov $flags $r15
+		bset $flags $p1
+		pop $r15
+	recv_done:
+	ret
+
+init:
+	// setup stack: $sp starts at the top of data memory
+	// (size read from NV_PPWR_CAPS, in 256-byte blocks)
+	nv_iord($r1, NV_PPWR_CAPS)
+	extr $r1 $r1 9:17
+	shl b32 $r1 8
+	mov $sp $r1
+
+#ifdef NVKM_FALCON_MMIO_UAS
+	// somehow allows the magic "access mmio via D[]" stuff that's
+	// used by the nv_rd32/nv_wr32 macros to work
+	mov $r1 0x0010
+	sethi $r1 NV_PPWR_UAS_CONFIG_ENABLE
+	nv_iowrs(NV_PPWR_UAS_CONFIG, $r1)
+#endif
+
+	// route all interrupts except user0/1 and pause to fuc
+	mov $r1 0x00e0
+	sethi $r1 0x00000000
+	nv_iowr(NV_PPWR_INTR_ROUTE, $r1)
+
+	// enable watchdog and subintr intrs
+	mov $r1 NV_PPWR_INTR_EN_CLR_MASK
+	nv_iowr(NV_PPWR_INTR_EN_CLR, $r1)
+	mov $r1 NV_PPWR_INTR_EN_SET_WATCHDOG
+	or $r1 NV_PPWR_INTR_EN_SET_SUBINTR
+	nv_iowr(NV_PPWR_INTR_EN_SET, $r1)
+
+	// enable interrupts globally
+	mov $r1 #intr
+	sethi $r1 0x00000000
+	mov $iv0 $r1
+	bset $flags ie0
+
+	// enable watchdog timer
+	mov $r1 1
+	nv_iowr(NV_PPWR_WATCHDOG_ENABLE, $r1)
+
+	// bootstrap processes; the idle process is last in the list and never returns
+	mov $r15 #proc_list_head
+	init_proc:
+		ld b32 $r1 D[$r15 + #proc_init]
+		cmp b32 $r1 0
+		bra z #init_proc
+		call $r1
+		add b32 $r15 #proc_size
+		bra #init_proc
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
new file mode 100644
index 0000000..2a74ea9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
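+// chipset identifiers, used below to select per-generation register layouts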
+#define GT215 0xa3
+#define GF100 0xc0
+#define GF119 0xd9
+#define GK208 0x108
+
+#include "os.h"
+
+// IO addresses
+#define NV_PPWR_INTR_TRIGGER                                             0x0000
+#define NV_PPWR_INTR_TRIGGER_USER1                                   0x00000080
+#define NV_PPWR_INTR_TRIGGER_USER0                                   0x00000040
+#define NV_PPWR_INTR_ACK                                                 0x0004
+#define NV_PPWR_INTR_ACK_SUBINTR                                     0x00000800
+#define NV_PPWR_INTR_ACK_WATCHDOG                                    0x00000002
+#define NV_PPWR_INTR                                                     0x0008
+#define NV_PPWR_INTR_SUBINTR                                         0x00000800
+#define NV_PPWR_INTR_USER1                                           0x00000080
+#define NV_PPWR_INTR_USER0                                           0x00000040
+#define NV_PPWR_INTR_PAUSE                                           0x00000020
+#define NV_PPWR_INTR_WATCHDOG                                        0x00000002
+#define NV_PPWR_INTR_EN_SET                                              0x0010
+#define NV_PPWR_INTR_EN_SET_SUBINTR                                  0x00000800
+#define NV_PPWR_INTR_EN_SET_WATCHDOG                                 0x00000002
+#define NV_PPWR_INTR_EN_CLR                                              0x0014
+#define NV_PPWR_INTR_EN_CLR_MASK            /* all bits; -1 appeases envyas */ -1
+#define NV_PPWR_INTR_ROUTE                                               0x001c
+#define NV_PPWR_TIMER_LOW                                                0x002c
+#define NV_PPWR_WATCHDOG_TIME                                            0x0034
+#define NV_PPWR_WATCHDOG_ENABLE                                          0x0038
+#define NV_PPWR_CAPS                                                     0x0108
+#define NV_PPWR_UAS_CONFIG                                               0x0164
+#define NV_PPWR_UAS_CONFIG_ENABLE                                    0x00010000
+#if NVKM_PPWR_CHIPSET >= GK208
+#define NV_PPWR_DSCRATCH(i)                                   (4 * (i) + 0x0450)
+#endif
+#define NV_PPWR_FIFO_PUT(i)                                   (4 * (i) + 0x04a0)
+#define NV_PPWR_FIFO_GET(i)                                   (4 * (i) + 0x04b0)
+#define NV_PPWR_FIFO_INTR                                                0x04c0
+#define NV_PPWR_FIFO_INTR_EN                                             0x04c4
+#define NV_PPWR_RFIFO_PUT                                                0x04c8
+#define NV_PPWR_RFIFO_GET                                                0x04cc
+#define NV_PPWR_H2D                                                      0x04d0
+#define NV_PPWR_D2H                                                      0x04dc
+#if NVKM_PPWR_CHIPSET < GK208
+#define NV_PPWR_DSCRATCH(i)                                   (4 * (i) + 0x05d0)
+#endif
+#define NV_PPWR_SUBINTR                                                  0x0688
+#define NV_PPWR_SUBINTR_FIFO                                         0x00000002
+#define NV_PPWR_MMIO_ADDR                                                0x07a0
+#define NV_PPWR_MMIO_DATA                                                0x07a4
+#define NV_PPWR_MMIO_CTRL                                                0x07ac
+#define NV_PPWR_MMIO_CTRL_TRIGGER                                    0x00010000
+#define NV_PPWR_MMIO_CTRL_STATUS                                     0x00007000
+#define NV_PPWR_MMIO_CTRL_STATUS_IDLE                                0x00000000
+#define NV_PPWR_MMIO_CTRL_MASK                                       0x000000f0
+#define NV_PPWR_MMIO_CTRL_MASK_B32_0                                 0x000000f0
+#define NV_PPWR_MMIO_CTRL_OP                                         0x00000003
+#define NV_PPWR_MMIO_CTRL_OP_RD                                      0x00000001
+#define NV_PPWR_MMIO_CTRL_OP_WR                                      0x00000002
+#define NV_PPWR_OUTPUT                                                   0x07c0
+#define NV_PPWR_OUTPUT_FB_PAUSE                                      0x00000004
+#define NV_PPWR_OUTPUT_SET                                               0x07e0
+#define NV_PPWR_OUTPUT_SET_FB_PAUSE                                  0x00000004
+#define NV_PPWR_OUTPUT_CLR                                               0x07e4
+#define NV_PPWR_OUTPUT_CLR_FB_PAUSE                                  0x00000004
+
+// Inter-process message format
+.equ #msg_process 0x00 /* send() target, recv() sender */
+.equ #msg_message 0x04
+.equ #msg_data0   0x08
+.equ #msg_data1   0x0c
+
+// Kernel message IDs
+#define KMSG_FIFO  0x00000000
+#define KMSG_ALARM 0x00000001
+
+// Process message queue description
+.equ #proc_qlen 4 // log2(size of queue entry in bytes)
+.equ #proc_qnum 2 // log2(max number of entries in queue)
+.equ #proc_qmaskb (1 << #proc_qnum) // max number of entries in queue
+.equ #proc_qmaskp (#proc_qmaskb - 1)
+.equ #proc_qmaskf ((#proc_qmaskb << 1) - 1)
+.equ #proc_qsize  (1 << (#proc_qlen + #proc_qnum))
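+// #proc_qmaskp masks a queue slot index, #proc_qmaskf masks the free-running
+// qget/qput counters (the extra bit distinguishes full from empty)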
+
+// Process table entry
+.equ #proc_id    0x00
+.equ #proc_init  0x04
+.equ #proc_recv  0x08
+.equ #proc_time  0x0c
+.equ #proc_qput  0x10
+.equ #proc_qget  0x14
+.equ #proc_queue 0x18
+.equ #proc_size (0x18 + #proc_qsize)
+
+#define process(id,init,recv) /*
+*/	.b32 id /*
+*/	.b32 init /*
+*/	.b32 recv /*
+*/	.b32 0 /*
+*/	.b32 0 /*
+*/	.b32 0 /*
+*/	.skip 64
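+// each entry is a 0x18-byte header (id, init, recv, time, qput, qget) followed
+// by a #proc_qsize-byte message queue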
+
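+// I/O register accessors: by default the register offset is shifted left by 6
+// to form the I[] address; GK208 builds define NVKM_FALCON_UNSHIFTED_IO and
+// use the offset directly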
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iord(reg,ior) /*
+*/	mov reg ior /*
+*/ 	shl b32 reg 6 /*
+*/ 	iord reg I[reg + 0x000]
+#else
+#define nv_iord(reg,ior) /*
+*/	mov reg ior /*
+*/ 	iord reg I[reg + 0x000]
+#endif
+
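+// the write macros borrow $r0 as address scratch and clear it on exit, since
+// the rest of the code relies on $r0 staying zero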
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowr(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	shl b32 $r0 6 /*
+*/ 	iowr I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#else
+#define nv_iowr(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	iowr I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#endif
+
+#ifndef NVKM_FALCON_UNSHIFTED_IO
+#define nv_iowrs(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	shl b32 $r0 6 /*
+*/ 	iowrs I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#else
+#define nv_iowrs(ior,reg) /*
+*/	mov $r0 ior /*
+*/ 	iowrs I[$r0 + 0x000] reg /*
+*/	clear b32 $r0
+#endif
+
+#define hash #
+#define fn(a) a
+#ifndef NVKM_FALCON_PC24
+#define call(a) call fn(hash)a
+#else
+#define call(a) lcall fn(hash)a
+#endif
+
+#ifndef NVKM_FALCON_MMIO_UAS
+#define nv_rd32(reg,addr) /*
+*/	mov b32 $r14 addr /*
+*/	call(rd32) /*
+*/	mov b32 reg $r13
+#else
+#define nv_rd32(reg,addr) /*
+*/ 	sethi $r0 0x14000000 /*
+*/	or $r0 addr /*
+*/	ld b32 reg D[$r0] /*
+*/	clear b32 $r0
+#endif
+
+#if !defined(NVKM_FALCON_MMIO_UAS) || defined(NVKM_FALCON_MMIO_TRAP)
+#define nv_wr32(addr,reg) /*
+*/	push addr /*
+*/	push reg /*
+*/	pop $r13 /*
+*/	pop $r14 /*
+*/	call(wr32) /*
+#else
+#define nv_wr32(addr,reg) /*
+*/ 	sethi $r0 0x14000000 /*
+*/	or $r0 addr /*
+*/	st b32 D[$r0] reg /*
+*/	clear b32 $r0
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
new file mode 100644
index 0000000..d43741e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_MEMX, #memx_init, #memx_recv)
+#endif
+
+/******************************************************************************
+ * MEMX data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+.equ #memx_opcode 0
+.equ #memx_header 2
+.equ #memx_length 4
+.equ #memx_func   8
+
+#define handler(cmd,hdr,len,func) /*
+*/	.b16 MEMX_##cmd /*
+*/	.b16 hdr /*
+*/	.b16 len /*
+*/      .b16 0 /*
+*/	.b32 func
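+// each entry: 16-bit opcode, 16-bit header length, 16-bit data length, 16 bits
+// of padding, then the 32-bit handler address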
+
+memx_func_head:
+handler(ENTER , 0x0001, 0x0000, #memx_func_enter)
+memx_func_next:
+handler(LEAVE , 0x0000, 0x0000, #memx_func_leave)
+handler(WR32  , 0x0000, 0x0002, #memx_func_wr32)
+handler(WAIT  , 0x0004, 0x0000, #memx_func_wait)
+handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
+memx_func_tail:
+
+.equ #memx_func_size #memx_func_next - #memx_func_head
+.equ #memx_func_num (#memx_func_tail - #memx_func_head) / #memx_func_size
+
+memx_data_head:
+.skip 0x0800
+memx_data_tail:
+#endif
+
+/******************************************************************************
+ * MEMX code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// ENTER opcode: assert FB_PAUSE and wait for it to take effect
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: bitmask of heads to wait for vblank on
+// $r3  - opcode description
+// $r0  - zero
+memx_func_enter:
+	mov $r6 NV_PPWR_OUTPUT_SET_FB_PAUSE
+	nv_iowr(NV_PPWR_OUTPUT_SET, $r6)
+	memx_func_enter_wait:
+		nv_iord($r6, NV_PPWR_OUTPUT)
+		and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+		bra z #memx_func_enter_wait
+	//XXX: TODO
+	ld b32 $r6 D[$r1 + 0x00]
+	add b32 $r1 0x04
+	ret
+
+// LEAVE opcode: release FB_PAUSE and wait for it to clear
+//
+// $r15 - current (memx)
+// $r4  - packet length
+// $r3  - opcode description
+// $r0  - zero
+memx_func_leave:
+	mov $r6 NV_PPWR_OUTPUT_CLR_FB_PAUSE
+	nv_iowr(NV_PPWR_OUTPUT_CLR, $r6)
+	memx_func_leave_wait:
+		nv_iord($r6, NV_PPWR_OUTPUT)
+		and $r6 NV_PPWR_OUTPUT_FB_PAUSE
+		bra nz #memx_func_leave_wait
+	ret
+
+// WR32 opcode: write each addr/data pair carried in the packet
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00*n: addr
+//	+04*n: data
+// $r3  - opcode description
+// $r0  - zero
+memx_func_wr32:
+	ld b32 $r6 D[$r1 + 0x00]
+	ld b32 $r5 D[$r1 + 0x04]
+	add b32 $r1 0x08
+	nv_wr32($r6, $r5)
+	sub b32 $r4 0x02
+	bra nz #memx_func_wr32
+	ret
+
+// WAIT opcode: poll a register for mask/data, with a timeout
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: addr
+//	+04: mask
+//	+08: data
+//	+0c: timeout (ns)
+// $r3  - opcode description
+// $r0  - zero
+memx_func_wait:
+	nv_iord($r8, NV_PPWR_TIMER_LOW)
+	ld b32 $r14 D[$r1 + 0x00]
+	ld b32 $r13 D[$r1 + 0x04]
+	ld b32 $r12 D[$r1 + 0x08]
+	ld b32 $r11 D[$r1 + 0x0c]
+	add b32 $r1 0x10
+	call(wait)
+	ret
+
+// DELAY opcode: busy-wait for the requested number of nanoseconds
+//
+// $r15 - current (memx)
+// $r4  - packet length
+//	+00: time (ns)
+// $r3  - opcode description
+// $r0  - zero
+memx_func_delay:
+	ld b32 $r14 D[$r1 + 0x00]
+	add b32 $r1 0x04
+	call(nsec)
+	ret
+
+// execute a script: walk the packets between head and tail, dispatch each
+// opcode, then reply to the sender
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message (exec)
+// $r12 - head of script
+// $r11 - tail of script
+// $r0  - zero
+memx_exec:
+	push $r14
+	push $r13
+	mov b32 $r1 $r12
+	mov b32 $r2 $r11
+	memx_exec_next:
+		// fetch the packet header, and locate opcode info
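+		// (header = (packet length << 16) | index into the opcode table)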
+		ld b32 $r3 D[$r1]
+		add b32 $r1 4
+		shr b32 $r4 $r3 16
+		mulu $r3 #memx_func_size
+
+		// execute the opcode handler
+		ld b32 $r5 D[$r3 + #memx_func_head + #memx_func]
+		call $r5
+
+		// keep going, if we haven't reached the end
+		cmp b32 $r1 $r2
+		bra l #memx_exec_next
+
+	// send completion reply
+	pop $r13
+	pop $r14
+	call(send)
+	ret
+
+// report the location and size of the script data buffer to the sender
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+memx_info:
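+	// reply with the offset and size of the script data buffer
+	// (data0 = offset, data1 = size in bytes)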
+	mov $r12 #memx_data_head
+	mov $r11 #memx_data_tail - #memx_data_head
+	call(send)
+	ret
+
+// message handler: dispatch MEMX_MSG_EXEC and MEMX_MSG_INFO requests
+//
+// $r15 - current (memx)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+memx_recv:
+	cmp b32 $r13 MEMX_MSG_EXEC
+	bra e #memx_exec
+	cmp b32 $r13 MEMX_MSG_INFO
+	bra e #memx_info
+	ret
+
+// process init (nothing to do yet)
+//
+// $r15 - current (memx)
+// $r0  - zero
+memx_init:
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
new file mode 100644
index 0000000..947be53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GK208
+
+#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
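+// the process sources are pulled in three times: once to emit their process
+// table entries, once for their data segments, and once for their code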
+.section #nv108_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nv108_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
new file mode 100644
index 0000000..9342e2d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -0,0 +1,1165 @@
+uint32_t nv108_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000379,
+	0x0000032a,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000046f,
+	0x00000461,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000473,
+	0x00000471,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x00000494,
+	0x00000475,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x0000049f,
+	0x0000049d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x000003a9,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x000003c7,
+	0x00000002,
+	0x00000002,
+	0x000003df,
+	0x00040003,
+	0x00000000,
+	0x00000407,
+	0x00010004,
+	0x00000000,
+	0x00000421,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nv108_pwr_code[] = {
+	0x02910ef5,
+/* 0x0004: rd32 */
+	0xf607a040,
+	0x04bd000e,
+	0xe3f0010e,
+	0x07ac4001,
+	0xbd000ef6,
+/* 0x0019: rd32_wait */
+	0x07ac4e04,
+	0xf100eecf,
+	0xf47000e4,
+	0xa44df61b,
+	0x00ddcf07,
+/* 0x002e: wr32 */
+	0xa04000f8,
+	0x000ef607,
+	0xa44004bd,
+	0x000df607,
+	0x020e04bd,
+	0xf0f0e5f0,
+	0xac4001e3,
+	0x000ef607,
+/* 0x004e: wr32_wait */
+	0xac4e04bd,
+	0x00eecf07,
+	0x7000e4f1,
+	0xf8f61bf4,
+/* 0x005d: nsec */
+	0xcf2c0800,
+/* 0x0062: nsec_loop */
+	0x2c090088,
+	0xbb0099cf,
+	0x9ea60298,
+	0xf8f61ef4,
+/* 0x0071: wait */
+	0xcf2c0800,
+/* 0x0076: wait_loop */
+	0xeeb20088,
+	0x0000047e,
+	0xadfddab2,
+	0xf4aca604,
+	0x2c09100b,
+	0xbb0099cf,
+	0x9ba60298,
+/* 0x0093: wait_done */
+	0xf8e61ef4,
+/* 0x0095: intr_watchdog */
+	0x03e99800,
+	0xf40096b0,
+	0x0a98280b,
+	0x029abb84,
+	0x0d0e1cf4,
+	0x01de7e01,
+	0xf494bd00,
+/* 0x00b2: intr_watchdog_next_time */
+	0x0a98140e,
+	0x00a6b085,
+	0xa6080bf4,
+	0x061cf49a,
+/* 0x00c0: intr_watchdog_next_time_set */
+/* 0x00c3: intr_watchdog_next_proc */
+	0xb58509b5,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc81bf402,
+/* 0x00d2: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0x000ff0f9,
+	0xf90188fe,
+	0x04504880,
+	0xb60088cf,
+	0x50400180,
+	0x0008f604,
+	0x080804bd,
+	0xc40088cf,
+	0x0bf40289,
+	0x8500b51f,
+	0x957e580e,
+	0x09980000,
+	0x0096b085,
+	0x000d0bf4,
+	0x0009f634,
+	0x09b504bd,
+/* 0x0125: intr_skip_watchdog */
+	0x0089e484,
+	0x360bf408,
+	0xcf068849,
+	0x9ac40099,
+	0x220bf402,
+	0xcf04c04c,
+	0xc0f900cc,
+	0xf14f484e,
+	0x0d5453e3,
+	0x023f7e00,
+	0x40c0fc00,
+	0x0cf604c0,
+/* 0x0157: intr_subintr_skip_fifo */
+	0x4004bd00,
+	0x09f60688,
+/* 0x015f: intr_skip_subintr */
+	0xc404bd00,
+	0x0bf42089,
+	0xbfa4f107,
+/* 0x0169: intr_skip_pause */
+	0x4089c4ff,
+	0xf1070bf4,
+/* 0x0173: intr_skip_user0 */
+	0x00ffbfa4,
+	0x0008f604,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x0196: timer */
+	0x32f401f8,
+	0x03f89810,
+	0xf40086b0,
+	0xfeb53a1c,
+	0xf6380003,
+	0x04bd0008,
+	0x88cf0808,
+	0x0284f000,
+	0x081c1bf4,
+	0x0088cf34,
+	0x0bf4e0a6,
+	0xf4e8a608,
+/* 0x01c6: timer_reset */
+	0x3400161e,
+	0xbd000ef6,
+	0x840eb504,
+/* 0x01d0: timer_enable */
+	0x38000108,
+	0xbd0008f6,
+/* 0x01d9: timer_done */
+	0x1031f404,
+/* 0x01de: send_proc */
+	0x80f900f8,
+	0xe89890f9,
+	0x04e99805,
+	0xa60486f0,
+	0x2a0bf489,
+	0x940398c4,
+	0x80b60488,
+	0x008ebb18,
+	0xb500fa98,
+	0x8db5008a,
+	0x028cb501,
+	0xb6038bb5,
+	0x94f00190,
+	0x04e9b507,
+/* 0x0217: send_done */
+	0xfc0231f4,
+	0xf880fc90,
+/* 0x021d: find */
+	0x0880f900,
+	0x0131f458,
+/* 0x0224: find_loop */
+	0xa6008a98,
+	0x100bf4ae,
+	0xb15880b6,
+	0xf4021086,
+	0x32f4f11b,
+/* 0x0239: find_done */
+	0xfc8eb201,
+/* 0x023f: send */
+	0x7e00f880,
+	0xf400021d,
+	0x00f89b01,
+/* 0x0248: recv */
+	0x9805e898,
+	0x32f404e9,
+	0xf489a601,
+	0x89c43c0b,
+	0x0180b603,
+	0xb50784f0,
+	0xea9805e8,
+	0xfef0f902,
+	0xf0f9018f,
+	0x9994efb2,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x028f: recv_done */
+	0xf8f0fc01,
+/* 0x0291: init */
+	0x01084100,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0x00e04100,
+	0x000013f0,
+	0x0001f61c,
+	0xff0104bd,
+	0x01f61400,
+	0x0104bd00,
+	0x0015f102,
+	0xf6100008,
+	0x04bd0001,
+	0xf000d241,
+	0x10fe0013,
+	0x1031f400,
+	0x38000101,
+	0xbd0001f6,
+/* 0x02db: init_proc */
+	0x98580f04,
+	0x16b001f1,
+	0xfa0bf400,
+	0xf0b615f9,
+	0xf20ef458,
+/* 0x02ec: host_send */
+	0xcf04b041,
+	0xa0420011,
+	0x0022cf04,
+	0x0bf412a6,
+	0x071ec42e,
+	0xb704ee94,
+	0x980218e0,
+	0xec9803eb,
+	0x01ed9802,
+	0x7e00ee98,
+	0xb600023f,
+	0x1ec40110,
+	0x04b0400f,
+	0xbd0001f6,
+	0xc70ef404,
+/* 0x0328: host_send_done */
+/* 0x032a: host_recv */
+	0x494100f8,
+	0x5413f14e,
+	0xf4e1a652,
+/* 0x0336: host_recv_wait */
+	0xcc41b90b,
+	0x0011cf04,
+	0xcf04c842,
+	0x16f00022,
+	0xf412a608,
+	0x23c4ef0b,
+	0x0434b607,
+	0x029830b7,
+	0xb5033bb5,
+	0x3db5023c,
+	0x003eb501,
+	0xf00120b6,
+	0xc8400f24,
+	0x0002f604,
+	0x400204bd,
+	0x02f60000,
+	0xf804bd00,
+/* 0x0379: host_init */
+	0x00804100,
+	0xf11014b6,
+	0x40021815,
+	0x01f604d0,
+	0x4104bd00,
+	0x14b60080,
+	0x9815f110,
+	0x04dc4002,
+	0xbd0001f6,
+	0x40010104,
+	0x01f604c4,
+	0xf804bd00,
+/* 0x03a9: memx_func_enter */
+	0x40040600,
+	0x06f607e0,
+/* 0x03b3: memx_func_enter_wait */
+	0x4604bd00,
+	0x66cf07c0,
+	0x0464f000,
+	0x98f70bf4,
+	0x10b60016,
+/* 0x03c7: memx_func_leave */
+	0x0600f804,
+	0x07e44004,
+	0xbd0006f6,
+/* 0x03d1: memx_func_leave_wait */
+	0x07c04604,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x03df: memx_func_wr32 */
+	0x9800f8f7,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0x00002e7e,
+	0x140003f1,
+	0xa00506fd,
+	0xb604bd05,
+	0x1bf40242,
+/* 0x0407: memx_func_wait */
+	0x0800f8dd,
+	0x0088cf2c,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0x7e1010b6,
+	0xf8000071,
+/* 0x0421: memx_func_delay */
+	0x001e9800,
+	0x7e0410b6,
+	0xf800005d,
+/* 0x042d: memx_exec */
+	0xf9e0f900,
+	0xb2c1b2d0,
+/* 0x0435: memx_exec_next */
+	0x001398b2,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12a655f9,
+	0xfced1ef4,
+	0x7ee0fcd0,
+	0xf800023f,
+/* 0x0455: memx_info */
+	0x03544c00,
+	0x7e08004b,
+	0xf800023f,
+/* 0x0461: memx_recv */
+	0x01d6b000,
+	0xb0c90bf4,
+	0x0bf400d6,
+/* 0x046f: memx_init */
+	0xf800f8eb,
+/* 0x0471: perf_recv */
+/* 0x0473: perf_init */
+	0xf800f800,
+/* 0x0475: test_recv */
+	0x04584100,
+	0xb60011cf,
+	0x58400110,
+	0x0001f604,
+	0xe7f104bd,
+	0xe3f1d900,
+	0x967e134f,
+	0x00f80001,
+/* 0x0494: test_init */
+	0x7e08004e,
+	0xf8000196,
+/* 0x049d: idle_recv */
+/* 0x049f: idle */
+	0xf400f800,
+	0x54410031,
+	0x0011cf04,
+	0x400110b6,
+	0x01f60454,
+/* 0x04b3: idle_loop */
+	0x0104bd00,
+	0x0232f458,
+/* 0x04b8: idle_proc */
+/* 0x04b8: idle_proc_exec */
+	0x1eb210f9,
+	0x0002487e,
+	0x11f410fc,
+	0x0231f409,
+/* 0x04cb: idle_proc_next */
+	0xb6f00ef4,
+	0x1fa65810,
+	0xf4e81bf4,
+	0x28f4e002,
+	0xc60ef400,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
new file mode 100644
index 0000000..6fde0b89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GT215
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nva3_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nva3_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
new file mode 100644
index 0000000..0fa4d7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nva3_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000430,
+	0x000003cd,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000054e,
+	0x00000540,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000552,
+	0x00000550,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x0000057b,
+	0x00000554,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000587,
+	0x00000585,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x0000046f,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000496,
+	0x00000002,
+	0x00000002,
+	0x000004b7,
+	0x00040003,
+	0x00000000,
+	0x000004df,
+	0x00010004,
+	0x00000000,
+	0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nva3_pwr_code[] = {
+	0x030d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0xf001e7f0,
+	0x07f101e3,
+	0x04b607ac,
+	0x000ed006,
+/* 0x0022: rd32_wait */
+	0xe7f104bd,
+	0xe4b607ac,
+	0x00eecf06,
+	0x7000e4f1,
+	0xf1f21bf4,
+	0xb607a4d7,
+	0xddcf06d4,
+/* 0x003f: wr32 */
+	0xf100f800,
+	0xb607a007,
+	0x0ed00604,
+	0xf104bd00,
+	0xb607a407,
+	0x0dd00604,
+	0xf004bd00,
+	0xe5f002e7,
+	0x01e3f0f0,
+	0x07ac07f1,
+	0xd00604b6,
+	0x04bd000e,
+/* 0x006c: wr32_wait */
+	0x07ace7f1,
+	0xcf06e4b6,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x007f: nsec */
+	0xf000f8f2,
+	0x84b62c87,
+	0x0088cf06,
+/* 0x0088: nsec_loop */
+	0xb62c97f0,
+	0x99cf0694,
+	0x0298bb00,
+	0xf4069eb8,
+	0x00f8f11e,
+/* 0x009c: wait */
+	0xb62c87f0,
+	0x88cf0684,
+/* 0x00a5: wait_loop */
+	0x02eeb900,
+	0xb90421f4,
+	0xadfd02da,
+	0x06acb804,
+	0xf0150bf4,
+	0x94b62c97,
+	0x0099cf06,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+	0x9800f8df,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x025421f5,
+	0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x010a: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x84b605d0,
+	0x0088cf06,
+	0xf10180b6,
+	0xb605d007,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40289c4,
+	0x0080230b,
+	0x58e7f085,
+	0x98cb21f4,
+	0x96b08509,
+	0x110bf400,
+	0xb63407f0,
+	0x09d00604,
+	0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f148,
+	0x0694b606,
+	0xc40099cf,
+	0x0bf4029a,
+	0xc0c7f12c,
+	0x06c4b604,
+	0xf900cccf,
+	0x48e7f1c0,
+	0x53e3f14f,
+	0x00d7f054,
+	0x02b921f5,
+	0x07f1c0fc,
+	0x04b604c0,
+	0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+	0x07f104bd,
+	0x04b60688,
+	0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f5: timer */
+	0x1032f401,
+	0xb003f898,
+	0x1cf40086,
+	0x03fe8051,
+	0xb63807f0,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40284f0,
+	0x87f0261b,
+	0x0684b634,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x0233: timer_reset */
+	0xf01f1ef4,
+	0x04b63407,
+	0x000ed006,
+	0x0e8004bd,
+/* 0x0241: timer_enable */
+	0x0187f084,
+	0xb63807f0,
+	0x08d00604,
+/* 0x024f: timer_done */
+	0xf404bd00,
+	0x00f81031,
+/* 0x0254: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x028e: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x0294: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x029c: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x021086b1,
+	0xf4f01bf4,
+/* 0x02b2: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02b9: send */
+	0x9421f500,
+	0x9701f402,
+/* 0x02c2: recv */
+	0xe89800f8,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x030b: recv_done */
+	0xf8f0fc01,
+/* 0x030d: init */
+	0x0817f100,
+	0x0614b601,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0xe017f100,
+	0x0013f000,
+	0xb61c07f0,
+	0x01d00604,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0604b614,
+	0xbd0001d0,
+	0x0217f004,
+	0x080015f1,
+	0xb61007f0,
+	0x01d00604,
+	0xf104bd00,
+	0xf0010a17,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x04b63807,
+	0x0001d006,
+	0xf7f004bd,
+/* 0x0371: init_proc */
+	0x01f19858,
+	0xf40016b0,
+	0x15f9fa0b,
+	0xf458f0b6,
+/* 0x0382: host_send */
+	0x17f1f20e,
+	0x14b604b0,
+	0x0011cf06,
+	0x04a027f1,
+	0xcf0624b6,
+	0x12b80022,
+	0x320bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980218,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602b9,
+	0x0f1ec401,
+	0x04b007f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x03cb: host_send_done */
+	0xf8ba0ef4,
+/* 0x03cd: host_recv */
+	0x4917f100,
+	0x5413f14e,
+	0x06e1b852,
+/* 0x03db: host_recv_wait */
+	0xf1aa0bf4,
+	0xb604cc17,
+	0x11cf0614,
+	0xc827f100,
+	0x0624b604,
+	0xf00022cf,
+	0x12b80816,
+	0xe60bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xd00604b6,
+	0x04bd0002,
+	0xf04027f0,
+	0x04b60007,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0430: host_init */
+	0x008017f1,
+	0xf11014b6,
+	0xf1021815,
+	0xb604d007,
+	0x01d00604,
+	0xf104bd00,
+	0xb6008017,
+	0x15f11014,
+	0x07f10298,
+	0x04b604dc,
+	0x0001d006,
+	0x17f004bd,
+	0xc407f101,
+	0x0604b604,
+	0xbd0001d0,
+/* 0x046f: memx_func_enter */
+	0xf000f804,
+	0x07f10467,
+	0x04b607e0,
+	0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+	0x67f104bd,
+	0x64b607c0,
+	0x0066cf06,
+	0xf40464f0,
+	0x1698f30b,
+	0x0410b600,
+/* 0x0496: memx_func_leave */
+	0x67f000f8,
+	0xe407f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+	0x9800f8f3,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xf13f21f4,
+	0xfd140003,
+	0x05800506,
+	0xb604bd00,
+	0x1bf40242,
+/* 0x04df: memx_func_wait */
+	0xf000f8dd,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f89c21,
+/* 0x04fc: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0507: memx_exec */
+	0xf900f87f,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0511: memx_exec_next */
+	0x00139802,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12b855f9,
+	0xec1ef406,
+	0xe0fcd0fc,
+	0x02b921f5,
+/* 0x0532: memx_info */
+	0xc7f100f8,
+	0xb7f10354,
+	0x21f50800,
+	0x00f802b9,
+/* 0x0540: memx_recv */
+	0xf401d6b0,
+	0xd6b0c40b,
+	0xe90bf400,
+/* 0x054e: memx_init */
+	0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+	0x00f800f8,
+/* 0x0554: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xf521f513,
+/* 0x057b: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x05a3: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02c2,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x05bd: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
new file mode 100644
index 0000000..eaa64da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF100
+
+//#define NVKM_FALCON_PC24
+//#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvc0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvc0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
new file mode 100644
index 0000000..82c8e8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvc0_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x00000430,
+	0x000003cd,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000054e,
+	0x00000540,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x00000552,
+	0x00000550,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x0000057b,
+	0x00000554,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000587,
+	0x00000585,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x0000046f,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000496,
+	0x00000002,
+	0x00000002,
+	0x000004b7,
+	0x00040003,
+	0x00000000,
+	0x000004df,
+	0x00010004,
+	0x00000000,
+	0x000004fc,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nvc0_pwr_code[] = {
+	0x030d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xd00604b6,
+	0x04bd000e,
+	0xf001e7f0,
+	0x07f101e3,
+	0x04b607ac,
+	0x000ed006,
+/* 0x0022: rd32_wait */
+	0xe7f104bd,
+	0xe4b607ac,
+	0x00eecf06,
+	0x7000e4f1,
+	0xf1f21bf4,
+	0xb607a4d7,
+	0xddcf06d4,
+/* 0x003f: wr32 */
+	0xf100f800,
+	0xb607a007,
+	0x0ed00604,
+	0xf104bd00,
+	0xb607a407,
+	0x0dd00604,
+	0xf004bd00,
+	0xe5f002e7,
+	0x01e3f0f0,
+	0x07ac07f1,
+	0xd00604b6,
+	0x04bd000e,
+/* 0x006c: wr32_wait */
+	0x07ace7f1,
+	0xcf06e4b6,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x007f: nsec */
+	0xf000f8f2,
+	0x84b62c87,
+	0x0088cf06,
+/* 0x0088: nsec_loop */
+	0xb62c97f0,
+	0x99cf0694,
+	0x0298bb00,
+	0xf4069eb8,
+	0x00f8f11e,
+/* 0x009c: wait */
+	0xb62c87f0,
+	0x88cf0684,
+/* 0x00a5: wait_loop */
+	0x02eeb900,
+	0xb90421f4,
+	0xadfd02da,
+	0x06acb804,
+	0xf0150bf4,
+	0x94b62c97,
+	0x0099cf06,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00c9: wait_done */
+/* 0x00cb: intr_watchdog */
+	0x9800f8df,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x025421f5,
+	0x0ef494bd,
+/* 0x00e9: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00f8: intr_watchdog_next_time_set */
+/* 0x00fb: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x010a: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x84b605d0,
+	0x0088cf06,
+	0xf10180b6,
+	0xb605d007,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40289c4,
+	0x0080230b,
+	0x58e7f085,
+	0x98cb21f4,
+	0x96b08509,
+	0x110bf400,
+	0xb63407f0,
+	0x09d00604,
+	0x8004bd00,
+/* 0x016e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f148,
+	0x0694b606,
+	0xc40099cf,
+	0x0bf4029a,
+	0xc0c7f12c,
+	0x06c4b604,
+	0xf900cccf,
+	0x48e7f1c0,
+	0x53e3f14f,
+	0x00d7f054,
+	0x02b921f5,
+	0x07f1c0fc,
+	0x04b604c0,
+	0x000cd006,
+/* 0x01ae: intr_subintr_skip_fifo */
+	0x07f104bd,
+	0x04b60688,
+	0x0009d006,
+/* 0x01ba: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x01c4: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01ce: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0604b604,
+	0xbd0008d0,
+	0xfe80fc04,
+	0xf0fc0088,
+	0xd0fce0fc,
+	0xb0fcc0fc,
+	0x90fca0fc,
+	0x00fc80fc,
+	0xf80032f4,
+/* 0x01f5: timer */
+	0x1032f401,
+	0xb003f898,
+	0x1cf40086,
+	0x03fe8051,
+	0xb63807f0,
+	0x08d00604,
+	0xf004bd00,
+	0x84b60887,
+	0x0088cf06,
+	0xf40284f0,
+	0x87f0261b,
+	0x0684b634,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x0233: timer_reset */
+	0xf01f1ef4,
+	0x04b63407,
+	0x000ed006,
+	0x0e8004bd,
+/* 0x0241: timer_enable */
+	0x0187f084,
+	0xb63807f0,
+	0x08d00604,
+/* 0x024f: timer_done */
+	0xf404bd00,
+	0x00f81031,
+/* 0x0254: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x028e: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x0294: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x029c: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x021086b1,
+	0xf4f01bf4,
+/* 0x02b2: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02b9: send */
+	0x9421f500,
+	0x9701f402,
+/* 0x02c2: recv */
+	0xe89800f8,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x030b: recv_done */
+	0xf8f0fc01,
+/* 0x030d: init */
+	0x0817f100,
+	0x0614b601,
+	0xe70011cf,
+	0xb6010911,
+	0x14fe0814,
+	0xe017f100,
+	0x0013f000,
+	0xb61c07f0,
+	0x01d00604,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0604b614,
+	0xbd0001d0,
+	0x0217f004,
+	0x080015f1,
+	0xb61007f0,
+	0x01d00604,
+	0xf104bd00,
+	0xf0010a17,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x04b63807,
+	0x0001d006,
+	0xf7f004bd,
+/* 0x0371: init_proc */
+	0x01f19858,
+	0xf40016b0,
+	0x15f9fa0b,
+	0xf458f0b6,
+/* 0x0382: host_send */
+	0x17f1f20e,
+	0x14b604b0,
+	0x0011cf06,
+	0x04a027f1,
+	0xcf0624b6,
+	0x12b80022,
+	0x320bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980218,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602b9,
+	0x0f1ec401,
+	0x04b007f1,
+	0xd00604b6,
+	0x04bd0001,
+/* 0x03cb: host_send_done */
+	0xf8ba0ef4,
+/* 0x03cd: host_recv */
+	0x4917f100,
+	0x5413f14e,
+	0x06e1b852,
+/* 0x03db: host_recv_wait */
+	0xf1aa0bf4,
+	0xb604cc17,
+	0x11cf0614,
+	0xc827f100,
+	0x0624b604,
+	0xf00022cf,
+	0x12b80816,
+	0xe60bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xd00604b6,
+	0x04bd0002,
+	0xf04027f0,
+	0x04b60007,
+	0x0002d006,
+	0x00f804bd,
+/* 0x0430: host_init */
+	0x008017f1,
+	0xf11014b6,
+	0xf1021815,
+	0xb604d007,
+	0x01d00604,
+	0xf104bd00,
+	0xb6008017,
+	0x15f11014,
+	0x07f10298,
+	0x04b604dc,
+	0x0001d006,
+	0x17f004bd,
+	0xc407f101,
+	0x0604b604,
+	0xbd0001d0,
+/* 0x046f: memx_func_enter */
+	0xf000f804,
+	0x07f10467,
+	0x04b607e0,
+	0x0006d006,
+/* 0x047e: memx_func_enter_wait */
+	0x67f104bd,
+	0x64b607c0,
+	0x0066cf06,
+	0xf40464f0,
+	0x1698f30b,
+	0x0410b600,
+/* 0x0496: memx_func_leave */
+	0x67f000f8,
+	0xe407f104,
+	0x0604b607,
+	0xbd0006d0,
+/* 0x04a5: memx_func_leave_wait */
+	0xc067f104,
+	0x0664b607,
+	0xf00066cf,
+	0x1bf40464,
+/* 0x04b7: memx_func_wr32 */
+	0x9800f8f3,
+	0x15980016,
+	0x0810b601,
+	0x50f960f9,
+	0xe0fcd0fc,
+	0xf13f21f4,
+	0xfd140003,
+	0x05800506,
+	0xb604bd00,
+	0x1bf40242,
+/* 0x04df: memx_func_wait */
+	0xf000f8dd,
+	0x84b62c87,
+	0x0088cf06,
+	0x98001e98,
+	0x1c98011d,
+	0x031b9802,
+	0xf41010b6,
+	0x00f89c21,
+/* 0x04fc: memx_func_delay */
+	0xb6001e98,
+	0x21f40410,
+/* 0x0507: memx_exec */
+	0xf900f87f,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0511: memx_exec_next */
+	0x00139802,
+	0x950410b6,
+	0x30f01034,
+	0xc835980c,
+	0x12b855f9,
+	0xec1ef406,
+	0xe0fcd0fc,
+	0x02b921f5,
+/* 0x0532: memx_info */
+	0xc7f100f8,
+	0xb7f10354,
+	0x21f50800,
+	0x00f802b9,
+/* 0x0540: memx_recv */
+	0xf401d6b0,
+	0xd6b0c40b,
+	0xe90bf400,
+/* 0x054e: memx_init */
+	0x00f800f8,
+/* 0x0550: perf_recv */
+/* 0x0552: perf_init */
+	0x00f800f8,
+/* 0x0554: test_recv */
+	0x05d817f1,
+	0xcf0614b6,
+	0x10b60011,
+	0xd807f101,
+	0x0604b605,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xf521f513,
+/* 0x057b: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801f521,
+/* 0x0585: idle_recv */
+/* 0x0587: idle */
+	0xf400f800,
+	0x17f10031,
+	0x14b605d4,
+	0x0011cf06,
+	0xf10110b6,
+	0xb605d407,
+	0x01d00604,
+/* 0x05a3: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x05a9: idle_proc */
+/* 0x05a9: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02c2,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x05bd: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00bb0ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
new file mode 100644
index 0000000..32d65ea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF119
+
+//#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #nvd0_pwr_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #nvd0_pwr_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
new file mode 100644
index 0000000..ce65e2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -0,0 +1,1229 @@
+uint32_t nvd0_pwr_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x000003be,
+	0x00000367,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x000004c4,
+	0x000004b6,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x000004c8,
+	0x000004c6,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x000004eb,
+	0x000004ca,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x000004f7,
+	0x000004f5,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0210: proc_list_tail */
+/* 0x0210: time_prev */
+	0x00000000,
+/* 0x0214: time_next */
+	0x00000000,
+/* 0x0218: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0298: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0318: memx_func_head */
+	0x00010000,
+	0x00000000,
+	0x000003f4,
+/* 0x0324: memx_func_next */
+	0x00000001,
+	0x00000000,
+	0x00000415,
+	0x00000002,
+	0x00000002,
+	0x00000430,
+	0x00040003,
+	0x00000000,
+	0x00000458,
+	0x00010004,
+	0x00000000,
+	0x00000472,
+/* 0x0354: memx_func_tail */
+/* 0x0354: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0b54: memx_data_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t nvd0_pwr_code[] = {
+	0x02bf0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xbd000ed0,
+	0x01e7f004,
+	0xf101e3f0,
+	0xd007ac07,
+	0x04bd000e,
+/* 0x001c: rd32_wait */
+	0x07ace7f1,
+	0xf100eecf,
+	0xf47000e4,
+	0xd7f1f51b,
+	0xddcf07a4,
+/* 0x0033: wr32 */
+	0xf100f800,
+	0xd007a007,
+	0x04bd000e,
+	0x07a407f1,
+	0xbd000dd0,
+	0x02e7f004,
+	0xf0f0e5f0,
+	0x07f101e3,
+	0x0ed007ac,
+/* 0x0057: wr32_wait */
+	0xf104bd00,
+	0xcf07ace7,
+	0xe4f100ee,
+	0x1bf47000,
+/* 0x0067: nsec */
+	0xf000f8f5,
+	0x88cf2c87,
+/* 0x006d: nsec_loop */
+	0x2c97f000,
+	0xbb0099cf,
+	0x9eb80298,
+	0xf41ef406,
+/* 0x007e: wait */
+	0x87f000f8,
+	0x0088cf2c,
+/* 0x0084: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0120b,
+	0x0099cf2c,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00a5: wait_done */
+/* 0x00a7: intr_watchdog */
+	0x9800f8e2,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb840a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x020621f5,
+	0x0ef494bd,
+/* 0x00c5: intr_watchdog_next_time */
+	0x850a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00d4: intr_watchdog_next_time_set */
+/* 0x00d7: intr_watchdog_next_proc */
+	0x80850980,
+	0xe0b603e9,
+	0x10e6b158,
+	0xc61bf402,
+/* 0x00e6: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x88cf05d0,
+	0x0180b600,
+	0x05d007f1,
+	0xbd0008d0,
+	0x0887f004,
+	0xc40088cf,
+	0x0bf40289,
+	0x85008020,
+	0xf458e7f0,
+	0x0998a721,
+	0x0096b085,
+	0xf00e0bf4,
+	0x09d03407,
+	0x8004bd00,
+/* 0x013e: intr_skip_watchdog */
+	0x89e48409,
+	0x0bf40800,
+	0x8897f13c,
+	0x0099cf06,
+	0xf4029ac4,
+	0xc7f1260b,
+	0xcccf04c0,
+	0xf1c0f900,
+	0xf14f48e7,
+	0xf05453e3,
+	0x21f500d7,
+	0xc0fc026b,
+	0x04c007f1,
+	0xbd000cd0,
+/* 0x0175: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0009d006,
+/* 0x017e: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x0188: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x0192: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0008d004,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x01b6: timer */
+	0x32f401f8,
+	0x03f89810,
+	0xf40086b0,
+	0xfe80421c,
+	0x3807f003,
+	0xbd0008d0,
+	0x0887f004,
+	0xf00088cf,
+	0x1bf40284,
+	0x3487f020,
+	0xb80088cf,
+	0x0bf406e0,
+	0x06e8b809,
+/* 0x01eb: timer_reset */
+	0xf0191ef4,
+	0x0ed03407,
+	0x8004bd00,
+/* 0x01f6: timer_enable */
+	0x87f0840e,
+	0x3807f001,
+	0xbd0008d0,
+/* 0x0201: timer_done */
+	0x1031f404,
+/* 0x0206: send_proc */
+	0x80f900f8,
+	0xe89890f9,
+	0x04e99805,
+	0xb80486f0,
+	0x0bf40689,
+	0x0398c42a,
+	0xb6048894,
+	0x8ebb1880,
+	0x00fa9800,
+	0x80008a80,
+	0x8c80018d,
+	0x038b8002,
+	0xf00190b6,
+	0xe9800794,
+	0x0231f404,
+/* 0x0240: send_done */
+	0x80fc90fc,
+/* 0x0246: find */
+	0x80f900f8,
+	0xf45887f0,
+/* 0x024e: find_loop */
+	0x8a980131,
+	0x06aeb800,
+	0xb6100bf4,
+	0x86b15880,
+	0x1bf40210,
+	0x0132f4f0,
+/* 0x0264: find_done */
+	0xfc028eb9,
+/* 0x026b: send */
+	0xf500f880,
+	0xf4024621,
+	0x00f89701,
+/* 0x0274: recv */
+	0x9805e898,
+	0x32f404e9,
+	0x0689b801,
+	0xc43d0bf4,
+	0x80b60389,
+	0x0784f001,
+	0x9805e880,
+	0xf0f902ea,
+	0xf9018ffe,
+	0x02efb9f0,
+	0xbb049994,
+	0xe0b600e9,
+	0x03eb9818,
+	0x9802ec98,
+	0xee9801ed,
+	0xfca5f900,
+	0x00f8fef0,
+	0xfc0131f4,
+/* 0x02bd: recv_done */
+/* 0x02bf: init */
+	0xf100f8f0,
+	0xcf010817,
+	0x11e70011,
+	0x14b60109,
+	0x0014fe08,
+	0x00e017f1,
+	0xf00013f0,
+	0x01d01c07,
+	0xf004bd00,
+	0x07f0ff17,
+	0x0001d014,
+	0x17f004bd,
+	0x0015f102,
+	0x1007f008,
+	0xbd0001d0,
+	0xe617f104,
+	0x0013f000,
+	0xf40010fe,
+	0x17f01031,
+	0x3807f001,
+	0xbd0001d0,
+	0x58f7f004,
+/* 0x0314: init_proc */
+	0xb001f198,
+	0x0bf40016,
+	0xb615f9fa,
+	0x0ef458f0,
+/* 0x0325: host_send */
+	0xb017f1f2,
+	0x0011cf04,
+	0x04a027f1,
+	0xb80022cf,
+	0x0bf40612,
+	0x071ec42f,
+	0xb704ee94,
+	0x980218e0,
+	0xec9803eb,
+	0x01ed9802,
+	0xf500ee98,
+	0xb6026b21,
+	0x1ec40110,
+	0xb007f10f,
+	0x0001d004,
+	0x0ef404bd,
+/* 0x0365: host_send_done */
+/* 0x0367: host_recv */
+	0xf100f8c3,
+	0xf14e4917,
+	0xb8525413,
+	0x0bf406e1,
+/* 0x0375: host_recv_wait */
+	0xcc17f1b3,
+	0x0011cf04,
+	0x04c827f1,
+	0xf00022cf,
+	0x12b80816,
+	0xec0bf406,
+	0xb60723c4,
+	0x30b70434,
+	0x3b800298,
+	0x023c8003,
+	0x80013d80,
+	0x20b6003e,
+	0x0f24f001,
+	0x04c807f1,
+	0xbd0002d0,
+	0x4027f004,
+	0xd00007f0,
+	0x04bd0002,
+/* 0x03be: host_init */
+	0x17f100f8,
+	0x14b60080,
+	0x1815f110,
+	0xd007f102,
+	0x0001d004,
+	0x17f104bd,
+	0x14b60080,
+	0x9815f110,
+	0xdc07f102,
+	0x0001d004,
+	0x17f004bd,
+	0xc407f101,
+	0x0001d004,
+	0x00f804bd,
+/* 0x03f4: memx_func_enter */
+	0xf10467f0,
+	0xd007e007,
+	0x04bd0006,
+/* 0x0400: memx_func_enter_wait */
+	0x07c067f1,
+	0xf00066cf,
+	0x0bf40464,
+	0x001698f6,
+	0xf80410b6,
+/* 0x0415: memx_func_leave */
+	0x0467f000,
+	0x07e407f1,
+	0xbd0006d0,
+/* 0x0421: memx_func_leave_wait */
+	0xc067f104,
+	0x0066cf07,
+	0xf40464f0,
+	0x00f8f61b,
+/* 0x0430: memx_func_wr32 */
+	0x98001698,
+	0x10b60115,
+	0xf960f908,
+	0xfcd0fc50,
+	0x3321f4e0,
+	0x140003f1,
+	0x800506fd,
+	0x04bd0005,
+	0xf40242b6,
+	0x00f8dd1b,
+/* 0x0458: memx_func_wait */
+	0xcf2c87f0,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0x7e21f410,
+/* 0x0472: memx_func_delay */
+	0x1e9800f8,
+	0x0410b600,
+	0xf86721f4,
+/* 0x047d: memx_exec */
+	0xf9e0f900,
+	0x02c1b9d0,
+/* 0x0487: memx_exec_next */
+	0x9802b2b9,
+	0x10b60013,
+	0x10349504,
+	0x980c30f0,
+	0x55f9c835,
+	0xf40612b8,
+	0xd0fcec1e,
+	0x21f5e0fc,
+	0x00f8026b,
+/* 0x04a8: memx_info */
+	0x0354c7f1,
+	0x0800b7f1,
+	0x026b21f5,
+/* 0x04b6: memx_recv */
+	0xd6b000f8,
+	0xc40bf401,
+	0xf400d6b0,
+	0x00f8e90b,
+/* 0x04c4: memx_init */
+/* 0x04c6: perf_recv */
+	0x00f800f8,
+/* 0x04c8: perf_init */
+/* 0x04ca: test_recv */
+	0x17f100f8,
+	0x11cf05d8,
+	0x0110b600,
+	0x05d807f1,
+	0xbd0001d0,
+	0x00e7f104,
+	0x4fe3f1d9,
+	0xb621f513,
+/* 0x04eb: test_init */
+	0xf100f801,
+	0xf50800e7,
+	0xf801b621,
+/* 0x04f5: idle_recv */
+/* 0x04f7: idle */
+	0xf400f800,
+	0x17f10031,
+	0x11cf05d4,
+	0x0110b600,
+	0x05d407f1,
+	0xbd0001d0,
+/* 0x050d: idle_loop */
+	0x5817f004,
+/* 0x0513: idle_proc */
+/* 0x0513: idle_proc_exec */
+	0xf90232f4,
+	0x021eb910,
+	0x027421f5,
+	0x11f410fc,
+	0x0231f409,
+/* 0x0527: idle_proc_next */
+	0xb6ef0ef4,
+	0x1fb85810,
+	0xe61bf406,
+	0xf4dd02f4,
+	0x0ef40028,
+	0x000000c1,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
new file mode 100644
index 0000000..5fb0ccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -0,0 +1,27 @@
+#ifndef __NVKM_PWR_OS_H__
+#define __NVKM_PWR_OS_H__
+
+/* Process names */
+#define PROC_KERN 0x52544e49
+#define PROC_IDLE 0x454c4449
+#define PROC_HOST 0x54534f48
+#define PROC_MEMX 0x584d454d
+#define PROC_PERF 0x46524550
+#define PROC_TEST 0x54534554
+
+/* KERN: message identifiers */
+#define KMSG_FIFO   0x00000000
+#define KMSG_ALARM  0x00000001
+
+/* MEMX: message identifiers */
+#define MEMX_MSG_INFO 0
+#define MEMX_MSG_EXEC 1
+
+/* MEMX: script opcode definitions */
+#define MEMX_ENTER  0
+#define MEMX_LEAVE  1
+#define MEMX_WR32   2
+#define MEMX_WAIT   3
+#define MEMX_DELAY  4
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
new file mode 100644
index 0000000..38eadf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/perf.fuc
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_PERF, #perf_init, #perf_recv)
+#endif
+
+/******************************************************************************
+ * PERF data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * PERF code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+
+// description
+//
+// $r15 - current (perf)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+perf_recv:
+	ret
+
+// description
+//
+// $r15 - current (perf)
+// $r0  - zero
+perf_init:
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
new file mode 100644
index 0000000..0c3a71b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/test.fuc
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_PROC
+process(PROC_TEST, #test_init, #test_recv)
+#endif
+
+/******************************************************************************
+ * TEST data segment
+ *****************************************************************************/
+#ifdef INCLUDE_DATA
+#endif
+
+/******************************************************************************
+ * TEST code segment
+ *****************************************************************************/
+#ifdef INCLUDE_CODE
+// description
+//
+// $r15 - current (test)
+// $r14 - sender process name
+// $r13 - message
+// $r12 - data0
+// $r11 - data1
+// $r0  - zero
+test_recv:
+	nv_iord($r1, NV_PPWR_DSCRATCH(2))
+	add b32 $r1 1
+	nv_iowr(NV_PPWR_DSCRATCH(2), $r1)
+	mov $r14 -0x2700 /* 0xd900, envyas grrr! */
+	sethi $r14 0x134f0000
+	call(timer)
+	ret
+
+// description
+//
+// $r15 - current (test)
+// $r0  - zero
+test_init:
+	mov $r14 0x800
+	call(timer)
+	ret
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
new file mode 100644
index 0000000..03de310
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -0,0 +1,121 @@
+#ifndef __NVKM_PWR_MEMX_H__
+#define __NVKM_PWR_MEMX_H__
+
+#include <subdev/pwr.h>
+#include <subdev/pwr/fuc/os.h>
+
+struct nouveau_memx {
+	struct nouveau_pwr *ppwr;
+	u32 base;
+	u32 size;
+	struct {
+		u32 mthd;
+		u32 size;
+		u32 data[64];
+	} c;
+};
+
+static void
+memx_out(struct nouveau_memx *memx)
+{
+	struct nouveau_pwr *ppwr = memx->ppwr;
+	int i;
+
+	if (memx->c.size) {
+		nv_wr32(ppwr, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+		for (i = 0; i < memx->c.size; i++)
+			nv_wr32(ppwr, 0x10a1c4, memx->c.data[i]);
+		memx->c.size = 0;
+	}
+}
+
+static void
+memx_cmd(struct nouveau_memx *memx, u32 mthd, u32 size, u32 data[])
+{
+	if ((memx->c.size + size >= ARRAY_SIZE(memx->c.data)) ||
+	    (memx->c.size && memx->c.mthd != mthd))
+		memx_out(memx);
+	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
+	memx->c.size += size;
+	memx->c.mthd  = mthd;
+}
+
+int
+nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
+{
+	struct nouveau_memx *memx;
+	u32 reply[2];
+	int ret;
+
+	ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0);
+	if (ret)
+		return ret;
+
+	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
+	if (!memx)
+		return -ENOMEM;
+	memx->ppwr = ppwr;
+	memx->base = reply[0];
+	memx->size = reply[1];
+
+	/* acquire data segment access */
+	do {
+		nv_wr32(ppwr, 0x10a580, 0x00000003);
+	} while (nv_rd32(ppwr, 0x10a580) != 0x00000003);
+	nv_wr32(ppwr, 0x10a1c0, 0x01000000 | memx->base);
+	nv_wr32(ppwr, 0x10a1c4, 0x00010000 | MEMX_ENTER);
+	nv_wr32(ppwr, 0x10a1c4, 0x00000000);
+	return 0;
+}
+
+int
+nouveau_memx_fini(struct nouveau_memx **pmemx, bool exec)
+{
+	struct nouveau_memx *memx = *pmemx;
+	struct nouveau_pwr *ppwr = memx->ppwr;
+	u32 finish, reply[2];
+
+	/* flush the cache... */
+	memx_out(memx);
+
+	/* release data segment access */
+	nv_wr32(ppwr, 0x10a1c4, 0x00000000 | MEMX_LEAVE);
+	finish = nv_rd32(ppwr, 0x10a1c0) & 0x00ffffff;
+	nv_wr32(ppwr, 0x10a580, 0x00000000);
+
+	/* call MEMX process to execute the script, and wait for reply */
+	if (exec) {
+		ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_EXEC,
+				 memx->base, finish);
+	}
+
+	kfree(memx);
+	return 0;
+}
+
+void
+nouveau_memx_wr32(struct nouveau_memx *memx, u32 addr, u32 data)
+{
+	nv_debug(memx->ppwr, "R[%06x] = 0x%08x\n", addr, data);
+	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
+}
+
+void
+nouveau_memx_wait(struct nouveau_memx *memx,
+		  u32 addr, u32 mask, u32 data, u32 nsec)
+{
+	nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
+				addr, mask, data, nsec);
+	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec });
+	memx_out(memx); /* fuc can't handle multiple */
+}
+
+void
+nouveau_memx_nsec(struct nouveau_memx *memx, u32 nsec)
+{
+	nv_debug(memx->ppwr, "    DELAY = %d ns\n", nsec);
+	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
+	memx_out(memx); /* fuc can't handle multiple */
+}
+
+#endif
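
Note on the file above: memx_cmd() batches opcodes that share a method into the 64-entry buffer, and memx_out() flushes the whole batch to the 0x10a1c4 FIFO behind a single header word (size << 16 | mthd). nouveau_memx_wait() and nouveau_memx_nsec() flush immediately because, per the comments, the fuc can't handle more than one of those per submission. A rough standalone sketch of that batching behaviour, with the hardware FIFO write replaced by printf() and all names/addresses below being illustrative rather than driver API:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MEMX_WR32  2
#define MEMX_DELAY 4

struct fake_memx {
	struct {
		uint32_t mthd;
		uint32_t size;
		uint32_t data[64];
	} c;
};

static void fake_out(struct fake_memx *memx)
{
	uint32_t i;

	if (memx->c.size) {
		printf("FIFO <- header 0x%08x\n",
		       (unsigned)((memx->c.size << 16) | memx->c.mthd));
		for (i = 0; i < memx->c.size; i++)
			printf("FIFO <- 0x%08x\n", (unsigned)memx->c.data[i]);
		memx->c.size = 0;
	}
}

static void fake_cmd(struct fake_memx *memx, uint32_t mthd,
		     uint32_t size, uint32_t data[])
{
	/* flush on buffer overflow or on a method change, as memx_cmd() does */
	if ((memx->c.size + size >= 64) ||
	    (memx->c.size && memx->c.mthd != mthd))
		fake_out(memx);
	memcpy(&memx->c.data[memx->c.size], data, size * sizeof(data[0]));
	memx->c.size += size;
	memx->c.mthd  = mthd;
}

int main(void)
{
	struct fake_memx memx = { 0 };

	/* two WR32s (arbitrary register/value pairs) coalesce into one batch */
	fake_cmd(&memx, MEMX_WR32, 2, (uint32_t[]){ 0x100200, 0x1 });
	fake_cmd(&memx, MEMX_WR32, 2, (uint32_t[]){ 0x100204, 0x2 });
	/* a DELAY forces a flush of the WR32s first, then goes out on its own */
	fake_cmd(&memx, MEMX_DELAY, 1, (uint32_t[]){ 1000 });
	fake_out(&memx);
	return 0;
}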
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
new file mode 100644
index 0000000..52c8541
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nv108.fuc.h"
+
+struct nv108_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv108_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nv108_pwr_code;
+	priv->base.code.size = sizeof(nv108_pwr_code);
+	priv->base.data.data = nv108_pwr_data;
+	priv->base.data.size = sizeof(nv108_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nv108_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv108_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
new file mode 100644
index 0000000..c132b7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nva3_pwr_init(struct nouveau_object *object)
+{
+	struct nva3_pwr_priv *priv = (void *)object;
+	nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
+	nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
+	return nouveau_pwr_init(&priv->base);
+}
+
+static int
+nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nva3_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nva3_pwr_code;
+	priv->base.code.size = sizeof(nva3_pwr_code);
+	priv->base.data.data = nva3_pwr_data;
+	priv->base.data.size = sizeof(nva3_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = nva3_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
new file mode 100644
index 0000000..495f685
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nvc0_pwr_code;
+	priv->base.code.size = sizeof(nvc0_pwr_code);
+	priv->base.data.data = nvc0_pwr_data;
+	priv->base.data.size = sizeof(nvc0_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
new file mode 100644
index 0000000..043aa14
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/pwr.h>
+
+#include "fuc/nvd0.fuc.h"
+
+struct nvd0_pwr_priv {
+	struct nouveau_pwr base;
+};
+
+static int
+nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvd0_pwr_priv *priv;
+	int ret;
+
+	ret = nouveau_pwr_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.code.data = nvd0_pwr_code;
+	priv->base.code.size = sizeof(nvd0_pwr_code);
+	priv->base.data.data = nvd0_pwr_data;
+	priv->base.data.size = sizeof(nvd0_pwr_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_pwr_oclass = {
+	.handle = NV_SUBDEV(PWR, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_pwr_ctor,
+		.dtor = _nouveau_pwr_dtor,
+		.init = _nouveau_pwr_init,
+		.fini = _nouveau_pwr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index f1de7a9..21b2b30 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -92,10 +92,11 @@
 	struct nouveau_timer *ptimer = nouveau_timer(therm);
 	struct nouveau_therm_priv *priv = (void *)therm;
 	unsigned long flags;
-	int duty;
+	bool immd = true;
+	bool poll = true;
+	int duty = -1;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	nv_debug(therm, "FAN speed check\n");
 	if (mode < 0)
 		mode = priv->mode;
 	priv->mode = mode;
@@ -106,28 +107,48 @@
 		duty = nouveau_therm_fan_get(therm);
 		if (duty < 0)
 			duty = 100;
+		poll = false;
 		break;
 	case NOUVEAU_THERM_CTRL_AUTO:
-		if (priv->fan->bios.nr_fan_trip)
+		if (priv->fan->bios.nr_fan_trip) {
 			duty = nouveau_therm_update_trip(therm);
-		else
+		} else
+		if (priv->fan->bios.linear_min_temp ||
+		    priv->fan->bios.linear_max_temp) {
 			duty = nouveau_therm_update_linear(therm);
+		} else {
+			duty = priv->cstate;
+			poll = false;
+		}
+		immd = false;
 		break;
 	case NOUVEAU_THERM_CTRL_NONE:
 	default:
 		ptimer->alarm_cancel(ptimer, &priv->alarm);
-		goto done;
+		poll = false;
 	}
 
-	nv_debug(therm, "FAN target request: %d%%\n", duty);
-	nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
-
-done:
-	if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+	if (list_empty(&priv->alarm.head) && poll)
 		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
-	else if (!list_empty(&priv->alarm.head))
-		nv_debug(therm, "therm fan alarm list is not empty\n");
 	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (duty >= 0) {
+		nv_debug(therm, "FAN target request: %d%%\n", duty);
+		nouveau_therm_fan_set(therm, immd, duty);
+	}
+}
+
+int
+nouveau_therm_cstate(struct nouveau_therm *ptherm, int fan, int dir)
+{
+	struct nouveau_therm_priv *priv = (void *)ptherm;
+	if (!dir || (dir < 0 && fan < priv->cstate) ||
+		    (dir > 0 && fan > priv->cstate)) {
+		nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
+		priv->cstate = fan;
+		nouveau_therm_update(ptherm, -1);
+	}
+	return 0;
 }
 
 static void
@@ -149,14 +170,15 @@
 		"automatic"
 	};
 
-	/* The default PDAEMON ucode interferes with fan management */
+	/* The default PPWR ucode on fermi interferes with fan management */
 	if ((mode >= ARRAY_SIZE(name)) ||
-	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
+	     !nouveau_subdev(device, NVDEV_SUBDEV_PWR)))
 		return -EINVAL;
 
 	/* do not allow automatic fan management if the thermal sensor is
 	 * not available */
-	if (priv->mode == 2 && therm->temp_get(therm) < 0)
+	if (priv->mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
 		return -EINVAL;
 
 	if (priv->mode == mode)
@@ -335,7 +357,7 @@
 	nouveau_therm_ic_ctor(therm);
 	nouveau_therm_fan_ctor(therm);
 
-	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
 	nouveau_therm_sensor_preinit(therm);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 39f47b9..95f6129 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -185,8 +185,11 @@
 	priv->fan->bios.max_duty = 100;
 	priv->fan->bios.bump_period = 500;
 	priv->fan->bios.slow_down_period = 2000;
+/*XXX: talk to mupuf */
+#if 0
 	priv->fan->bios.linear_min_temp = 40;
 	priv->fan->bios.linear_max_temp = 85;
+#endif
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
index e601773..f69dab1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -97,6 +97,13 @@
 {
 	struct nouveau_therm_priv *tpriv = (void *)therm;
 	struct nouveau_fantog_priv *priv;
+	int ret;
+
+	if (therm->pwm_ctrl) {
+		ret = therm->pwm_ctrl(therm, func->line, false);
+		if (ret)
+			return ret;
+	}
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	tpriv->fan = &priv->base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index 8b3adec..13b8500 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -55,28 +55,28 @@
 	return true;
 }
 
-static struct i2c_board_info
+static struct nouveau_i2c_board_info
 nv_board_infos[] = {
-	{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
-	{ I2C_BOARD_INFO("w83781d", 0x2d) },
-	{ I2C_BOARD_INFO("adt7473", 0x2e) },
-	{ I2C_BOARD_INFO("adt7473", 0x2d) },
-	{ I2C_BOARD_INFO("adt7473", 0x2c) },
-	{ I2C_BOARD_INFO("f75375", 0x2e) },
-	{ I2C_BOARD_INFO("lm99", 0x4c) },
-	{ I2C_BOARD_INFO("lm90", 0x4c) },
-	{ I2C_BOARD_INFO("lm90", 0x4d) },
-	{ I2C_BOARD_INFO("adm1021", 0x18) },
-	{ I2C_BOARD_INFO("adm1021", 0x19) },
-	{ I2C_BOARD_INFO("adm1021", 0x1a) },
-	{ I2C_BOARD_INFO("adm1021", 0x29) },
-	{ I2C_BOARD_INFO("adm1021", 0x2a) },
-	{ I2C_BOARD_INFO("adm1021", 0x2b) },
-	{ I2C_BOARD_INFO("adm1021", 0x4c) },
-	{ I2C_BOARD_INFO("adm1021", 0x4d) },
-	{ I2C_BOARD_INFO("adm1021", 0x4e) },
-	{ I2C_BOARD_INFO("lm63", 0x18) },
-	{ I2C_BOARD_INFO("lm63", 0x4e) },
+	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
+	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2e) }, 20  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2d) }, 20  },
+	{ { I2C_BOARD_INFO("adt7473", 0x2c) }, 20  },
+	{ { I2C_BOARD_INFO("f75375", 0x2e) }, 0  },
+	{ { I2C_BOARD_INFO("lm99", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("lm90", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("lm90", 0x4d) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x18) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x19) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x1a) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x29) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x2a) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x2b) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4c) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4d) }, 0  },
+	{ { I2C_BOARD_INFO("adm1021", 0x4e) }, 0  },
+	{ { I2C_BOARD_INFO("lm63", 0x18) }, 0  },
+	{ { I2C_BOARD_INFO("lm63", 0x4e) }, 0  },
 	{ }
 };
 
@@ -89,9 +89,9 @@
 	struct nvbios_extdev_func extdev_entry;
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
-		struct i2c_board_info board[] = {
-			{ I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
-			{ }
+		struct nouveau_i2c_board_info board[] = {
+		  { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
+		  { }
 		};
 
 		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
@@ -101,9 +101,9 @@
 	}
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
-		struct i2c_board_info board[] = {
-			{ I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
-			{ }
+		struct nouveau_i2c_board_info board[] = {
+		  { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
+		  { }
 		};
 
 		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
index 42ba633..1d15c52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -126,7 +126,7 @@
 
 	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
 
-	intr = nv_rd32(therm, 0x20100);
+	intr = nv_rd32(therm, 0x20100) & 0x3ff;
 
 	/* THRS_4: downclock */
 	if (intr & 0x002) {
@@ -209,6 +209,19 @@
 	return nouveau_therm_preinit(&priv->base.base);
 }
 
+int
+nv84_therm_fini(struct nouveau_object *object, bool suspend)
+{
+	/* Disable PTherm IRQs */
+	nv_wr32(object, 0x20000, 0x00000000);
+
+	/* ACK all PTherm IRQs */
+	nv_wr32(object, 0x20100, 0xffffffff);
+	nv_wr32(object, 0x1100, 0x10000); /* PBUS */
+
+	return _nouveau_therm_fini(object, suspend);
+}
+
 struct nouveau_oclass
 nv84_therm_oclass = {
 	.handle = NV_SUBDEV(THERM, 0x84),
@@ -216,6 +229,6 @@
 		.ctor = nv84_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = _nouveau_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
index d11a7c4..3b2c458 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -94,6 +94,6 @@
 		.ctor = nva3_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = nva3_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 54c28bd..4dd4f81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -148,6 +148,6 @@
 		.ctor = nvd0_therm_ctor,
 		.dtor = _nouveau_therm_dtor,
 		.init = nvd0_therm_init,
-		.fini = _nouveau_therm_fini,
+		.fini = nv84_therm_fini,
 	},
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index dd38529..96f8f95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -76,6 +76,7 @@
 	spinlock_t lock;
 	struct nouveau_therm_trip_point *last_trip;
 	int mode;
+	int cstate;
 	int suspend;
 
 	/* bios */
@@ -144,6 +145,7 @@
 int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
 int nv50_fan_pwm_clock(struct nouveau_therm *);
 int nv84_temp_get(struct nouveau_therm *therm);
+int nv84_therm_fini(struct nouveau_object *object, bool suspend);
 
 int nva3_therm_fan_sense(struct nouveau_therm *);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index b80a330..cfde9eb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,8 +180,6 @@
 
 	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
 
-	nv_debug(therm, "polling the internal temperature\n");
-
 	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
 					     NOUVEAU_THERM_THRS_FANBOOST);
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 57711ec..c0bdd10 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -119,16 +119,8 @@
 {
 	struct nv04_timer_priv *priv = (void *)ptimer;
 	unsigned long flags;
-
-	/* avoid deleting an entry while the alarm intr is running */
 	spin_lock_irqsave(&priv->lock, flags);
-
-	/* delete the alarm from the list */
-	list_del(&alarm->head);
-
-	/* reset the head so as list_empty returns 1 */
-	INIT_LIST_HEAD(&alarm->head);
-
+	list_del_init(&alarm->head);
 	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
new file mode 100644
index 0000000..32794a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/vmap.h>
+#include <subdev/bios/volt.h>
+
+static int
+nouveau_volt_get(struct nouveau_volt *volt)
+{
+	if (volt->vid_get) {
+		int ret = volt->vid_get(volt), i;
+		if (ret >= 0) {
+			for (i = 0; i < volt->vid_nr; i++) {
+				if (volt->vid[i].vid == ret)
+					return volt->vid[i].uv;
+			}
+			ret = -EINVAL;
+		}
+		return ret;
+	}
+	return -ENODEV;
+}
+
+static int
+nouveau_volt_set(struct nouveau_volt *volt, u32 uv)
+{
+	if (volt->vid_set) {
+		int i, ret = -EINVAL;
+		for (i = 0; i < volt->vid_nr; i++) {
+			if (volt->vid[i].uv == uv) {
+				ret = volt->vid_set(volt, volt->vid[i].vid);
+				nv_debug(volt, "set %duv: %d\n", uv, ret);
+				break;
+			}
+		}
+		return ret;
+	}
+	return -ENODEV;
+}
+
+static int
+nouveau_volt_map(struct nouveau_volt *volt, u8 id)
+{
+	struct nouveau_bios *bios = nouveau_bios(volt);
+	struct nvbios_vmap_entry info;
+	u8  ver, len;
+	u16 vmap;
+
+	vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);
+	if (vmap) {
+		if (info.link != 0xff) {
+			int ret = nouveau_volt_map(volt, info.link);
+			if (ret < 0)
+				return ret;
+			info.min += ret;
+		}
+		return info.min;
+	}
+
+	return id ? id * 10000 : -ENODEV;
+}
+
+static int
+nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
+{
+	int ret = nouveau_volt_map(volt, id);
+	if (ret >= 0) {
+		int prev = nouveau_volt_get(volt);
+		if (!condition || prev < 0 ||
+		    (condition < 0 && ret < prev) ||
+		    (condition > 0 && ret > prev)) {
+			ret = nouveau_volt_set(volt, ret);
+		} else {
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+int
+_nouveau_volt_init(struct nouveau_object *object)
+{
+	struct nouveau_volt *volt = (void *)object;
+	int ret;
+
+	ret = nouveau_subdev_init(&volt->base);
+	if (ret)
+		return ret;
+
+	ret = volt->get(volt);
+	if (ret < 0) {
+		if (ret != -ENODEV)
+			nv_debug(volt, "current voltage unknown\n");
+		return 0;
+	}
+
+	nv_info(volt, "GPU voltage: %duv\n", ret);
+	return 0;
+}
+
+void
+_nouveau_volt_dtor(struct nouveau_object *object)
+{
+	struct nouveau_volt *volt = (void *)object;
+	nouveau_subdev_destroy(&volt->base);
+}
+
+int
+nouveau_volt_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_volt *volt;
+	struct nvbios_volt_entry ivid;
+	struct nvbios_volt info;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+	int ret, i;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
+				     "voltage", length, pobject);
+	volt = *pobject;
+	if (ret)
+		return ret;
+
+	volt->get = nouveau_volt_get;
+	volt->set = nouveau_volt_set;
+	volt->set_id = nouveau_volt_set_id;
+
+	data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
+	if (data && info.vidmask && info.base && info.step) {
+		for (i = 0; i < info.vidmask + 1; i++) {
+			if (info.base >= info.min &&
+			    info.base <= info.max) {
+				volt->vid[volt->vid_nr].uv = info.base;
+				volt->vid[volt->vid_nr].vid = i;
+				volt->vid_nr++;
+			}
+			info.base += info.step;
+		}
+		volt->vid_mask = info.vidmask;
+	} else
+	if (data && info.vidmask) {
+		for (i = 0; i < cnt; i++) {
+			data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
+						      &ivid);
+			if (data) {
+				volt->vid[volt->vid_nr].uv = ivid.voltage;
+				volt->vid[volt->vid_nr].vid = ivid.vid;
+				volt->vid_nr++;
+			}
+		}
+		volt->vid_mask = info.vidmask;
+	}
+
+	if (volt->vid_nr) {
+		for (i = 0; i < volt->vid_nr; i++) {
+			nv_debug(volt, "VID %02x: %duv\n",
+				 volt->vid[i].vid, volt->vid[i].uv);
+		}
+
+		/*XXX: this is an assumption.. there probably exists boards
+		 * out there with i2c-connected voltage controllers too..
+		 */
+		ret = nouveau_voltgpio_init(volt);
+		if (ret == 0) {
+			volt->vid_get = nouveau_voltgpio_get;
+			volt->vid_set = nouveau_voltgpio_set;
+		}
+	}
+
+	return ret;
+}
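
Note on nouveau_volt_create_() above: the VID table is built either from a base/step/vidmask style VBIOS table (VIDs 0..vidmask, keeping only entries whose voltage falls inside [min, max]) or from explicit per-entry voltages. A standalone sketch of the first case; the numbers are made up, not taken from any real board's BIOS:

#include <stdio.h>

int main(void)
{
	unsigned vidmask = 0x1f;	/* 5 VID bits -> up to 32 entries (assumed) */
	int base = 825000;		/* uv at VID 0 (assumed) */
	int step = 12500;		/* uv per VID step (assumed) */
	int min = 850000, max = 1100000;
	unsigned i;

	/* mirrors the base/step loop above: base advances every VID,
	 * but only in-range voltages become table entries */
	for (i = 0; i < vidmask + 1; i++) {
		if (base >= min && base <= max)
			printf("VID %02x: %duv\n", i, base);
		base += step;
	}
	return 0;
}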
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
new file mode 100644
index 0000000..755fa91
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gpio.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+#include <subdev/gpio.h>
+#include <subdev/bios/gpio.h>
+
+static const u8 tags[] = {
+	DCB_GPIO_VID0, DCB_GPIO_VID1, DCB_GPIO_VID2, DCB_GPIO_VID3,
+	DCB_GPIO_VID4, DCB_GPIO_VID5, DCB_GPIO_VID6, DCB_GPIO_VID7,
+};
+
+int
+nouveau_voltgpio_get(struct nouveau_volt *volt)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	u8 vid = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tags); i++) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->get(gpio, 0, tags[i], 0xff);
+			if (ret < 0)
+				return ret;
+			vid |= ret << i;
+		}
+	}
+
+	return vid;
+}
+
+int
+nouveau_voltgpio_set(struct nouveau_volt *volt, u8 vid)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_voltgpio_init(struct nouveau_volt *volt)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(volt);
+	struct dcb_gpio_func func;
+	int i;
+
+	/* check we have gpio function info for each vid bit.  on some
+	 * boards (ie. nvs295) the vid mask has more bits than there
+	 * are valid gpio functions... from traces, nvidia appear to
+	 * just touch the existing ones, so let's mask off the invalid
+	 * bits and continue with life
+	 */
+	for (i = 0; i < ARRAY_SIZE(tags); i++) {
+		if (volt->vid_mask & (1 << i)) {
+			int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+			if (ret) {
+				if (ret != -ENOENT)
+					return ret;
+				nv_debug(volt, "VID bit %d has no GPIO\n", i);
+				volt->vid_mask &= ~(1 << i);
+			}
+		}
+	}
+
+	return 0;
+}
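
Note on the file above: nouveau_voltgpio_get()/set() treat the VID as one bit per DCB_GPIO_VIDn line, gated by vid_mask. A minimal standalone sketch of that packing, with the GPIO accessors replaced by a plain array (everything here is illustrative, not driver API):

#include <stdio.h>

static int fake_gpio[8];	/* stands in for the DCB_GPIO_VID0..7 lines */

static unsigned vid_get(unsigned vid_mask)
{
	unsigned vid = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (vid_mask & (1 << i))
			vid |= (unsigned)fake_gpio[i] << i;
	return vid;
}

static void vid_set(unsigned vid_mask, unsigned vid)
{
	/* vid shifts every iteration, so bits outside vid_mask are dropped */
	for (int i = 0; i < 8; i++, vid >>= 1)
		if (vid_mask & (1 << i))
			fake_gpio[i] = vid & 1;
}

int main(void)
{
	vid_set(0x0f, 0x5);	/* VID bits 0-3 wired to GPIO lines */
	printf("vid read back: 0x%x\n", vid_get(0x0f));	/* prints 0x5 */
	return 0;
}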
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
new file mode 100644
index 0000000..87d5358
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/nv40.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/volt.h>
+
+struct nv40_volt_priv {
+	struct nouveau_volt base;
+};
+
+static int
+nv40_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_volt_priv *priv;
+	int ret;
+
+	ret = nouveau_volt_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_volt_oclass = {
+	.handle = NV_SUBDEV(VOLT, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_volt_ctor,
+		.dtor = _nouveau_volt_dtor,
+		.init = _nouveau_volt_init,
+		.fini = _nouveau_volt_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/Makefile b/drivers/gpu/drm/nouveau/dispnv04/Makefile
index ea3f5b8..424a489 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/Makefile
+++ b/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -5,6 +5,7 @@
 nouveau-y += dispnv04/dfp.o
 nouveau-y += dispnv04/disp.o
 nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/overlay.o
 nouveau-y += dispnv04/tvmodesnv17.o
 nouveau-y += dispnv04/tvnv04.o
 nouveau-y += dispnv04/tvnv17.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2e70462..2a15b98 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -210,8 +210,8 @@
 	sim_data.nvclk_khz = NVClk;
 	sim_data.bpp = bpp;
 	sim_data.two_heads = nv_two_heads(dev);
-	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
-	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+	if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+	    (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
 		uint32_t type;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@
 
 	if (nv_device(drm->device)->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
-	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
-		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11..0e3270c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@
 			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
 	}
 
-	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
 	/*
 	 * Time Sequencer
 	 */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 93dd23ff..936a71c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -490,10 +490,10 @@
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
 	 */
-	if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
-	    dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
 			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
 			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
@@ -625,13 +625,15 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
 	struct nouveau_i2c_port *port = i2c->find(i2c, 2);
-	struct i2c_board_info info[] = {
+	struct nouveau_i2c_board_info info[] = {
 		{
-			.type = "sil164",
-			.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
-			.platform_data = &(struct sil164_encoder_params) {
-				SIL164_INPUT_EDGE_RISING
-			}
+		    {
+		        .type = "sil164",
+		        .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+		        .platform_data = &(struct sil164_encoder_params) {
+		            SIL164_INPUT_EDGE_RISING
+		         }
+		    }, 0
 		},
 		{ }
 	};
@@ -646,7 +648,7 @@
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type]);
+			     &port->adapter, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4908d3f..b13ff0f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -140,6 +140,8 @@
 		func->save(encoder);
 	}
 
+	nouveau_overlay_init(dev);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 9928187..56a28db 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -123,11 +123,14 @@
 /* nv17_tv.c */
 int nv17_tv_create(struct drm_connector *, struct dcb_output *);
 
+/* overlay.c */
+void nouveau_overlay_init(struct drm_device *dev);
+
 static inline bool
 nv_two_heads(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
 	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +142,14 @@
 static inline bool
 nv_gf4_disp_arch(struct drm_device *dev)
 {
-	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+	return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
 }
 
 static inline bool
 nv_two_reg_pll(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	const int impl = dev->pci_device & 0x0ff0;
+	const int impl = dev->pdev->device & 0x0ff0;
 
 	if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
 		return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 973056b..aca76af 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,6 +27,7 @@
 #include "hw.h"
 
 #include <subdev/bios/pll.h>
+#include <subdev/fb.h>
 #include <subdev/clock.h>
 #include <subdev/timer.h>
 
@@ -220,7 +221,7 @@
 	int ret;
 
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+	    (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
 		uint32_t mpllP;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +231,7 @@
 		return 400000 / mpllP;
 	} else
 	if (plltype == PLL_MEMORY &&
-	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+	    (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
 		uint32_t clock;
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
@@ -664,6 +665,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_device *device = nv_device(drm->device);
 	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -680,10 +682,10 @@
 		nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
 		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
-		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
 		nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -740,7 +742,7 @@
 	}
 	/* NV11 and NV20 stop at 0x52. */
 	if (nv_gf4_disp_arch(dev)) {
-		if (nv_device(drm->device)->card_type == NV_10) {
+		if (nv_device(drm->device)->card_type < NV_20) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
 			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
new file mode 100644
index 0000000..3618ac6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2013 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Implementation based on the pre-KMS implementation in xf86-video-nouveau,
+ * written by Arthur Huillet.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+
+#include "nouveau_drm.h"
+
+#include "nouveau_bo.h"
+#include "nouveau_connector.h"
+#include "nouveau_display.h"
+#include "nvreg.h"
+
+
+struct nouveau_plane {
+	struct drm_plane base;
+	bool flip;
+	struct nouveau_bo *cur;
+
+	struct {
+		struct drm_property *colorkey;
+		struct drm_property *contrast;
+		struct drm_property *brightness;
+		struct drm_property *hue;
+		struct drm_property *saturation;
+		struct drm_property *iturbt_709;
+	} props;
+
+	int colorkey;
+	int contrast;
+	int brightness;
+	int hue;
+	int saturation;
+	int iturbt_709;
+};
+
+static uint32_t formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_UYVY,
+};
+
+/* Sine can be approximated with
+ * http://en.wikipedia.org/wiki/Bhaskara_I's_sine_approximation_formula
+ * sin(x degrees) ~= 4 x (180 - x) / (40500 - x (180 - x) )
+ * Note that this only works for the range [0, 180].
+ * Also note that sin(x) == -sin(x - 180)
+ */
+static inline int
+sin_mul(int degrees, int factor)
+{
+	if (degrees > 180) {
+		degrees -= 180;
+		factor *= -1;
+	}
+	return factor * 4 * degrees * (180 - degrees) /
+		(40500 - degrees * (180 - degrees));
+}
+
+/* cos(x) = sin(x + 90) */
+static inline int
+cos_mul(int degrees, int factor)
+{
+	return sin_mul((degrees + 90) % 360, factor);
+}
+
+static int
+nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		  unsigned int crtc_w, unsigned int crtc_h,
+		  uint32_t src_x, uint32_t src_y,
+		  uint32_t src_w, uint32_t src_h)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cur = nv_plane->cur;
+	bool flip = nv_plane->flip;
+	int format = ALIGN(src_w * 4, 0x100);
+	int soff = NV_PCRTC0_SIZE * nv_crtc->index;
+	int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
+	int ret;
+
+	if (format > 0xffff)
+		return -EINVAL;
+
+	ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	nv_plane->cur = nv_fb->nvbo;
+
+	/* Source parameters given in 16.16 fixed point, ignore fractional. */
+	src_x = src_x >> 16;
+	src_y = src_y >> 16;
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+	nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+
+	nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+	nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+	nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+	nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+	nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+	nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
+	nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+	nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
+		format |= NV_PVIDEO_FORMAT_PLANAR;
+	}
+	if (nv_plane->iturbt_709)
+		format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+	if (nv_plane->colorkey & (1 << 24))
+		format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+
+	if (fb->pixel_format == DRM_FORMAT_NV12) {
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+		nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+			nv_fb->nvbo->bo.offset + fb->offsets[1]);
+	}
+	nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+	nv_wr32(dev, NV_PVIDEO_STOP, 0);
+	/* TODO: wait for vblank? */
+	nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+	nv_plane->flip = !flip;
+
+	if (cur)
+		nouveau_bo_unpin(cur);
+
+	return 0;
+}
+
+static int
+nv10_disable_plane(struct drm_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->dev);
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	if (nv_plane->cur) {
+		nouveau_bo_unpin(nv_plane->cur);
+		nv_plane->cur = NULL;
+	}
+
+	return 0;
+}
+
+static void
+nv10_destroy_plane(struct drm_plane *plane)
+{
+	nv10_disable_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(plane);
+}
+
+static void
+nv10_set_params(struct nouveau_plane *plane)
+{
+	struct nouveau_device *dev = nouveau_dev(plane->base.dev);
+	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
+	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
+		(cos_mul(plane->hue, plane->saturation) & 0xffff);
+	u32 format = 0;
+
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+	nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+	nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+	nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+
+	if (plane->cur) {
+		if (plane->iturbt_709)
+			format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
+		if (plane->colorkey & (1 << 24))
+			format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
+		nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+			NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
+			NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
+			format);
+	}
+}
+
+static int
+nv10_set_property(struct drm_plane *plane,
+		  struct drm_property *property,
+		  uint64_t value)
+{
+	struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
+
+	if (property == nv_plane->props.colorkey)
+		nv_plane->colorkey = value;
+	else if (property == nv_plane->props.contrast)
+		nv_plane->contrast = value;
+	else if (property == nv_plane->props.brightness)
+		nv_plane->brightness = value;
+	else if (property == nv_plane->props.hue)
+		nv_plane->hue = value;
+	else if (property == nv_plane->props.saturation)
+		nv_plane->saturation = value;
+	else if (property == nv_plane->props.iturbt_709)
+		nv_plane->iturbt_709 = value;
+	else
+		return -EINVAL;
+
+	nv10_set_params(nv_plane);
+	return 0;
+}
+
+static const struct drm_plane_funcs nv10_plane_funcs = {
+	.update_plane = nv10_update_plane,
+	.disable_plane = nv10_disable_plane,
+	.set_property = nv10_set_property,
+	.destroy = nv10_destroy_plane,
+};
+
+static void
+nv10_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
+	int ret;
+
+	if (!plane)
+		return;
+
+	ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
+			     &nv10_plane_funcs,
+			     formats, ARRAY_SIZE(formats), false);
+	if (ret)
+		goto err;
+
+	/* Set up the plane properties */
+	plane->props.colorkey = drm_property_create_range(
+			device, 0, "colorkey", 0, 0x01ffffff);
+	plane->props.contrast = drm_property_create_range(
+			device, 0, "contrast", 0, 8192 - 1);
+	plane->props.brightness = drm_property_create_range(
+			device, 0, "brightness", 0, 1024);
+	plane->props.hue = drm_property_create_range(
+			device, 0, "hue", 0, 359);
+	plane->props.saturation = drm_property_create_range(
+			device, 0, "saturation", 0, 8192 - 1);
+	plane->props.iturbt_709 = drm_property_create_range(
+			device, 0, "iturbt_709", 0, 1);
+	if (!plane->props.colorkey ||
+	    !plane->props.contrast ||
+	    !plane->props.brightness ||
+	    !plane->props.hue ||
+	    !plane->props.saturation ||
+	    !plane->props.iturbt_709)
+		goto cleanup;
+
+	plane->colorkey = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.colorkey, plane->colorkey);
+
+	plane->contrast = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.contrast, plane->contrast);
+
+	plane->brightness = 512;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.brightness, plane->brightness);
+
+	plane->hue = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.hue, plane->hue);
+
+	plane->saturation = 0x1000;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.saturation, plane->saturation);
+
+	plane->iturbt_709 = 0;
+	drm_object_attach_property(&plane->base.base,
+				   plane->props.iturbt_709, plane->iturbt_709);
+
+	nv10_set_params(plane);
+	nv_wr32(dev, NV_PVIDEO_STOP, 1);
+	return;
+cleanup:
+	drm_plane_cleanup(&plane->base);
+err:
+	kfree(plane);
+	nv_error(dev, "Failed to create plane\n");
+}
+
+void
+nouveau_overlay_init(struct drm_device *device)
+{
+	struct nouveau_device *dev = nouveau_dev(device);
+	if (dev->chipset >= 0x10 && dev->chipset <= 0x40)
+		nv10_overlay_init(device);
+}
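
Note on sin_mul()/cos_mul() above: they keep the hue/saturation maths in integers by scaling Bhaskara I's approximation with the caller's fixed-point factor; for example, hue 0 with saturation 0x1000 gives sin_mul() = 0 and cos_mul() = 0x1000, so the chroma word programmed by nv10_set_params() is 0x00001000. A standalone sanity check against libm (illustrative only; build with something like: cc bhaskara.c -lm):

#include <stdio.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static int sin_mul(int degrees, int factor)
{
	if (degrees > 180) {
		degrees -= 180;
		factor *= -1;
	}
	return factor * 4 * degrees * (180 - degrees) /
		(40500 - degrees * (180 - degrees));
}

static int cos_mul(int degrees, int factor)
{
	return sin_mul((degrees + 90) % 360, factor);
}

int main(void)
{
	int hue;

	for (hue = 0; hue < 360; hue += 45) {
		printf("hue %3d: sin_mul=%6d libm=%6.0f  cos_mul=%6d libm=%6.0f\n",
		       hue,
		       sin_mul(hue, 0x1000), 0x1000 * sin(hue * M_PI / 180.0),
		       cos_mul(hue, 0x1000), 0x1000 * cos(hue * M_PI / 180.0));
	}
	return 0;
}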
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index bf13db4..cc4b208 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -37,15 +37,18 @@
 
 #include <subdev/i2c.h>
 
-static struct i2c_board_info nv04_tv_encoder_info[] = {
+static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
 	{
-		I2C_BOARD_INFO("ch7006", 0x75),
-		.platform_data = &(struct ch7006_encoder_params) {
-			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
-			0, 0, 0,
-			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
-			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
-		}
+		{
+			I2C_BOARD_INFO("ch7006", 0x75),
+			.platform_data = &(struct ch7006_encoder_params) {
+				CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+				0, 0, 0,
+				CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+				CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+			}
+		},
+		0
 	},
 	{ }
 };
@@ -229,7 +232,8 @@
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter, &nv04_tv_encoder_info[type]);
+				   &port->adapter,
+				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 8f467e7..3621e7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -87,6 +87,7 @@
 	case NV_04:
 		return 0x006e;
 	case NV_10:
+	case NV_11:
 	case NV_20:
 	case NV_30:
 	case NV_40:
@@ -130,7 +131,7 @@
 	if (chan->ntfy) {
 		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
 		nouveau_bo_unpin(chan->ntfy);
-		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
 	}
 
 	if (chan->heap.block_size)
@@ -178,10 +179,10 @@
 		getparam->value = device->chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		getparam->value = dev->pci_vendor;
+		getparam->value = dev->pdev->vendor;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		getparam->value = dev->pci_device;
+		getparam->value = dev->pdev->device;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
 		if (drm_pci_device_is_agp(dev))
@@ -320,7 +321,7 @@
 			goto done;
 	}
 
-	ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
 				    &init->notifier_handle);
 	if (ret)
 		goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dd7d2e1..f9a2df2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -317,6 +317,16 @@
 			has_optimus = 1;
 	}
 
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
+		vga_count++;
+
+		retval = nouveau_dsm_pci_probe(pdev);
+		if (retval & NOUVEAU_DSM_HAS_MUX)
+			has_dsm |= 1;
+		if (retval & NOUVEAU_DSM_HAS_OPT)
+			has_optimus = 1;
+	}
+
 	/* find the optimus DSM or the old v1 DSM */
 	if (has_optimus == 1) {
 		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 6e7a55f..2953c4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -11,10 +11,28 @@
 static int nouveau_agpmode = -1;
 module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
+struct nouveau_agpmode_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+
+	{},
+};
+
 static unsigned long
-get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
 {
 	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
+	int agpmode = nouveau_agpmode;
+	unsigned long mode = info->mode;
 
 	/*
 	 * FW seems to be broken on nv18, it makes the card lock up
@@ -24,11 +42,27 @@
 		mode &= ~PCI_AGP_COMMAND_FW;
 
 	/*
+	 * Go through the quirks list and adjust the agpmode accordingly.
+	 */
+	while (agpmode == -1 && quirk->hostbridge_vendor) {
+		if (info->id_vendor == quirk->hostbridge_vendor &&
+		    info->id_device == quirk->hostbridge_device &&
+		    device->pdev->vendor == quirk->chip_vendor &&
+		    device->pdev->device == quirk->chip_device) {
+			agpmode = quirk->mode;
+			nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+				agpmode);
+			break;
+		}
+		++quirk;
+	}
+
+	/*
 	 * AGP mode set in the command line.
 	 */
-	if (nouveau_agpmode > 0) {
+	if (agpmode > 0) {
 		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+		int rate = agpv3 ? agpmode / 4 : agpmode;
 
 		mode = (mode & ~0x7) | (rate & 0x7);
 	}
@@ -90,7 +124,7 @@
 		if (ret)
 			return;
 
-		mode.mode  = get_agp_mode(drm, info.mode);
+		mode.mode  = get_agp_mode(drm, &info);
 		mode.mode &= ~PCI_AGP_COMMAND_FW;
 
 		ret = drm_agp_enable(dev, mode);
@@ -139,7 +173,7 @@
 	}
 
 	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, info.mode);
+	mode.mode = get_agp_mode(drm, &info);
 
 	ret = drm_agp_enable(dev, mode);
 	if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2ffad21..630f6e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -82,7 +82,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 31;
-	bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+	bd = backlight_device_register("nv_backlight", connector->kdev, drm,
 				       &nv40_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
@@ -204,7 +204,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 100;
-	bd = backlight_device_register("nv_backlight", &connector->kdev,
+	bd = backlight_device_register("nv_backlight", connector->kdev,
 				       nv_encoder, ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 3e72876..4c3feaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -127,8 +127,8 @@
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
 	if (script == LVDS_RESET &&
-	    (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
-	     dev->pci_device == 0x0329))
+	    (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+	     dev->pdev->device == 0x0329))
 		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 755c38d..949ab0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -146,7 +146,7 @@
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->gem))
+	if (unlikely(nvbo->gem.filp))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -269,7 +269,8 @@
 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
 
-	if (nv_device(drm->device)->card_type == NV_10 &&
+	if ((nv_device(drm->device)->card_type == NV_10 ||
+	     nv_device(drm->device)->card_type == NV_11) &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -982,7 +983,7 @@
 		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = drm->ttm.chan;
+	struct nouveau_channel *chan = drm->ttm.chan;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
@@ -1267,7 +1268,7 @@
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 653dbbb..ff17c1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -27,7 +27,10 @@
 	u32 tile_flags;
 	struct nouveau_drm_tile *tile;
 
-	struct drm_gem_object *gem;
+	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
+	 * gem reference to it! For debugging, use gem.filp != NULL to test
+	 * whether it is valid. */
+	struct drm_gem_object gem;
 
 	/* protect by the ttm reservation lock */
 	int pin_refcnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index c5b36f9..1674882 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -100,6 +100,7 @@
 nouveau_connector_destroy(struct drm_connector *connector)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	nouveau_event_ref(NULL, &nv_connector->hpd_func);
 	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
@@ -214,9 +215,10 @@
 	} else {
 		connector->doublescan_allowed = true;
 		if (nv_device(drm->device)->card_type == NV_20 ||
-		   (nv_device(drm->device)->card_type == NV_10 &&
-		    (dev->pci_device & 0x0ff0) != 0x0100 &&
-		    (dev->pci_device & 0x0ff0) != 0x0150))
+		    ((nv_device(drm->device)->card_type == NV_10 ||
+		      nv_device(drm->device)->card_type == NV_11) &&
+		     (dev->pdev->device & 0x0ff0) != 0x0100 &&
+		     (dev->pdev->device & 0x0ff0) != 0x0150))
 			/* HW is broken */
 			connector->interlace_allowed = false;
 		else
@@ -932,10 +934,9 @@
 }
 
 static int
-nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+nouveau_connector_hotplug(void *data, int index)
 {
-	struct nouveau_connector *nv_connector =
-		container_of(event, struct nouveau_connector, hpd_func);
+	struct nouveau_connector *nv_connector = data;
 	schedule_work(&nv_connector->hpd_work);
 	return NVKM_EVENT_KEEP;
 }
@@ -1007,10 +1008,16 @@
 
 		ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
 				 DCB_GPIO_UNUSED, &nv_connector->hpd);
-		nv_connector->hpd_func.func = nouveau_connector_hotplug;
 		if (ret)
 			nv_connector->hpd.func = DCB_GPIO_UNUSED;
 
+		if (nv_connector->hpd.func != DCB_GPIO_UNUSED) {
+			nouveau_event_new(gpio->events, nv_connector->hpd.line,
+					  nouveau_connector_hotplug,
+					  nv_connector,
+					 &nv_connector->hpd_func);
+		}
+
 		nv_connector->type = nv_connector->dcb[0];
 		if (drm_conntype_from_dcb(nv_connector->type) ==
 					  DRM_MODE_CONNECTOR_Unknown) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 6e399aa..264a778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -69,7 +69,7 @@
 
 	struct dcb_gpio_func hpd;
 	struct work_struct hpd_work;
-	struct nouveau_eventh hpd_func;
+	struct nouveau_eventh *hpd_func;
 
 	int dithering_mode;
 	int dithering_depth;
@@ -107,7 +107,4 @@
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
-int
-nouveau_connector_bpp(struct drm_connector *);
-
 #endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7848590..44642d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -38,19 +38,92 @@
 
 #include "nouveau_fence.h"
 
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
 #include <engine/disp.h>
 
 #include <core/class.h>
 
+static int
+nouveau_display_vblank_handler(void *data, int head)
+{
+	struct nouveau_drm *drm = data;
+	drm_handle_vblank(drm->dev, head);
+	return NVKM_EVENT_KEEP;
+}
+
+int
+nouveau_display_vblank_enable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp) {
+		nouveau_event_get(disp->vblank[head]);
+		return 0;
+	}
+	return -EIO;
+}
+
+void
+nouveau_display_vblank_disable(struct drm_device *dev, int head)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	if (disp)
+		nouveau_event_put(disp->vblank[head]);
+}
+
+static void
+nouveau_display_vblank_fini(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	int i;
+
+	if (disp->vblank) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++)
+			nouveau_event_ref(NULL, &disp->vblank[i]);
+		kfree(disp->vblank);
+		disp->vblank = NULL;
+	}
+
+	drm_vblank_cleanup(dev);
+}
+
+static int
+nouveau_display_vblank_init(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+	int ret, i;
+
+	disp->vblank = kzalloc(dev->mode_config.num_crtc *
+			       sizeof(*disp->vblank), GFP_KERNEL);
+	if (!disp->vblank)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		ret = nouveau_event_new(pdisp->vblank, i,
+					nouveau_display_vblank_handler,
+					drm, &disp->vblank[i]);
+		if (ret) {
+			nouveau_display_vblank_fini(dev);
+			return ret;
+		}
+	}
+
+	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+	if (ret) {
+		nouveau_display_vblank_fini(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
 	if (fb->nvbo)
-		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
 
 	drm_framebuffer_cleanup(drm_fb);
 	kfree(fb);
@@ -63,7 +136,7 @@
 {
 	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
-	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+	return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
 }
 
 static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -227,9 +300,7 @@
 int
 nouveau_display_init(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 	int ret;
 
@@ -243,10 +314,7 @@
 	/* enable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_get(gpio->events, conn->hpd.line,
-					 &conn->hpd_func);
-		}
+		if (conn->hpd_func)
+			nouveau_event_get(conn->hpd_func);
 	}
 
 	return ret;
@@ -255,18 +323,13 @@
 void
 nouveau_display_fini(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_display *disp = nouveau_display(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct drm_connector *connector;
 
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
-		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
-			nouveau_event_put(gpio->events, conn->hpd.line,
-					 &conn->hpd_func);
-		}
+		if (conn->hpd_func)
+			nouveau_event_put(conn->hpd_func);
 	}
 
 	drm_kms_helper_poll_disable(dev);
@@ -352,7 +415,7 @@
 		goto disp_create_err;
 
 	if (dev->mode_config.num_crtc) {
-		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+		ret = nouveau_display_vblank_init(dev);
 		if (ret)
 			goto vblank_err;
 	}
@@ -374,7 +437,7 @@
 	struct nouveau_display *disp = nouveau_display(dev);
 
 	nouveau_backlight_exit(dev);
-	drm_vblank_cleanup(dev);
+	nouveau_display_vblank_fini(dev);
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -394,7 +457,7 @@
 
 	nouveau_display_fini(dev);
 
-	NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
+	NV_INFO(drm, "unpinning framebuffer(s)...\n");
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_framebuffer *nouveau_fb;
 
@@ -674,8 +737,8 @@
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
-	drm_gem_object_unreference_unlocked(bo->gem);
+	ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(&bo->gem);
 	return ret;
 }
 
@@ -688,7 +751,7 @@
 
 	gem = drm_gem_object_lookup(dev, file_priv, handle);
 	if (gem) {
-		struct nouveau_bo *bo = gem->driver_private;
+		struct nouveau_bo *bo = nouveau_gem_object(gem);
 		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
 		drm_gem_object_unreference_unlocked(gem);
 		return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 025c66f..8bc8bab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -36,6 +36,8 @@
 	int  (*init)(struct drm_device *);
 	void (*fini)(struct drm_device *);
 
+	struct nouveau_eventh **vblank;
+
 	struct drm_property *dithering_mode;
 	struct drm_property *dithering_depth;
 	struct drm_property *underscan_property;
@@ -59,6 +61,8 @@
 int  nouveau_display_suspend(struct drm_device *dev);
 void nouveau_display_repin(struct drm_device *dev);
 void nouveau_display_resume(struct drm_device *dev);
+int  nouveau_display_vblank_enable(struct drm_device *, int);
+void nouveau_display_vblank_disable(struct drm_device *, int);
 
 int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    struct drm_pending_vblank_event *event,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index e893c53..2418b0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -46,7 +46,8 @@
 #include "nouveau_gem.h"
 #include "nouveau_agp.h"
 #include "nouveau_vga.h"
-#include "nouveau_pm.h"
+#include "nouveau_sysfs.h"
+#include "nouveau_hwmon.h"
 #include "nouveau_acpi.h"
 #include "nouveau_bios.h"
 #include "nouveau_ioctl.h"
@@ -78,41 +79,6 @@
 
 static struct drm_driver driver;
 
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
-	struct nouveau_drm *drm =
-		container_of(event, struct nouveau_drm, vblank[head]);
-	drm_handle_vblank(drm->dev, head);
-	return NVKM_EVENT_KEEP;
-}
-
-static int
-nouveau_drm_vblank_enable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-
-	if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
-		return -EIO;
-	WARN_ON_ONCE(drm->vblank[head].func);
-	drm->vblank[head].func = nouveau_drm_vblank_handler;
-	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
-	return 0;
-}
-
-static void
-nouveau_drm_vblank_disable(struct drm_device *dev, int head)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-	if (drm->vblank[head].func)
-		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
-	else
-		WARN_ON_ONCE(1);
-	drm->vblank[head].func = NULL;
-}
-
 static u64
 nouveau_name(struct pci_dev *pdev)
 {
@@ -177,7 +143,8 @@
 
 	/* initialise synchronisation routines */
 	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
-	else if (device->chipset   <  0x17) ret = nv10_fence_create(drm);
+	else if (device->card_type < NV_11 ||
+		 device->chipset   <  0x17) ret = nv10_fence_create(drm);
 	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
 	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
 	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
@@ -418,8 +385,8 @@
 			goto fail_dispinit;
 	}
 
-	nouveau_pm_init(dev);
-
+	nouveau_sysfs_init(dev);
+	nouveau_hwmon_init(dev);
 	nouveau_accel_init(drm);
 	nouveau_fbcon_init(dev);
 
@@ -455,8 +422,8 @@
 	pm_runtime_get_sync(dev->dev);
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
-
-	nouveau_pm_fini(dev);
+	nouveau_hwmon_fini(dev);
+	nouveau_sysfs_fini(dev);
 
 	if (dev->mode_config.num_crtc)
 		nouveau_display_fini(dev);
@@ -496,16 +463,16 @@
 	int ret;
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "suspending display...\n");
+		NV_INFO(drm, "suspending display...\n");
 		ret = nouveau_display_suspend(dev);
 		if (ret)
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "evicting buffers...\n");
+	NV_INFO(drm, "evicting buffers...\n");
 	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 
-	NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
+	NV_INFO(drm, "waiting for kernel channels to go idle...\n");
 	if (drm->cechan) {
 		ret = nouveau_channel_idle(drm->cechan);
 		if (ret)
@@ -518,7 +485,7 @@
 			return ret;
 	}
 
-	NV_SUSPEND(drm, "suspending client object trees...\n");
+	NV_INFO(drm, "suspending client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->suspend) {
 		if (!nouveau_fence(drm)->suspend(drm))
 			return -ENOMEM;
@@ -530,7 +497,7 @@
 			goto fail_client;
 	}
 
-	NV_SUSPEND(drm, "suspending kernel object tree...\n");
+	NV_INFO(drm, "suspending kernel object tree...\n");
 	ret = nouveau_client_fini(&drm->client.base, true);
 	if (ret)
 		goto fail_client;
@@ -544,7 +511,7 @@
 	}
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_resume(dev);
 	}
 	return ret;
@@ -563,7 +530,6 @@
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_suspend(drm_dev);
 	if (ret)
 		return ret;
@@ -571,8 +537,6 @@
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
-
 	return 0;
 }
 
@@ -582,15 +546,15 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_SUSPEND(drm, "re-enabling device...\n");
+	NV_INFO(drm, "re-enabling device...\n");
 
 	nouveau_agp_reset(drm);
 
-	NV_SUSPEND(drm, "resuming kernel object tree...\n");
+	NV_INFO(drm, "resuming kernel object tree...\n");
 	nouveau_client_init(&drm->client.base);
 	nouveau_agp_init(drm);
 
-	NV_SUSPEND(drm, "resuming client object trees...\n");
+	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
 		nouveau_fence(drm)->resume(drm);
 
@@ -599,10 +563,9 @@
 	}
 
 	nouveau_run_vbios_init(dev);
-	nouveau_pm_resume(dev);
 
 	if (dev->mode_config.num_crtc) {
-		NV_SUSPEND(drm, "resuming display...\n");
+		NV_INFO(drm, "resuming display...\n");
 		nouveau_display_repin(dev);
 	}
 
@@ -626,19 +589,15 @@
 		return ret;
 	pci_set_master(pdev);
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -648,12 +607,10 @@
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 1);
 
 	ret = nouveau_do_suspend(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return ret;
 }
 
@@ -663,18 +620,14 @@
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	nv_suspend_set_printk_level(NV_DBG_INFO);
 	ret = nouveau_do_resume(drm_dev);
-	if (ret) {
-		nv_suspend_set_printk_level(NV_DBG_DEBUG);
+	if (ret)
 		return ret;
-	}
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_fbcon_set_suspend(drm_dev, 0);
 	nouveau_fbcon_zfill_all(drm_dev);
 	if (drm_dev->mode_config.num_crtc)
 		nouveau_display_resume(drm_dev);
-	nv_suspend_set_printk_level(NV_DBG_DEBUG);
 	return 0;
 }
 
@@ -816,8 +769,8 @@
 #endif
 
 	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = nouveau_drm_vblank_enable,
-	.disable_vblank = nouveau_drm_vblank_disable,
+	.enable_vblank = nouveau_display_vblank_enable,
+	.disable_vblank = nouveau_display_vblank_disable,
 
 	.ioctls = nouveau_ioctls,
 	.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
@@ -834,7 +787,6 @@
 	.gem_prime_vmap = nouveau_gem_prime_vmap,
 	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
@@ -879,6 +831,7 @@
 	if (nouveau_runtime_pm == 0)
 		return -EINVAL;
 
+	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -915,6 +868,7 @@
 	nv_mask(device, 0x88488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	nv_debug_level(NORMAL);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 994fd6e..71ed2da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -51,10 +51,11 @@
 };
 
 enum nouveau_drm_handle {
-	NVDRM_CLIENT = 0xffffffff,
-	NVDRM_DEVICE = 0xdddddddd,
-	NVDRM_PUSH   = 0xbbbb0000, /* |= client chid */
-	NVDRM_CHAN   = 0xcccc0000, /* |= client chid */
+	NVDRM_CLIENT  = 0xffffffff,
+	NVDRM_DEVICE  = 0xdddddddd,
+	NVDRM_CONTROL = 0xdddddddc,
+	NVDRM_PUSH    = 0xbbbb0000, /* |= client chid */
+	NVDRM_CHAN    = 0xcccc0000, /* |= client chid */
 };
 
 struct nouveau_cli {
@@ -127,10 +128,10 @@
 	struct nvbios vbios;
 	struct nouveau_display *display;
 	struct backlight_device *backlight;
-	struct nouveau_eventh vblank[4];
 
 	/* power management */
-	struct nouveau_pm *pm;
+	struct nouveau_hwmon *hwmon;
+	struct nouveau_sysfs *sysfs;
 
 	/* display power reference */
 	bool have_disp_power_ref;
@@ -154,7 +155,6 @@
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
 #define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index a86ecf6..7903e0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -420,7 +420,7 @@
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
-		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
 	drm_fb_helper_fini(&fbcon->helper);
@@ -503,34 +503,45 @@
 	drm->fbcon = NULL;
 }
 
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+void
+nouveau_fbcon_save_disable_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
-	drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	if (drm->fbcon) {
+		drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+		drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+	}
 }
 
-void nouveau_fbcon_restore_accel(struct drm_device *dev)
+void
+nouveau_fbcon_restore_accel(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	if (drm->fbcon) {
+		drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+	}
 }
 
-void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	console_lock();
-	if (state == 0)
-		nouveau_fbcon_save_disable_accel(dev);
-	fb_set_suspend(drm->fbcon->helper.fbdev, state);
-	if (state == 1)
-		nouveau_fbcon_restore_accel(dev);
-	console_unlock();
+	if (drm->fbcon) {
+		console_lock();
+		if (state == 0)
+			nouveau_fbcon_save_disable_accel(dev);
+		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		if (state == 1)
+			nouveau_fbcon_restore_accel(dev);
+		console_unlock();
+	}
 }
 
-void nouveau_fbcon_zfill_all(struct drm_device *dev)
+void
+nouveau_fbcon_zfill_all(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	nouveau_fbcon_zfill(dev, drm->fbcon);
+	if (drm->fbcon) {
+		nouveau_fbcon_zfill(dev, drm->fbcon);
+	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index be31499..34b8271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -165,17 +165,11 @@
 	return !fence->channel;
 }
 
-struct nouveau_fence_uevent {
-	struct nouveau_eventh handler;
-	struct nouveau_fence_priv *priv;
-};
-
 static int
-nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+nouveau_fence_wait_uevent_handler(void *data, int index)
 {
-	struct nouveau_fence_uevent *uevent =
-		container_of(event, struct nouveau_fence_uevent, handler);
-	wake_up_all(&uevent->priv->waiting);
+	struct nouveau_fence_priv *priv = data;
+	wake_up_all(&priv->waiting);
 	return NVKM_EVENT_KEEP;
 }
 
@@ -186,13 +180,16 @@
 	struct nouveau_channel *chan = fence->channel;
 	struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
 	struct nouveau_fence_priv *priv = chan->drm->fence;
-	struct nouveau_fence_uevent uevent = {
-		.handler.func = nouveau_fence_wait_uevent_handler,
-		.priv = priv,
-	};
+	struct nouveau_eventh *handler;
 	int ret = 0;
 
-	nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+	ret = nouveau_event_new(pfifo->uevent, 0,
+				nouveau_fence_wait_uevent_handler,
+				priv, &handler);
+	if (ret)
+		return ret;
+
+	nouveau_event_get(handler);
 
 	if (fence->timeout) {
 		unsigned long timeout = fence->timeout - jiffies;
@@ -224,7 +221,7 @@
 		}
 	}
 
-	nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+	nouveau_event_ref(NULL, &handler);
 	if (unlikely(ret < 0))
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f32b712..418a617 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -34,29 +34,20 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
-	return 0;
-}
-
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 
-	if (!nvbo)
-		return;
-	nvbo->gem = NULL;
-
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	ttm_bo_unref(&bo);
-
 	drm_gem_object_release(gem);
-	kfree(gem);
+
+	/* reset filp so nouveau_bo_del_ttm() can test for it */
+	gem->filp = NULL;
+	ttm_bo_unref(&bo);
 }
 
 int
@@ -186,14 +177,15 @@
 	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
 	}
 
-	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
-	nvbo->gem->driver_private = nvbo;
+	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
 	return 0;
 }
 
@@ -250,15 +242,15 @@
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
 	if (ret == 0) {
-		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
 		if (ret)
 			drm_gem_handle_delete(file_priv, req->info.handle);
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(nvbo->gem);
+	drm_gem_object_unreference_unlocked(&nvbo->gem);
 	return ret;
 }
 
@@ -266,7 +258,7 @@
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 		       uint32_t write_domains, uint32_t valid_domains)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
 		(write_domains ? write_domains : read_domains);
@@ -327,7 +319,7 @@
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
-		drm_gem_object_unreference_unlocked(nvbo->gem);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
@@ -376,7 +368,7 @@
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
-		nvbo = gem->driver_private;
+		nvbo = nouveau_gem_object(gem);
 		if (nvbo == res_bo) {
 			res_bo = NULL;
 			drm_gem_object_unreference_unlocked(gem);
@@ -478,7 +470,7 @@
 			return ret;
 		}
 
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 502e429..7caca05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -12,14 +12,13 @@
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
-	return gem ? gem->driver_private : NULL;
+	return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
 }
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
 			   uint32_t domain, uint32_t tile_mode,
 			   uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
 extern void nouveau_gem_object_close(struct drm_gem_object *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
new file mode 100644
index 0000000..38a4db5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_hwmon.h"
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int temp = therm->temp_get(therm);
+
+	if (temp < 0)
+		return temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+						  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+					 struct device_attribute *a, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+			  nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+				     struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	      therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+					 struct device_attribute *a,
+					 const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp,
+			  nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+					  struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+					      struct device_attribute *a,
+					      const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp_hyst,
+			  nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+						  nouveau_hwmon_set_max_temp,
+						  0);
+
+static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+			    char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_max_temp_hyst,
+			  nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+						nouveau_hwmon_critical_temp,
+						nouveau_hwmon_set_critical_temp,
+						0);
+
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+				     struct device_attribute *a,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_critical_temp_hyst,
+			  nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp,
+					nouveau_hwmon_set_emergency_temp,
+					0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+				      struct device_attribute *a,
+				      const char *buf,
+				      size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp_hyst,
+					nouveau_hwmon_set_emergency_temp_hyst,
+					0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+						nouveau_hwmon_show_update_rate,
+						NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+}
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
+			  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_enable(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_enable,
+			  nouveau_hwmon_set_pwm1_enable, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->fan_get(therm);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
+		       const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret = -ENODEV;
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->fan_set(therm, value);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1,
+			  nouveau_hwmon_set_pwm1, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_min(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_min,
+			  nouveau_hwmon_set_pwm1_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_max(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_max,
+			  nouveau_hwmon_set_pwm1_max, 0);
+
+static struct attribute *hwmon_default_attributes[] = {
+	&sensor_dev_attr_name.dev_attr.attr,
+	&sensor_dev_attr_update_rate.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_temp_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_default_attrgroup = {
+	.attrs = hwmon_default_attributes,
+};
+static const struct attribute_group hwmon_temp_attrgroup = {
+	.attrs = hwmon_temp_attributes,
+};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+	.attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+	.attrs = hwmon_pwm_fan_attributes,
+};
+#endif
+
+int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	struct nouveau_hwmon *hwmon;
+	struct device *hwmon_dev;
+	int ret = 0;
+
+	hwmon = drm->hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+	hwmon->dev = dev;
+
+	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+		return -ENODEV;
+
+	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+	if (IS_ERR(hwmon_dev)) {
+		ret = PTR_ERR(hwmon_dev);
+		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
+		return ret;
+	}
+	dev_set_drvdata(hwmon_dev, dev);
+
+	/* set the default attributes */
+	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
+	if (ret)
+		goto error;
+
+	/* if the card has a working thermal sensor */
+	if (therm->temp_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_temp_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card has a pwm fan */
+	/*XXX: incorrect, need better detection for this, some boards have
+	 *     the gpio entries for pwm fan control even when there's no
+	 *     actual fan connected to it... therm table? */
+	if (therm->fan_get && therm->fan_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_pwm_fan_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card can read the fan rpm */
+	if (therm->fan_sense(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_fan_rpm_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	hwmon->hwmon = hwmon_dev;
+
+	return 0;
+
+error:
+	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
+	hwmon_device_unregister(hwmon_dev);
+	hwmon->hwmon = NULL;
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_hwmon *hwmon = nouveau_hwmon(dev);
+
+	if (hwmon->hwmon) {
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_default_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_temp_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
+
+		hwmon_device_unregister(hwmon->hwmon);
+	}
+
+	nouveau_drm(dev)->hwmon = NULL;
+	kfree(hwmon);
+#endif
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.h b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
new file mode 100644
index 0000000..62ccbb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWMON_H__
+#define __NOUVEAU_HWMON_H__
+
+struct nouveau_hwmon {
+	struct drm_device *dev;
+	struct device *hwmon;
+};
+
+static inline struct nouveau_hwmon *
+nouveau_hwmon(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->hwmon;
+}
+
+/* nouveau_hwmon.c */
+int  nouveau_hwmon_init(struct drm_device *dev);
+void nouveau_hwmon_fini(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
deleted file mode 100644
index 6976875..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_HWSQ_H__
-#define __NOUVEAU_HWSQ_H__
-
-struct hwsq_ucode {
-	u8 data[0x200];
-	union {
-		u8  *u08;
-		u16 *u16;
-		u32 *u32;
-	} ptr;
-	u16 len;
-
-	u32 reg;
-	u32 val;
-};
-
-static inline void
-hwsq_init(struct hwsq_ucode *hwsq)
-{
-	hwsq->ptr.u08 = hwsq->data;
-	hwsq->reg = 0xffffffff;
-	hwsq->val = 0xffffffff;
-}
-
-static inline void
-hwsq_fini(struct hwsq_ucode *hwsq)
-{
-	do {
-		*hwsq->ptr.u08++ = 0x7f;
-		hwsq->len = hwsq->ptr.u08 - hwsq->data;
-	} while (hwsq->len & 3);
-	hwsq->ptr.u08 = hwsq->data;
-}
-
-static inline void
-hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
-{
-	u32 shift = 0;
-	while (usec & ~3) {
-		usec >>= 2;
-		shift++;
-	}
-
-	*hwsq->ptr.u08++ = (shift << 2) | usec;
-}
-
-static inline void
-hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
-{
-	flag += 0x80;
-	if (val >= 0)
-		flag += 0x20;
-	if (val >= 1)
-		flag += 0x20;
-	*hwsq->ptr.u08++ = flag;
-}
-
-static inline void
-hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
-{
-	*hwsq->ptr.u08++ = 0x5f;
-	*hwsq->ptr.u08++ = v0;
-	*hwsq->ptr.u08++ = v1;
-}
-
-static inline void
-hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
-{
-	if (val != hwsq->val) {
-		if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
-			*hwsq->ptr.u08++ = 0x42;
-			*hwsq->ptr.u16++ = (val & 0x0000ffff);
-		} else {
-			*hwsq->ptr.u08++ = 0xe2;
-			*hwsq->ptr.u32++ = val;
-		}
-
-		hwsq->val = val;
-	}
-
-	if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
-		*hwsq->ptr.u08++ = 0x40;
-		*hwsq->ptr.u16++ = (reg & 0x0000ffff);
-	} else {
-		*hwsq->ptr.u08++ = 0xe0;
-		*hwsq->ptr.u32++ = reg;
-	}
-	hwsq->reg = reg;
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
deleted file mode 100644
index 4f6a572..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
- * Copyright 2005 Stephane Marchesin
- *
- * The Weather Channel (TM) funded Tungsten Graphics to develop the
- * initial release of the Radeon 8500 driver under the XFree86 license.
- * This notice must be preserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Skeggs <bskeggs@redhat.com>
- *    Roy Spliet <r.spliet@student.tudelft.nl>
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/fb.h>
-
-static int
-nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
-	/* XXX: I don't trust the -1's and +1's... they must come
-	 *      from somewhere! */
-	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
-		    1 << 16 |
-		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
-		    (e->tCL + 2 - (t->tCWL - 1));
-
-	t->reg[2] = 0x20200000 |
-		    ((t->tCWL - 1) << 24 |
-		     e->tRRD << 16 |
-		     e->tRCDWR << 8 |
-		     e->tRCDRD);
-
-	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2]);
-	return 0;
-}
-
-static int
-nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
-
-	if (bit_table(dev, 'P', &P))
-		return -EINVAL;
-
-	switch (min(len, (u8) 22)) {
-	case 22:
-		unk21 = e->tUNK_21;
-	case 21:
-		unk20 = e->tUNK_20;
-	case 20:
-		if (e->tCWL > 0)
-			t->tCWL = e->tCWL;
-	case 19:
-		unk18 = e->tUNK_18;
-		break;
-	}
-
-	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
-
-	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
-				max(unk18, (u8) 1) << 16 |
-				(e->tWTR + 2 + (t->tCWL - 1)) << 8;
-
-	t->reg[2] = ((t->tCWL - 1) << 24 |
-		    e->tRRD << 16 |
-		    e->tRCDWR << 8 |
-		    e->tRCDRD);
-
-	t->reg[4] = e->tUNK_13 << 8  | e->tUNK_13;
-
-	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
-
-	t->reg[8] = boot->reg[8] & 0xffffff00;
-
-	if (P.version == 1) {
-		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
-
-		t->reg[3] = (0x14 + e->tCL) << 24 |
-			    0x16 << 16 |
-			    (e->tCL - 1) << 8 |
-			    (e->tCL - 1);
-
-		t->reg[4] |= boot->reg[4] & 0xffff0000;
-
-		t->reg[6] = (0x33 - t->tCWL) << 16 |
-			    t->tCWL << 8 |
-			    (0x2e + e->tCL - t->tCWL);
-
-		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
-
-		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
-		if (pfb->ram->type == NV_MEM_TYPE_DDR2) {
-			t->reg[5] |= (e->tCL + 3) << 8;
-			t->reg[6] |= (t->tCWL - 2) << 8;
-			t->reg[8] |= (e->tCL - 4);
-		} else {
-			t->reg[5] |= (e->tCL + 2) << 8;
-			t->reg[6] |= t->tCWL << 8;
-			t->reg[8] |= (e->tCL - 2);
-		}
-	} else {
-		t->reg[1] |= (5 + e->tCL - (t->tCWL));
-
-		/* XXX: 0xb? 0x30? */
-		t->reg[3] = (0x30 + e->tCL) << 24 |
-			    (boot->reg[3] & 0x00ff0000)|
-			    (0xb + e->tCL) << 8 |
-			    (e->tCL - 1);
-
-		t->reg[4] |= (unk20 << 24 | unk21 << 16);
-
-		/* XXX: +6? */
-		t->reg[5] |= (t->tCWL + 6) << 8;
-
-		t->reg[6] = (0x5a + e->tCL) << 16 |
-			    (6 - e->tCL + t->tCWL) << 8 |
-			    (0x50 + e->tCL - t->tCWL);
-
-		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
-		t->reg[7] = (tmp7_3 << 24) |
-			    ((tmp7_3 - 6 + e->tCL) << 16) |
-			    0x202;
-	}
-
-	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(drm, "         230: %08x %08x %08x %08x\n",
-		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
-	NV_DEBUG(drm, "         240: %08x\n", t->reg[8]);
-	return 0;
-}
-
-static int
-nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (e->tCWL > 0)
-		t->tCWL = e->tCWL;
-
-	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
-		     e->tRFC << 8 | e->tRC);
-
-	t->reg[1] = (boot->reg[1] & 0xff000000) |
-		    (e->tRCDWR & 0x0f) << 20 |
-		    (e->tRCDRD & 0x0f) << 14 |
-		    (t->tCWL << 7) |
-		    (e->tCL & 0x0f);
-
-	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
-		    e->tWR << 16 | e->tWTR << 8;
-
-	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
-		    (e->tUNK_21 & 0xf) << 5 |
-		    (e->tUNK_13 & 0x1f);
-
-	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
-		    (e->tRRD&0x1f) << 15;
-
-	NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
-		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
-	NV_DEBUG(drm, "         2a0: %08x\n", t->reg[4]);
-	return 0;
-}
-
-/**
- * MR generation methods
- */
-
-static int
-nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
-		    struct nouveau_pm_tbl_entry *e, u8 len,
-		    struct nouveau_pm_memtiming *boot,
-		    struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	t->drive_strength = 0;
-	if (len < 15) {
-		t->odt = boot->odt;
-	} else {
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x100f) |
-		   (e->tCL) << 4 |
-		   (e->tWR - 1) << 9;
-	t->mr[1] = (boot->mr[1] & 0x101fbb) |
-		   (t->odt & 0x1) << 2 |
-		   (t->odt & 0x2) << 5;
-
-	NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
-	return 0;
-}
-
-static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
-	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
-
-static int
-nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
-		    struct nouveau_pm_tbl_entry *e, u8 len,
-		    struct nouveau_pm_memtiming *boot,
-		    struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u8 cl = e->tCL - 4;
-
-	t->drive_strength = 0;
-	if (len < 15) {
-		t->odt = boot->odt;
-	} else {
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (e->tCWL < 5) {
-		NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
-		return -ERANGE;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x180b) |
-		   /* CAS */
-		   (cl & 0x7) << 4 |
-		   (cl & 0x8) >> 1 |
-		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
-	t->mr[1] = (boot->mr[1] & 0x101dbb) |
-		   (t->odt & 0x1) << 2 |
-		   (t->odt & 0x2) << 5 |
-		   (t->odt & 0x4) << 7;
-	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
-	return 0;
-}
-
-static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
-	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
-static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
-	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
-
-static int
-nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (len < 15) {
-		t->drive_strength = boot->drive_strength;
-		t->odt = boot->odt;
-	} else {
-		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
-		t->odt = e->RAM_FT1 & 0x07;
-	}
-
-	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0xe0b) |
-		   /* CAS */
-		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
-		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
-	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
-		   (t->odt << 2) |
-		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
-	t->mr[2] = boot->mr[2];
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
-		      t->mr[0], t->mr[1], t->mr[2]);
-	return 0;
-}
-
-static int
-nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
-		     struct nouveau_pm_tbl_entry *e, u8 len,
-		     struct nouveau_pm_memtiming *boot,
-		     struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (len < 15) {
-		t->drive_strength = boot->drive_strength;
-		t->odt = boot->odt;
-	} else {
-		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
-		t->odt = e->RAM_FT1 & 0x03;
-	}
-
-	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
-		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
-		return -ERANGE;
-	}
-
-	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
-		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
-		return -ERANGE;
-	}
-
-	if (t->odt > 3) {
-		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
-			t->id, t->odt);
-		t->odt = 0;
-	}
-
-	t->mr[0] = (boot->mr[0] & 0x007) |
-		   ((e->tCL - 5) << 3) |
-		   ((e->tWR - 4) << 8);
-	t->mr[1] = (boot->mr[1] & 0x1007f0) |
-		   t->drive_strength |
-		   (t->odt << 2);
-
-	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
-	return 0;
-}
-
-int
-nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
-			struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
-	struct nouveau_pm_tbl_entry *e;
-	u8 ver, len, *ptr, *ramcfg;
-	int ret;
-
-	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
-	if (!ptr || ptr[0] == 0x00) {
-		*t = *boot;
-		return 0;
-	}
-	e = (struct nouveau_pm_tbl_entry *)ptr;
-
-	t->tCWL = boot->tCWL;
-
-	switch (device->card_type) {
-	case NV_40:
-		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	case NV_50:
-		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	case NV_C0:
-	case NV_D0:
-		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
-		break;
-	default:
-		ret = -ENODEV;
-		break;
-	}
-
-	switch (pfb->ram->type * !ret) {
-	case NV_MEM_TYPE_GDDR3:
-		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_GDDR5:
-		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_DDR2:
-		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
-		break;
-	case NV_MEM_TYPE_DDR3:
-		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
-	if (ramcfg) {
-		int dll_off;
-
-		if (ver == 0x00)
-			dll_off = !!(ramcfg[3] & 0x04);
-		else
-			dll_off = !!(ramcfg[2] & 0x40);
-
-		switch (pfb->ram->type) {
-		case NV_MEM_TYPE_GDDR3:
-			t->mr[1] &= ~0x00000040;
-			t->mr[1] |=  0x00000040 * dll_off;
-			break;
-		default:
-			t->mr[1] &= ~0x00000001;
-			t->mr[1] |=  0x00000001 * dll_off;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-void
-nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	u32 timing_base, timing_regs, mr_base;
-	int i;
-
-	if (device->card_type >= 0xC0) {
-		timing_base = 0x10f290;
-		mr_base = 0x10f300;
-	} else {
-		timing_base = 0x100220;
-		mr_base = 0x1002c0;
-	}
-
-	t->id = -1;
-
-	switch (device->card_type) {
-	case NV_50:
-		timing_regs = 9;
-		break;
-	case NV_C0:
-	case NV_D0:
-		timing_regs = 5;
-		break;
-	case NV_30:
-	case NV_40:
-		timing_regs = 3;
-		break;
-	default:
-		timing_regs = 0;
-		return;
-	}
-	for(i = 0; i < timing_regs; i++)
-		t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
-
-	t->tCWL = 0;
-	if (device->card_type < NV_C0) {
-		t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
-	} else if (device->card_type <= NV_D0) {
-		t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
-	}
-
-	t->mr[0] = nv_rd32(device, mr_base);
-	t->mr[1] = nv_rd32(device, mr_base + 0x04);
-	t->mr[2] = nv_rd32(device, mr_base + 0x20);
-	t->mr[3] = nv_rd32(device, mr_base + 0x24);
-
-	t->odt = 0;
-	t->drive_strength = 0;
-
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_DDR3:
-		t->odt |= (t->mr[1] & 0x200) >> 7;
-	case NV_MEM_TYPE_DDR2:
-		t->odt |= (t->mr[1] & 0x04) >> 2 |
-			  (t->mr[1] & 0x40) >> 5;
-		break;
-	case NV_MEM_TYPE_GDDR3:
-	case NV_MEM_TYPE_GDDR5:
-		t->drive_strength = t->mr[1] & 0x03;
-		t->odt = (t->mr[1] & 0x0c) >> 2;
-		break;
-	default:
-		break;
-	}
-}
-
-int
-nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
-		 struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(exec->dev);
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nouveau_pm_memtiming *info = &perflvl->timing;
-	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
-	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
-	u32 mr1_dlloff;
-
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_DDR2:
-		tDLLK = 2000;
-		mr1_dlloff = 0x00000001;
-		break;
-	case NV_MEM_TYPE_DDR3:
-		tDLLK = 12000;
-		tCKSRE = 2000;
-		tXS = 1000;
-		mr1_dlloff = 0x00000001;
-		break;
-	case NV_MEM_TYPE_GDDR3:
-		tDLLK = 40000;
-		mr1_dlloff = 0x00000040;
-		break;
-	default:
-		NV_ERROR(drm, "cannot reclock unsupported memtype\n");
-		return -ENODEV;
-	}
-
-	/* fetch current MRs */
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_GDDR3:
-	case NV_MEM_TYPE_DDR3:
-		mr[2] = exec->mrg(exec, 2);
-	default:
-		mr[1] = exec->mrg(exec, 1);
-		mr[0] = exec->mrg(exec, 0);
-		break;
-	}
-
-	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh  */
-	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
-		exec->precharge(exec);
-		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
-		exec->wait(exec, tMRD);
-	}
-
-	/* enter self-refresh mode */
-	exec->precharge(exec);
-	exec->refresh(exec);
-	exec->refresh(exec);
-	exec->refresh_auto(exec, false);
-	exec->refresh_self(exec, true);
-	exec->wait(exec, tCKSRE);
-
-	/* modify input clock frequency */
-	exec->clock_set(exec);
-
-	/* exit self-refresh mode */
-	exec->wait(exec, tCKSRX);
-	exec->precharge(exec);
-	exec->refresh_self(exec, false);
-	exec->refresh_auto(exec, true);
-	exec->wait(exec, tXS);
-	exec->wait(exec, tXS);
-
-	/* update MRs */
-	if (mr[2] != info->mr[2]) {
-		exec->mrs (exec, 2, info->mr[2]);
-		exec->wait(exec, tMRD);
-	}
-
-	if (mr[1] != info->mr[1]) {
-		/* need to keep DLL off until later, at least on GDDR3 */
-		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
-		exec->wait(exec, tMRD);
-	}
-
-	if (mr[0] != info->mr[0]) {
-		exec->mrs (exec, 0, info->mr[0]);
-		exec->wait(exec, tMRD);
-	}
-
-	/* update PFB timing registers */
-	exec->timing_set(exec);
-
-	/* DLL (enable + ) reset */
-	if (!(info->mr[1] & mr1_dlloff)) {
-		if (mr[1] & mr1_dlloff) {
-			exec->mrs (exec, 1, info->mr[1]);
-			exec->wait(exec, tMRD);
-		}
-		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
-		exec->wait(exec, tMRD);
-		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
-		exec->wait(exec, tMRD);
-		exec->wait(exec, tDLLK);
-		if (pfb->ram->type == NV_MEM_TYPE_GDDR3)
-			exec->precharge(exec);
-	}
-
-	return 0;
-}
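nouveau_mem_exec() above drives a reclock entirely through the caller-supplied callback table, and the ordering is the interesting part: optionally turn the DLL off, precharge and quiesce the banks, park the DRAM in self-refresh while the memory clock is switched, then restore refresh, rewrite the mode registers and PFB timings, and finally let the DLL relock. A condensed, illustrative sketch of that ordering with a toy callback table (the struct and callback names below are hypothetical stand-ins, not the real nouveau_mem_exec_func interface; the 12000 ns figure simply mirrors the DDR3 tDLLK value used above):

#include <stdbool.h>
#include <stdint.h>

struct toy_reclock_ops {
	void (*precharge)(void *hw);
	void (*refresh)(void *hw);
	void (*auto_refresh)(void *hw, bool enable);
	void (*self_refresh)(void *hw, bool enable);
	void (*set_clock)(void *hw);
	void (*write_mr)(void *hw, int idx, uint32_t val);
	void (*write_timings)(void *hw);
	void (*wait_ns)(void *hw, uint32_t ns);
};

static void
toy_reclock(const struct toy_reclock_ops *ops, void *hw,
	    const uint32_t *new_mr, int nr_mr)
{
	int i;

	/* 1. quiesce: close all banks and stop automatic refresh */
	ops->precharge(hw);
	ops->refresh(hw);
	ops->auto_refresh(hw, false);

	/* 2. park the DRAM in self-refresh while the clock source changes */
	ops->self_refresh(hw, true);
	ops->set_clock(hw);
	ops->self_refresh(hw, false);
	ops->auto_refresh(hw, true);

	/* 3. program the new operating point: mode registers, then timings */
	for (i = 0; i < nr_mr; i++)
		ops->write_mr(hw, i, new_mr[i]);
	ops->write_timings(hw);

	/* 4. give the DLL time to relock before normal traffic resumes */
	ops->wait_ns(hw, 12000);
}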
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
deleted file mode 100644
index 4fe883c..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "nouveau_pm.h"
-
-static u8 *
-nouveau_perf_table(struct drm_device *dev, u8 *ver)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct bit_entry P;
-
-	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
-		u8 *perf = ROMPTR(dev, P.data[0]);
-		if (perf) {
-			*ver = perf[0];
-			return perf;
-		}
-	}
-
-	if (bios->type == NVBIOS_BMP) {
-		if (bios->data[bios->offset + 6] >= 0x25) {
-			u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
-			if (perf) {
-				*ver = perf[1];
-				return perf;
-			}
-		}
-	}
-
-	return NULL;
-}
-
-static u8 *
-nouveau_perf_entry(struct drm_device *dev, int idx,
-		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
-	u8 *perf = nouveau_perf_table(dev, ver);
-	if (perf) {
-		if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
-			*hdr = perf[3];
-			*cnt = 0;
-			*len = 0;
-			return perf + perf[0] + idx * perf[3];
-		} else
-		if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
-			*hdr = perf[3];
-			*cnt = perf[4];
-			*len = perf[5];
-			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
-		} else
-		if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
-			*hdr = perf[2];
-			*cnt = perf[4];
-			*len = perf[3];
-			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
-		}
-	}
-	return NULL;
-}
-
-u8 *
-nouveau_perf_rammap(struct drm_device *dev, u32 freq,
-		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	u8 *perf, i = 0;
-
-	if (!bit_table(dev, 'P', &P) && P.version == 2) {
-		u8 *rammap = ROMPTR(dev, P.data[4]);
-		if (rammap) {
-			u8 *ramcfg = rammap + rammap[1];
-
-			*ver = rammap[0];
-			*hdr = rammap[2];
-			*cnt = rammap[4];
-			*len = rammap[3];
-
-			freq /= 1000;
-			for (i = 0; i < rammap[5]; i++) {
-				if (freq >= ROM16(ramcfg[0]) &&
-				    freq <= ROM16(ramcfg[2]))
-					return ramcfg;
-
-				ramcfg += *hdr + (*cnt * *len);
-			}
-		}
-
-		return NULL;
-	}
-
-	if (nv_device(drm->device)->chipset == 0x49 ||
-	    nv_device(drm->device)->chipset == 0x4b)
-		freq /= 2;
-
-	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
-		if (*ver >= 0x20 && *ver < 0x25) {
-			if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
-				break;
-		} else
-		if (*ver >= 0x25 && *ver < 0x40) {
-			if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
-				break;
-		}
-	}
-
-	if (perf) {
-		u8 *ramcfg = perf + *hdr;
-		*ver = 0x00;
-		*hdr = 0;
-		return ramcfg;
-	}
-
-	return NULL;
-}
-
-u8 *
-nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	u8 strap, hdr, cnt;
-	u8 *rammap;
-
-	strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
-	if (bios->ram_restrict_tbl_ptr)
-		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
-
-	rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
-	if (rammap && strap < cnt)
-		return rammap + hdr + (strap * *len);
-
-	return NULL;
-}
-
-u8 *
-nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct bit_entry P;
-	u8 *perf, *timing = NULL;
-	u8 i = 0, hdr, cnt;
-
-	if (bios->type == NVBIOS_BMP) {
-		while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
-						  len)) && *ver == 0x15) {
-			if (freq <= ROM32(perf[5]) * 20) {
-				*ver = 0x00;
-				*len = 14;
-				return perf + 41;
-			}
-		}
-		return NULL;
-	}
-
-	if (!bit_table(dev, 'P', &P)) {
-		if (P.version == 1)
-			timing = ROMPTR(dev, P.data[4]);
-		else
-		if (P.version == 2)
-			timing = ROMPTR(dev, P.data[8]);
-	}
-
-	if (timing && timing[0] == 0x10) {
-		u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
-		if (ramcfg && ramcfg[1] < timing[2]) {
-			*ver = timing[0];
-			*len = timing[3];
-			return timing + timing[1] + (ramcfg[1] * timing[3]);
-		}
-	}
-
-	return NULL;
-}
-
-static void
-legacy_perf_init(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvbios *bios = &drm->vbios;
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	char *perf, *entry, *bmp = &bios->data[bios->offset];
-	int headerlen, use_straps;
-
-	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
-		NV_DEBUG(drm, "BMP version too old for perf\n");
-		return;
-	}
-
-	perf = ROMPTR(dev, bmp[0x73]);
-	if (!perf) {
-		NV_DEBUG(drm, "No memclock table pointer found.\n");
-		return;
-	}
-
-	switch (perf[0]) {
-	case 0x12:
-	case 0x14:
-	case 0x18:
-		use_straps = 0;
-		headerlen = 1;
-		break;
-	case 0x01:
-		use_straps = perf[1] & 1;
-		headerlen = (use_straps ? 8 : 2);
-		break;
-	default:
-		NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
-		return;
-	}
-
-	entry = perf + headerlen;
-	if (use_straps)
-		entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
-
-	sprintf(pm->perflvl[0].name, "performance_level_0");
-	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
-	pm->nr_perflvl = 1;
-}
-
-static void
-nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct bit_entry P;
-	u8 *vmap;
-	int id;
-
-	id = perflvl->volt_min;
-	perflvl->volt_min = 0;
-
-	/* boards using voltage table version <0x40 store the voltage
-	 * level directly in the perflvl entry as a multiple of 10mV
-	 */
-	if (drm->pm->voltage.version < 0x40) {
-		perflvl->volt_min = id * 10000;
-		perflvl->volt_max = perflvl->volt_min;
-		return;
-	}
-
-	/* on newer ones, the perflvl stores an index into yet another
-	 * vbios table containing a min/max voltage value for the perflvl
-	 */
-	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
-		NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
-			 P.version, P.length);
-		return;
-	}
-
-	vmap = ROMPTR(dev, P.data[32]);
-	if (!vmap) {
-		NV_DEBUG(drm, "volt map table pointer invalid\n");
-		return;
-	}
-
-	if (id < vmap[3]) {
-		vmap += vmap[1] + (vmap[2] * id);
-		perflvl->volt_min = ROM32(vmap[0]);
-		perflvl->volt_max = ROM32(vmap[4]);
-	}
-}
-
-void
-nouveau_perf_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nvbios *bios = &drm->vbios;
-	u8 *perf, ver, hdr, cnt, len;
-	int ret, vid, i = -1;
-
-	if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
-		legacy_perf_init(dev);
-		return;
-	}
-
-	perf = nouveau_perf_table(dev, &ver);
-
-	while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
-		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
-
-		if (perf[0] == 0xff)
-			continue;
-
-		switch (ver) {
-		case 0x12:
-		case 0x13:
-		case 0x15:
-			perflvl->fanspeed = perf[55];
-			if (hdr > 56)
-				perflvl->volt_min = perf[56];
-			perflvl->core = ROM32(perf[1]) * 10;
-			perflvl->memory = ROM32(perf[5]) * 20;
-			break;
-		case 0x21:
-		case 0x23:
-		case 0x24:
-			perflvl->fanspeed = perf[4];
-			perflvl->volt_min = perf[5];
-			perflvl->shader = ROM16(perf[6]) * 1000;
-			perflvl->core = perflvl->shader;
-			perflvl->core += (signed char)perf[8] * 1000;
-			if (nv_device(drm->device)->chipset == 0x49 ||
-			    nv_device(drm->device)->chipset == 0x4b)
-				perflvl->memory = ROM16(perf[11]) * 1000;
-			else
-				perflvl->memory = ROM16(perf[11]) * 2000;
-			break;
-		case 0x25:
-			perflvl->fanspeed = perf[4];
-			perflvl->volt_min = perf[5];
-			perflvl->core = ROM16(perf[6]) * 1000;
-			perflvl->shader = ROM16(perf[10]) * 1000;
-			perflvl->memory = ROM16(perf[12]) * 1000;
-			break;
-		case 0x30:
-			perflvl->memscript = ROM16(perf[2]);
-		case 0x35:
-			perflvl->fanspeed = perf[6];
-			perflvl->volt_min = perf[7];
-			perflvl->core = ROM16(perf[8]) * 1000;
-			perflvl->shader = ROM16(perf[10]) * 1000;
-			perflvl->memory = ROM16(perf[12]) * 1000;
-			perflvl->vdec = ROM16(perf[16]) * 1000;
-			perflvl->dom6 = ROM16(perf[20]) * 1000;
-			break;
-		case 0x40:
-#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
-			perflvl->fanspeed = 0; /*XXX*/
-			perflvl->volt_min = perf[2];
-			if (nv_device(drm->device)->card_type == NV_50) {
-				perflvl->core   = subent(0);
-				perflvl->shader = subent(1);
-				perflvl->memory = subent(2);
-				perflvl->vdec   = subent(3);
-				perflvl->unka0  = subent(4);
-			} else {
-				perflvl->hub06  = subent(0);
-				perflvl->hub01  = subent(1);
-				perflvl->copy   = subent(2);
-				perflvl->shader = subent(3);
-				perflvl->rop    = subent(4);
-				perflvl->memory = subent(5);
-				perflvl->vdec   = subent(6);
-				perflvl->daemon = subent(10);
-				perflvl->hub07  = subent(11);
-				perflvl->core   = perflvl->shader / 2;
-			}
-			break;
-		}
-
-		/* make sure vid is valid */
-		nouveau_perf_voltage(dev, perflvl);
-		if (pm->voltage.supported && perflvl->volt_min) {
-			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
-			if (vid < 0) {
-				NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
-				continue;
-			}
-		}
-
-		/* get the corresponding memory timings */
-		ret = nouveau_mem_timing_calc(dev, perflvl->memory,
-					          &perflvl->timing);
-		if (ret) {
-			NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
-			continue;
-		}
-
-		snprintf(perflvl->name, sizeof(perflvl->name),
-			 "performance_level_%d", i);
-		perflvl->id = i;
-
-		snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
-			 "%d", perflvl->id);
-		perflvl->profile.func = &nouveau_pm_static_profile_func;
-		list_add_tail(&perflvl->profile.head, &pm->profiles);
-
-
-		pm->nr_perflvl++;
-	}
-}
-
-void
-nouveau_perf_fini(struct drm_device *dev)
-{
-}
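For the version-0x40 branch of nouveau_perf_init() above, each performance entry is followed by fixed-size clock sub-entries, and the subent() macro extracts a 12-bit MHz value from each one, keeping it in kHz (matching the /1000 used when the levels are printed in nouveau_pm.c). Spelled out as a plain helper (illustrative only; rom16() here is a local little-endian read standing in for nouveau's ROM16):

#include <stdint.h>

/* Little-endian 16-bit read from a vbios image (stand-in for ROM16). */
static inline uint16_t
rom16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Clock sub-entry 'n' of a v0x40 perf entry: the low 12 bits of its first
 * word hold the clock in MHz; the caller stores frequencies in kHz. */
static uint32_t
subentry_khz(const uint8_t *entry, uint8_t hdr, uint8_t len, int n)
{
	return (uint32_t)(rom16(entry + hdr + n * len) & 0xfff) * 1000;
}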
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
deleted file mode 100644
index 936b442..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ /dev/null
@@ -1,1174 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#endif
-#include <linux/power_supply.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/gpio.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-
-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
-static char *nouveau_perflvl;
-module_param_named(perflvl, nouveau_perflvl, charp, 0400);
-
-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
-static int nouveau_perflvl_wr;
-module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
-
-static int
-nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-		       struct nouveau_pm_level *a, struct nouveau_pm_level *b)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	/*XXX: not on all boards, we should control based on temperature
-	 *     on recent boards..  or maybe on some other factor we don't
-	 *     know about?
-	 */
-	if (therm && therm->fan_set &&
-		a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
-		ret = therm->fan_set(therm, perflvl->fanspeed);
-		if (ret && ret != -ENODEV) {
-			NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
-		}
-	}
-
-	if (pm->voltage.supported && pm->voltage_set) {
-		if (perflvl->volt_min && b->volt_min > a->volt_min) {
-			ret = pm->voltage_set(dev, perflvl->volt_min);
-			if (ret) {
-				NV_ERROR(drm, "voltage set failed: %d\n", ret);
-				return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static int
-nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	void *state;
-	int ret;
-
-	if (perflvl == pm->cur)
-		return 0;
-
-	ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
-	if (ret)
-		return ret;
-
-	state = pm->clocks_pre(dev, perflvl);
-	if (IS_ERR(state)) {
-		ret = PTR_ERR(state);
-		goto error;
-	}
-	ret = pm->clocks_set(dev, state);
-	if (ret)
-		goto error;
-
-	ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
-	if (ret)
-		return ret;
-
-	pm->cur = perflvl;
-	return 0;
-
-error:
-	/* restore the fan speed and voltage before leaving */
-	nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
-	return ret;
-}
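One subtlety in nouveau_pm_perflvl_set() above: nouveau_pm_perflvl_aux() is called twice with the (a, b) comparison levels swapped, so fan speed and voltage are only raised before the clock change and only lowered after it, meaning the chip never runs the higher clock with the lower voltage or fan speed. Reduced to its essence as a toy sketch (struct toy_pm, set_voltage and set_clocks are hypothetical stand-ins, not the driver's interface):

struct toy_pm {
	int cur_uv;	/* current core voltage, in microvolts */
	int cur_khz;	/* current core clock, in kHz */
};

static void set_voltage(struct toy_pm *pm, int uv)  { pm->cur_uv = uv;   /* program regulator (stub) */ }
static void set_clocks(struct toy_pm *pm, int khz)  { pm->cur_khz = khz; /* program PLLs (stub) */ }

static void
toy_set_level(struct toy_pm *pm, int new_uv, int new_khz)
{
	if (new_uv > pm->cur_uv)
		set_voltage(pm, new_uv);	/* clocking up: raise the supply first */

	set_clocks(pm, new_khz);

	if (new_uv < pm->cur_uv)
		set_voltage(pm, new_uv);	/* clocking down: lower the supply last */
}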
-
-void
-nouveau_pm_trigger(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_timer *ptimer = nouveau_timer(drm->device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile = NULL;
-	struct nouveau_pm_level *perflvl = NULL;
-	int ret;
-
-	/* select power profile based on current power source */
-	if (power_supply_is_system_supplied())
-		profile = pm->profile_ac;
-	else
-		profile = pm->profile_dc;
-
-	if (profile != pm->profile) {
-		pm->profile->func->fini(pm->profile);
-		pm->profile = profile;
-		pm->profile->func->init(pm->profile);
-	}
-
-	/* select performance level based on profile */
-	perflvl = profile->func->select(profile);
-
-	/* change perflvl, if necessary */
-	if (perflvl != pm->cur) {
-		u64 time0 = ptimer->read(ptimer);
-
-		NV_INFO(drm, "setting performance level: %d", perflvl->id);
-		ret = nouveau_pm_perflvl_set(dev, perflvl);
-		if (ret)
-			NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
-
-		NV_INFO(drm, "> reclocking took %lluns\n\n",
-			     ptimer->read(ptimer) - time0);
-	}
-}
-
-static struct nouveau_pm_profile *
-profile_find(struct drm_device *dev, const char *string)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile;
-
-	list_for_each_entry(profile, &pm->profiles, head) {
-		if (!strncmp(profile->name, string, sizeof(profile->name)))
-			return profile;
-	}
-
-	return NULL;
-}
-
-static int
-nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *ac = NULL, *dc = NULL;
-	char string[16], *cur = string, *ptr;
-
-	/* safety precaution, for now */
-	if (nouveau_perflvl_wr != 7777)
-		return -EPERM;
-
-	strncpy(string, profile, sizeof(string));
-	string[sizeof(string) - 1] = 0;
-	if ((ptr = strchr(string, '\n')))
-		*ptr = '\0';
-
-	ptr = strsep(&cur, ",");
-	if (ptr)
-		ac = profile_find(dev, ptr);
-
-	ptr = strsep(&cur, ",");
-	if (ptr)
-		dc = profile_find(dev, ptr);
-	else
-		dc = ac;
-
-	if (ac == NULL || dc == NULL)
-		return -EINVAL;
-
-	pm->profile_ac = ac;
-	pm->profile_dc = dc;
-	nouveau_pm_trigger(dev);
-	return 0;
-}
-
-static void
-nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
-{
-}
-
-static struct nouveau_pm_level *
-nouveau_pm_static_select(struct nouveau_pm_profile *profile)
-{
-	return container_of(profile, struct nouveau_pm_level, profile);
-}
-
-const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
-	.destroy = nouveau_pm_static_dummy,
-	.init = nouveau_pm_static_dummy,
-	.fini = nouveau_pm_static_dummy,
-	.select = nouveau_pm_static_select,
-};
-
-static int
-nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	memset(perflvl, 0, sizeof(*perflvl));
-
-	if (pm->clocks_get) {
-		ret = pm->clocks_get(dev, perflvl);
-		if (ret)
-			return ret;
-	}
-
-	if (pm->voltage.supported && pm->voltage_get) {
-		ret = pm->voltage_get(dev);
-		if (ret > 0) {
-			perflvl->volt_min = ret;
-			perflvl->volt_max = ret;
-		}
-	}
-
-	if (therm && therm->fan_get) {
-		ret = therm->fan_get(therm);
-		if (ret >= 0)
-			perflvl->fanspeed = ret;
-	}
-
-	nouveau_mem_timing_read(dev, &perflvl->timing);
-	return 0;
-}
-
-static void
-nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
-{
-	char c[16], s[16], v[32], f[16], m[16];
-
-	c[0] = '\0';
-	if (perflvl->core)
-		snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
-
-	s[0] = '\0';
-	if (perflvl->shader)
-		snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
-
-	m[0] = '\0';
-	if (perflvl->memory)
-		snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
-
-	v[0] = '\0';
-	if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
-		snprintf(v, sizeof(v), " voltage %dmV-%dmV",
-			 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
-	} else
-	if (perflvl->volt_min) {
-		snprintf(v, sizeof(v), " voltage %dmV",
-			 perflvl->volt_min / 1000);
-	}
-
-	f[0] = '\0';
-	if (perflvl->fanspeed)
-		snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
-
-	snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl_info(struct device *d,
-			    struct device_attribute *a, char *buf)
-{
-	struct nouveau_pm_level *perflvl =
-		container_of(a, struct nouveau_pm_level, dev_attr);
-	char *ptr = buf;
-	int len = PAGE_SIZE;
-
-	snprintf(ptr, len, "%d:", perflvl->id);
-	ptr += strlen(buf);
-	len -= strlen(buf);
-
-	nouveau_pm_perflvl_info(perflvl, ptr, len);
-	return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_level cur;
-	int len = PAGE_SIZE, ret;
-	char *ptr = buf;
-
-	snprintf(ptr, len, "profile: %s, %s\nc:",
-		 pm->profile_ac->name, pm->profile_dc->name);
-	ptr += strlen(buf);
-	len -= strlen(buf);
-
-	ret = nouveau_pm_perflvl_get(dev, &cur);
-	if (ret == 0)
-		nouveau_pm_perflvl_info(&cur, ptr, len);
-	return strlen(buf);
-}
-
-static ssize_t
-nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
-		       const char *buf, size_t count)
-{
-	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
-	int ret;
-
-	ret = nouveau_pm_profile_set(dev, buf);
-	if (ret)
-		return ret;
-	return strlen(buf);
-}
-
-static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
-		   nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
-
-static int
-nouveau_sysfs_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct device *d = &dev->pdev->dev;
-	int ret, i;
-
-	ret = device_create_file(d, &dev_attr_performance_level);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		struct nouveau_pm_level *perflvl = &pm->perflvl[i];
-
-		perflvl->dev_attr.attr.name = perflvl->name;
-		perflvl->dev_attr.attr.mode = S_IRUGO;
-		perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
-		perflvl->dev_attr.store = NULL;
-		sysfs_attr_init(&perflvl->dev_attr.attr);
-
-		ret = device_create_file(d, &perflvl->dev_attr);
-		if (ret) {
-			NV_ERROR(drm, "failed perflvl %d sysfs: %d\n",
-				 perflvl->id, i);
-			perflvl->dev_attr.attr.name = NULL;
-			nouveau_pm_fini(dev);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static void
-nouveau_sysfs_fini(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct device *d = &dev->pdev->dev;
-	int i;
-
-	device_remove_file(d, &dev_attr_performance_level);
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		struct nouveau_pm_level *pl = &pm->perflvl[i];
-
-		if (!pl->dev_attr.attr.name)
-			break;
-
-		device_remove_file(d, &pl->dev_attr);
-	}
-}
-
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-static ssize_t
-nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int temp = therm->temp_get(therm);
-
-	if (temp < 0)
-		return temp;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
-}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
-						  NULL, 0);
-
-static ssize_t
-nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
-					 struct device_attribute *a, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", 100);
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
-			  nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
-
-static ssize_t
-nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
-				     struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	      therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
-					 struct device_attribute *a,
-					 const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_temp1_auto_point1_temp,
-			  nouveau_hwmon_set_temp1_auto_point1_temp, 0);
-
-static ssize_t
-nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
-					  struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
-					      struct device_attribute *a,
-					      const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_temp1_auto_point1_temp_hyst,
-			  nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
-
-static ssize_t
-nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
-						const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
-						  nouveau_hwmon_set_max_temp,
-						  0);
-
-static ssize_t
-nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
-			    char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
-						const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_max_temp_hyst,
-			  nouveau_hwmon_set_max_temp_hyst, 0);
-
-static ssize_t
-nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
-							    const char *buf,
-								size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
-						nouveau_hwmon_critical_temp,
-						nouveau_hwmon_set_critical_temp,
-						0);
-
-static ssize_t
-nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_critical_temp_hyst(struct device *d,
-				     struct device_attribute *a,
-				     const char *buf,
-				     size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_critical_temp_hyst,
-			  nouveau_hwmon_set_critical_temp_hyst, 0);
-static ssize_t
-nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
-							    const char *buf,
-								size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
-					nouveau_hwmon_emergency_temp,
-					nouveau_hwmon_set_emergency_temp,
-					0);
-
-static ssize_t
-nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
-							char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
-}
-static ssize_t
-nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
-				      struct device_attribute *a,
-				      const char *buf,
-				      size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return count;
-
-	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
-			value / 1000);
-
-	return count;
-}
-static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
-					nouveau_hwmon_emergency_temp_hyst,
-					nouveau_hwmon_set_emergency_temp_hyst,
-					0);
-
-static ssize_t nouveau_hwmon_show_name(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "nouveau\n");
-}
-static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
-
-static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "1000\n");
-}
-static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
-						nouveau_hwmon_show_update_rate,
-						NULL, 0);
-
-static ssize_t
-nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
-			      char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
-}
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
-			  NULL, 0);
-
- static ssize_t
-nouveau_hwmon_get_pwm1_enable(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (strict_strtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
-	if (ret)
-		return ret;
-	else
-		return count;
-}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_enable,
-			  nouveau_hwmon_set_pwm1_enable, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->fan_get(therm);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
-		       const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret = -ENODEV;
-	long value;
-
-	if (nouveau_perflvl_wr != 7777)
-		return -EPERM;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->fan_set(therm, value);
-	if (ret)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1,
-			  nouveau_hwmon_set_pwm1, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1_min(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_min,
-			  nouveau_hwmon_set_pwm1_min, 0);
-
-static ssize_t
-nouveau_hwmon_get_pwm1_max(struct device *d,
-			   struct device_attribute *a, char *buf)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	int ret;
-
-	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
-			   const char *buf, size_t count)
-{
-	struct drm_device *dev = dev_get_drvdata(d);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	long value;
-	int ret;
-
-	if (kstrtol(buf, 10, &value) == -EINVAL)
-		return -EINVAL;
-
-	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
-			  nouveau_hwmon_get_pwm1_max,
-			  nouveau_hwmon_set_pwm1_max, 0);
-
-static struct attribute *hwmon_default_attributes[] = {
-	&sensor_dev_attr_name.dev_attr.attr,
-	&sensor_dev_attr_update_rate.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_temp_attributes[] = {
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
-	&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_max.dev_attr.attr,
-	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
-	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_fan_rpm_attributes[] = {
-	&sensor_dev_attr_fan1_input.dev_attr.attr,
-	NULL
-};
-static struct attribute *hwmon_pwm_fan_attributes[] = {
-	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
-	&sensor_dev_attr_pwm1.dev_attr.attr,
-	&sensor_dev_attr_pwm1_min.dev_attr.attr,
-	&sensor_dev_attr_pwm1_max.dev_attr.attr,
-	NULL
-};
-
-static const struct attribute_group hwmon_default_attrgroup = {
-	.attrs = hwmon_default_attributes,
-};
-static const struct attribute_group hwmon_temp_attrgroup = {
-	.attrs = hwmon_temp_attributes,
-};
-static const struct attribute_group hwmon_fan_rpm_attrgroup = {
-	.attrs = hwmon_fan_rpm_attributes,
-};
-static const struct attribute_group hwmon_pwm_fan_attrgroup = {
-	.attrs = hwmon_pwm_fan_attributes,
-};
-#endif
-
-static int
-nouveau_hwmon_init(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_therm *therm = nouveau_therm(drm->device);
-	struct device *hwmon_dev;
-	int ret = 0;
-
-	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
-		return -ENODEV;
-
-	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
-	if (IS_ERR(hwmon_dev)) {
-		ret = PTR_ERR(hwmon_dev);
-		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
-		return ret;
-	}
-	dev_set_drvdata(hwmon_dev, dev);
-
-	/* set the default attributes */
-	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
-	if (ret)
-		goto error;
-
-	/* if the card has a working thermal sensor */
-	if (therm->temp_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
-		if (ret) {
-		if (ret)
-			goto error;
-
-	/* if the card has a pwm fan */
-	/*XXX: incorrect, need better detection for this, some boards have
-	 *     the gpio entries for pwm fan control even when there's no
-	 *     actual fan connected to it... therm table? */
-	if (therm->fan_get && therm->fan_get(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj,
-					 &hwmon_pwm_fan_attrgroup);
-		if (ret)
-			goto error;
-	}
-
-	/* if the card can read the fan rpm */
-	if (therm->fan_sense(therm) >= 0) {
-		ret = sysfs_create_group(&hwmon_dev->kobj,
-					 &hwmon_fan_rpm_attrgroup);
-		if (ret)
-			goto error;
-	}
-
-	pm->hwmon = hwmon_dev;
-
-	return 0;
-
-error:
-	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
-	hwmon_device_unregister(hwmon_dev);
-	pm->hwmon = NULL;
-	return ret;
-#else
-	pm->hwmon = NULL;
-	return 0;
-#endif
-}
-
-static void
-nouveau_hwmon_fini(struct drm_device *dev)
-{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
-	struct nouveau_pm *pm = nouveau_pm(dev);
-
-	if (pm->hwmon) {
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
-
-		hwmon_device_unregister(pm->hwmon);
-	}
-#endif
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-static int
-nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
-{
-	struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
-	struct nouveau_drm *drm = nouveau_drm(pm->dev);
-	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
-
-	if (strcmp(entry->device_class, "ac_adapter") == 0) {
-		bool ac = power_supply_is_system_supplied();
-
-		NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
-		nouveau_pm_trigger(pm->dev);
-	}
-
-	return NOTIFY_OK;
-}
-#endif
-
-int
-nouveau_pm_init(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_pm *pm;
-	char info[256];
-	int ret, i;
-
-	pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
-	if (!pm)
-		return -ENOMEM;
-
-	pm->dev = dev;
-
-	if (device->card_type < NV_40) {
-		pm->clocks_get = nv04_pm_clocks_get;
-		pm->clocks_pre = nv04_pm_clocks_pre;
-		pm->clocks_set = nv04_pm_clocks_set;
-		if (nouveau_gpio(drm->device)) {
-			pm->voltage_get = nouveau_voltage_gpio_get;
-			pm->voltage_set = nouveau_voltage_gpio_set;
-		}
-	} else
-	if (device->card_type < NV_50) {
-		pm->clocks_get = nv40_pm_clocks_get;
-		pm->clocks_pre = nv40_pm_clocks_pre;
-		pm->clocks_set = nv40_pm_clocks_set;
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	} else
-	if (device->card_type < NV_C0) {
-		if (device->chipset <  0xa3 ||
-		    device->chipset == 0xaa ||
-		    device->chipset == 0xac) {
-			pm->clocks_get = nv50_pm_clocks_get;
-			pm->clocks_pre = nv50_pm_clocks_pre;
-			pm->clocks_set = nv50_pm_clocks_set;
-		} else {
-			pm->clocks_get = nva3_pm_clocks_get;
-			pm->clocks_pre = nva3_pm_clocks_pre;
-			pm->clocks_set = nva3_pm_clocks_set;
-		}
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	} else
-	if (device->card_type < NV_E0) {
-		pm->clocks_get = nvc0_pm_clocks_get;
-		pm->clocks_pre = nvc0_pm_clocks_pre;
-		pm->clocks_set = nvc0_pm_clocks_set;
-		pm->voltage_get = nouveau_voltage_gpio_get;
-		pm->voltage_set = nouveau_voltage_gpio_set;
-	}
-
-	/* parse aux tables from vbios */
-	nouveau_volt_init(dev);
-
-	INIT_LIST_HEAD(&pm->profiles);
-
-	/* determine current ("boot") performance level */
-	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
-	if (ret) {
-		NV_ERROR(drm, "failed to determine boot perflvl\n");
-		return ret;
-	}
-
-	strncpy(pm->boot.name, "boot", 4);
-	strncpy(pm->boot.profile.name, "boot", 4);
-	pm->boot.profile.func = &nouveau_pm_static_profile_func;
-
-	list_add(&pm->boot.profile.head, &pm->profiles);
-
-	pm->profile_ac = &pm->boot.profile;
-	pm->profile_dc = &pm->boot.profile;
-	pm->profile = &pm->boot.profile;
-	pm->cur = &pm->boot;
-
-	/* add performance levels from vbios */
-	nouveau_perf_init(dev);
-
-	/* display available performance levels */
-	NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
-	for (i = 0; i < pm->nr_perflvl; i++) {
-		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
-		NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
-	}
-
-	nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
-	NV_INFO(drm, "c:%s", info);
-
-	/* switch performance levels now if requested */
-	if (nouveau_perflvl != NULL)
-		nouveau_pm_profile_set(dev, nouveau_perflvl);
-
-	nouveau_sysfs_init(dev);
-	nouveau_hwmon_init(dev);
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
-	register_acpi_notifier(&pm->acpi_nb);
-#endif
-
-	return 0;
-}
-
-void
-nouveau_pm_fini(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_profile *profile, *tmp;
-
-	list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
-		list_del(&profile->head);
-		profile->func->destroy(profile);
-	}
-
-	if (pm->cur != &pm->boot)
-		nouveau_pm_perflvl_set(dev, &pm->boot);
-
-	nouveau_perf_fini(dev);
-	nouveau_volt_fini(dev);
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
-	unregister_acpi_notifier(&pm->acpi_nb);
-#endif
-	nouveau_hwmon_fini(dev);
-	nouveau_sysfs_fini(dev);
-
-	nouveau_drm(dev)->pm = NULL;
-	kfree(pm);
-}
-
-void
-nouveau_pm_resume(struct drm_device *dev)
-{
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_level *perflvl;
-
-	if (!pm->cur || pm->cur == &pm->boot)
-		return;
-
-	perflvl = pm->cur;
-	pm->cur = &pm->boot;
-	nouveau_pm_perflvl_set(dev, perflvl);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
deleted file mode 100644
index 73b789c..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_PM_H__
-#define __NOUVEAU_PM_H__
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-
-struct nouveau_pm_voltage_level {
-	u32 voltage; /* microvolts */
-	u8  vid;
-};
-
-struct nouveau_pm_voltage {
-	bool supported;
-	u8 version;
-	u8 vid_mask;
-
-	struct nouveau_pm_voltage_level *level;
-	int nr_level;
-};
-
-/* Exclusive upper limits */
-#define NV_MEM_CL_DDR2_MAX 8
-#define NV_MEM_WR_DDR2_MAX 9
-#define NV_MEM_CL_DDR3_MAX 17
-#define NV_MEM_WR_DDR3_MAX 17
-#define NV_MEM_CL_GDDR3_MAX 16
-#define NV_MEM_WR_GDDR3_MAX 18
-#define NV_MEM_CL_GDDR5_MAX 21
-#define NV_MEM_WR_GDDR5_MAX 20
-
-struct nouveau_pm_memtiming {
-	int id;
-
-	u32 reg[9];
-	u32 mr[4];
-
-	u8 tCWL;
-
-	u8 odt;
-	u8 drive_strength;
-};
-
-struct nouveau_pm_tbl_header {
-	u8 version;
-	u8 header_len;
-	u8 entry_cnt;
-	u8 entry_len;
-};
-
-struct nouveau_pm_tbl_entry {
-	u8 tWR;
-	u8 tWTR;
-	u8 tCL;
-	u8 tRC;
-	u8 empty_4;
-	u8 tRFC;	/* Byte 5 */
-	u8 empty_6;
-	u8 tRAS;	/* Byte 7 */
-	u8 empty_8;
-	u8 tRP;		/* Byte 9 */
-	u8 tRCDRD;
-	u8 tRCDWR;
-	u8 tRRD;
-	u8 tUNK_13;
-	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
-	u8 empty_15;
-	u8 tUNK_16;
-	u8 empty_17;
-	u8 tUNK_18;
-	u8 tCWL;
-	u8 tUNK_20, tUNK_21;
-};
-
-struct nouveau_pm_profile;
-struct nouveau_pm_profile_func {
-	void (*destroy)(struct nouveau_pm_profile *);
-	void (*init)(struct nouveau_pm_profile *);
-	void (*fini)(struct nouveau_pm_profile *);
-	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
-};
-
-struct nouveau_pm_profile {
-	const struct nouveau_pm_profile_func *func;
-	struct list_head head;
-	char name[8];
-};
-
-#define NOUVEAU_PM_MAX_LEVEL 8
-struct nouveau_pm_level {
-	struct nouveau_pm_profile profile;
-	struct device_attribute dev_attr;
-	char name[32];
-	int id;
-
-	struct nouveau_pm_memtiming timing;
-	u32 memory;
-	u16 memscript;
-
-	u32 core;
-	u32 shader;
-	u32 rop;
-	u32 copy;
-	u32 daemon;
-	u32 vdec;
-	u32 dom6;
-	u32 unka0;	/* nva3:nvc0 */
-	u32 hub01;	/* nvc0- */
-	u32 hub06;	/* nvc0- */
-	u32 hub07;	/* nvc0- */
-
-	u32 volt_min; /* microvolts */
-	u32 volt_max;
-	u8  fanspeed;
-};
-
-struct nouveau_pm_temp_sensor_constants {
-	u16 offset_constant;
-	s16 offset_mult;
-	s16 offset_div;
-	s16 slope_mult;
-	s16 slope_div;
-};
-
-struct nouveau_pm_threshold_temp {
-	s16 critical;
-	s16 down_clock;
-};
-
-struct nouveau_pm {
-	struct drm_device *dev;
-
-	struct nouveau_pm_voltage voltage;
-	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
-	int nr_perflvl;
-	struct nouveau_pm_temp_sensor_constants sensor_constants;
-	struct nouveau_pm_threshold_temp threshold_temp;
-
-	struct nouveau_pm_profile *profile_ac;
-	struct nouveau_pm_profile *profile_dc;
-	struct nouveau_pm_profile *profile;
-	struct list_head profiles;
-
-	struct nouveau_pm_level boot;
-	struct nouveau_pm_level *cur;
-
-	struct device *hwmon;
-	struct notifier_block acpi_nb;
-
-	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
-	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
-	int (*clocks_set)(struct drm_device *, void *);
-
-	int (*voltage_get)(struct drm_device *);
-	int (*voltage_set)(struct drm_device *, int voltage);
-};
-
-static inline struct nouveau_pm *
-nouveau_pm(struct drm_device *dev)
-{
-	return nouveau_drm(dev)->pm;
-}
-
-struct nouveau_mem_exec_func {
-	struct drm_device *dev;
-	void (*precharge)(struct nouveau_mem_exec_func *);
-	void (*refresh)(struct nouveau_mem_exec_func *);
-	void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
-	void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
-	void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
-	u32  (*mrg)(struct nouveau_mem_exec_func *, int mr);
-	void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
-	void (*clock_set)(struct nouveau_mem_exec_func *);
-	void (*timing_set)(struct nouveau_mem_exec_func *);
-	void *priv;
-};
-
-/* nouveau_mem.c */
-int  nouveau_mem_exec(struct nouveau_mem_exec_func *,
-		      struct nouveau_pm_level *);
-
-/* nouveau_pm.c */
-int  nouveau_pm_init(struct drm_device *dev);
-void nouveau_pm_fini(struct drm_device *dev);
-void nouveau_pm_resume(struct drm_device *dev);
-extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
-void nouveau_pm_trigger(struct drm_device *dev);
-
-/* nouveau_volt.c */
-void nouveau_volt_init(struct drm_device *);
-void nouveau_volt_fini(struct drm_device *);
-int  nouveau_volt_vid_lookup(struct drm_device *, int voltage);
-int  nouveau_volt_lvl_lookup(struct drm_device *, int vid);
-int  nouveau_voltage_gpio_get(struct drm_device *);
-int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
-
-/* nouveau_perf.c */
-void nouveau_perf_init(struct drm_device *);
-void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
-			u8 *hdr, u8 *cnt, u8 *len);
-u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
-
-/* nouveau_mem.c */
-void nouveau_mem_timing_init(struct drm_device *);
-void nouveau_mem_timing_fini(struct drm_device *);
-
-/* nv04_pm.c */
-int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv04_pm_clocks_set(struct drm_device *, void *);
-
-/* nv40_pm.c */
-int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv40_pm_clocks_set(struct drm_device *, void *);
-int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nv50_pm.c */
-int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nv50_pm_clocks_set(struct drm_device *, void *);
-int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
-int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
-
-/* nva3_pm.c */
-int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nva3_pm_clocks_set(struct drm_device *, void *);
-
-/* nvc0_pm.c */
-int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
-void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-int nvc0_pm_clocks_set(struct drm_device *, void *);
-
-/* nouveau_mem.c */
-int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
-			     struct nouveau_pm_memtiming *);
-void nouveau_mem_timing_read(struct drm_device *,
-			     struct nouveau_pm_memtiming *);
-
-static inline int
-nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
-	      int *N, int *fN, int *M, int *P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_clock *clk = nouveau_clock(device);
-	struct nouveau_pll_vals pv;
-	int ret;
-
-	ret = clk->pll_calc(clk, pll, freq, &pv);
-	*N = pv.N1;
-	*M = pv.M1;
-	*P = pv.log2P;
-	return ret;
-}
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e90468d..51a2cb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,14 +71,16 @@
 		return ERR_PTR(ret);
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, &nvbo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	nvbo->gem->driver_private = nvbo;
-	return nvbo->gem;
+	return &nvbo->gem;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
new file mode 100644
index 0000000..89201a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_sysfs.h"
+
+#include <core/object.h>
+#include <core/class.h>
+
+static inline struct drm_device *
+drm_device(struct device *d)
+{
+	return pci_get_drvdata(to_pci_dev(d));
+}
+
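+/* snprintf into the buffer, then advance the cursor (p) and shrink the remaining size (r) */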
+#define snappendf(p,r,f,a...) do {                                             \
+	snprintf(p, r, f, ##a);                                                \
+	r -= strlen(p);                                                        \
+	p += strlen(p);                                                        \
+} while(0)
+
+static ssize_t
+nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+	struct nv_control_pstate_info info;
+	size_t cnt = PAGE_SIZE;
+	char *buf = b;
+	int ret, i;
+
+	ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
+	if (ret)
+		return ret;
+
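+	/* one line per performance state, plus a final "--" line for the current state */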
+	for (i = 0; i < info.count + 1; i++) {
+		const s32 state = i < info.count ? i :
+			NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
+		struct nv_control_pstate_attr attr = {
+			.state = state,
+			.index = 0,
+		};
+
+		ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+			     &attr, sizeof(attr));
+		if (ret)
+			return ret;
+
+		if (i < info.count)
+			snappendf(buf, cnt, "%02x:", attr.state);
+		else
+			snappendf(buf, cnt, "--:");
+
+		attr.index = 0;
+		do {
+			attr.state = state;
+			ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
+				     &attr, sizeof(attr));
+			if (ret)
+				return ret;
+
+			snappendf(buf, cnt, " %s %d", attr.name, attr.min);
+			if (attr.min != attr.max)
+				snappendf(buf, cnt, "-%d", attr.max);
+			snappendf(buf, cnt, " %s", attr.unit);
+		} while (attr.index);
+
+		if ((state >= 0 && info.pstate == state) ||
+		    (state <  0 && info.ustate < 0))
+			snappendf(buf, cnt, " *");
+		snappendf(buf, cnt, "\n");
+	}
+
+	return strlen(b);
+}
+
+static ssize_t
+nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
+			 const char *buf, size_t count)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
+	struct nv_control_pstate_user args;
+	long value, ret;
+	char *tmp;
+
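+	/* strip any trailing newline so the "none"/"auto" comparisons below match */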
+	if ((tmp = strchr(buf, '\n')))
+		*tmp = '\0';
+
+	if (!strcasecmp(buf, "none"))
+		args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
+	else
+	if (!strcasecmp(buf, "auto"))
+		args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
+	else {
+		ret = kstrtol(buf, 16, &value);
+		if (ret)
+			return ret;
+		args.state = value;
+	}
+
+	ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR(pstate, S_IRUGO | S_IWUSR,
+		   nouveau_sysfs_pstate_get, nouveau_sysfs_pstate_set);
+
+void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+	struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (sysfs->ctrl) {
+		device_remove_file(&dev->pdev->dev, &dev_attr_pstate);
+		nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
+	}
+
+	drm->sysfs = NULL;
+	kfree(sysfs);
+}
+
+int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_sysfs *sysfs;
+	int ret;
+
+	sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
+	if (!sysfs)
+		return -ENOMEM;
+
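+	/* create the control object; the pstate file is only exposed when this succeeds */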
+	ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
+				 NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
+	if (ret == 0)
+		device_create_file(&dev->pdev->dev, &dev_attr_pstate);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
new file mode 100644
index 0000000..74b47f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -0,0 +1,19 @@
+#ifndef __NOUVEAU_SYSFS_H__
+#define __NOUVEAU_SYSFS_H__
+
+#include "nouveau_drm.h"
+
+struct nouveau_sysfs {
+	struct nouveau_object *ctrl;
+};
+
+static inline struct nouveau_sysfs *
+nouveau_sysfs(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->sysfs;
+}
+
+int  nouveau_sysfs_init(struct drm_device *);
+void nouveau_sysfs_fini(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
deleted file mode 100644
index 9976414..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
-
-static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
-static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
-
-int
-nouveau_voltage_gpio_get(struct drm_device *dev)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(device);
-	u8 vid = 0;
-	int i;
-
-	for (i = 0; i < nr_vidtag; i++) {
-		if (!(volt->vid_mask & (1 << i)))
-			continue;
-
-		vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
-	}
-
-	return nouveau_volt_lvl_lookup(dev, vid);
-}
-
-int
-nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(device);
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int vid, i;
-
-	vid = nouveau_volt_vid_lookup(dev, voltage);
-	if (vid < 0)
-		return vid;
-
-	for (i = 0; i < nr_vidtag; i++) {
-		if (!(volt->vid_mask & (1 << i)))
-			continue;
-
-		gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
-	}
-
-	return 0;
-}
-
-int
-nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int i;
-
-	for (i = 0; i < volt->nr_level; i++) {
-		if (volt->level[i].voltage == voltage)
-			return volt->level[i].vid;
-	}
-
-	return -ENOENT;
-}
-
-int
-nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-	int i;
-
-	for (i = 0; i < volt->nr_level; i++) {
-		if (volt->level[i].vid == vid)
-			return volt->level[i].voltage;
-	}
-
-	return -ENOENT;
-}
-
-void
-nouveau_volt_init(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
-	struct nouveau_pm *pm = nouveau_pm(dev);
-	struct nouveau_pm_voltage *voltage = &pm->voltage;
-	struct nvbios *bios = &drm->vbios;
-	struct dcb_gpio_func func;
-	struct bit_entry P;
-	u8 *volt = NULL, *entry;
-	int i, headerlen, recordlen, entries, vidmask, vidshift;
-
-	if (bios->type == NVBIOS_BIT) {
-		if (bit_table(dev, 'P', &P))
-			return;
-
-		if (P.version == 1)
-			volt = ROMPTR(dev, P.data[16]);
-		else
-		if (P.version == 2)
-			volt = ROMPTR(dev, P.data[12]);
-		else {
-			NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
-		}
-	} else {
-		if (bios->data[bios->offset + 6] < 0x27) {
-			NV_DEBUG(drm, "BMP version too old for voltage\n");
-			return;
-		}
-
-		volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
-	}
-
-	if (!volt) {
-		NV_DEBUG(drm, "voltage table pointer invalid\n");
-		return;
-	}
-
-	switch (volt[0]) {
-	case 0x10:
-	case 0x11:
-	case 0x12:
-		headerlen = 5;
-		recordlen = volt[1];
-		entries   = volt[2];
-		vidshift  = 0;
-		vidmask   = volt[4];
-		break;
-	case 0x20:
-		headerlen = volt[1];
-		recordlen = volt[3];
-		entries   = volt[2];
-		vidshift  = 0; /* could be vidshift like 0x30? */
-		vidmask   = volt[5];
-		break;
-	case 0x30:
-		headerlen = volt[1];
-		recordlen = volt[2];
-		entries   = volt[3];
-		vidmask   = volt[4];
-		/* no longer certain what volt[5] is, if it's related to
-		 * the vid shift then it's definitely not a function of
-		 * how many bits are set.
-		 *
-		 * after looking at a number of nva3+ vbios images, they
-		 * all seem likely to have a static shift of 2.. lets
-		 * go with that for now until proven otherwise.
-		 */
-		vidshift  = 2;
-		break;
-	case 0x40:
-		headerlen = volt[1];
-		recordlen = volt[2];
-		entries   = volt[3]; /* not a clue what the entries are for.. */
-		vidmask   = volt[11]; /* guess.. */
-		vidshift  = 0;
-		break;
-	default:
-		NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
-		return;
-	}
-
-	/* validate vid mask */
-	voltage->vid_mask = vidmask;
-	if (!voltage->vid_mask)
-		return;
-
-	i = 0;
-	while (vidmask) {
-		if (i >= nr_vidtag) {
-			NV_DEBUG(drm, "vid bit %d unknown\n", i);
-			return;
-		}
-
-		if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
-			NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
-			return;
-		}
-
-		vidmask >>= 1;
-		i++;
-	}
-
-	/* parse vbios entries into common format */
-	voltage->version = volt[0];
-	if (voltage->version < 0x40) {
-		voltage->nr_level = entries;
-		voltage->level =
-			kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
-		if (!voltage->level)
-			return;
-
-		entry = volt + headerlen;
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			voltage->level[i].voltage = entry[0] * 10000;
-			voltage->level[i].vid     = entry[1] >> vidshift;
-		}
-	} else {
-		u32 volt_uv = ROM32(volt[4]);
-		s16 step_uv = ROM16(volt[8]);
-		u8 vid;
-
-		voltage->nr_level = voltage->vid_mask + 1;
-		voltage->level = kcalloc(voltage->nr_level,
-					 sizeof(*voltage->level), GFP_KERNEL);
-		if (!voltage->level)
-			return;
-
-		for (vid = 0; vid <= voltage->vid_mask; vid++) {
-			voltage->level[vid].voltage = volt_uv;
-			voltage->level[vid].vid = vid;
-			volt_uv += step_uv;
-		}
-	}
-
-	voltage->supported = true;
-}
-
-void
-nouveau_volt_fini(struct drm_device *dev)
-{
-	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
-
-	kfree(volt->level);
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
deleted file mode 100644
index 27afc0e..0000000
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_reg.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-int
-nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	int ret;
-
-	ret = nouveau_hw_get_clock(dev, PLL_CORE);
-	if (ret < 0)
-		return ret;
-	perflvl->core = ret;
-
-	ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
-	if (ret < 0)
-		return ret;
-	perflvl->memory = ret;
-
-	return 0;
-}
-
-struct nv04_pm_clock {
-	struct nvbios_pll pll;
-	struct nouveau_pll_vals calc;
-};
-
-struct nv04_pm_state {
-	struct nv04_pm_clock core;
-	struct nv04_pm_clock memory;
-};
-
-static int
-calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	int ret;
-
-	ret = nvbios_pll_parse(bios, id, &clk->pll);
-	if (ret)
-		return ret;
-
-	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
-	if (!ret)
-		return -EINVAL;
-
-	return 0;
-}
-
-void *
-nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nv04_pm_state *info;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
-	if (ret)
-		goto error;
-
-	if (perflvl->memory) {
-		ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
-		if (ret)
-			goto error;
-	}
-
-	return info;
-error:
-	kfree(info);
-	return ERR_PTR(ret);
-}
-
-static void
-prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	u32 reg = clk->pll.reg;
-
-	/* thank the insane nouveau_hw_setpll() interface for this */
-	if (device->card_type >= NV_40)
-		reg += 4;
-
-	pclk->pll_prog(pclk, reg, &clk->calc);
-}
-
-int
-nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_timer *ptimer = nouveau_timer(device);
-	struct nv04_pm_state *state = pre_state;
-
-	prog_pll(dev, &state->core);
-
-	if (state->memory.pll.reg) {
-		prog_pll(dev, &state->memory);
-		if (device->card_type < NV_30) {
-			if (device->card_type == NV_20)
-				nv_mask(device, 0x1002c4, 0, 1 << 20);
-
-			/* Reset the DLLs */
-			nv_mask(device, 0x1002c0, 0, 1 << 8);
-		}
-	}
-
-	nv_ofuncs(ptimer)->init(nv_object(ptimer));
-
-	kfree(state);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
deleted file mode 100644
index 625f80d..0000000
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-#include "dispnv04/hw.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-
-#include <engine/fifo.h>
-
-#define min2(a,b) ((a) < (b) ? (a) : (b))
-
-static u32
-read_pll_1(struct drm_device *dev, u32 reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, reg + 0x00);
-	int P = (ctrl & 0x00070000) >> 16;
-	int N = (ctrl & 0x0000ff00) >> 8;
-	int M = (ctrl & 0x000000ff) >> 0;
-	u32 ref = 27000, clk = 0;
-
-	if (ctrl & 0x80000000)
-		clk = ref * N / M;
-
-	return clk >> P;
-}
-
-static u32
-read_pll_2(struct drm_device *dev, u32 reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, reg + 0x00);
-	u32 coef = nv_rd32(device, reg + 0x04);
-	int N2 = (coef & 0xff000000) >> 24;
-	int M2 = (coef & 0x00ff0000) >> 16;
-	int N1 = (coef & 0x0000ff00) >> 8;
-	int M1 = (coef & 0x000000ff) >> 0;
-	int P = (ctrl & 0x00070000) >> 16;
-	u32 ref = 27000, clk = 0;
-
-	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
-		if ((ctrl & 0x40000100) == 0x40000000) {
-			if (M2)
-				clk = clk * N2 / M2;
-			else
-				clk = 0;
-		}
-	}
-
-	return clk >> P;
-}
-
-static u32
-read_clk(struct drm_device *dev, u32 src)
-{
-	switch (src) {
-	case 3:
-		return read_pll_2(dev, 0x004000);
-	case 2:
-		return read_pll_1(dev, 0x004008);
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-int
-nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, 0x00c040);
-
-	perflvl->core   = read_clk(dev, (ctrl & 0x00000003) >> 0);
-	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
-	perflvl->memory = read_pll_2(dev, 0x4020);
-	return 0;
-}
-
-struct nv40_pm_state {
-	u32 ctrl;
-	u32 npll_ctrl;
-	u32 npll_coef;
-	u32 spll;
-	u32 mpll_ctrl;
-	u32 mpll_coef;
-};
-
-static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
-	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	struct nouveau_pll_vals coef;
-	int ret;
-
-	ret = nvbios_pll_parse(bios, reg, pll);
-	if (ret)
-		return ret;
-
-	if (clk < pll->vco1.max_freq)
-		pll->vco2.max_freq = 0;
-
-	ret = pclk->pll_calc(pclk, pll, clk, &coef);
-	if (ret == 0)
-		return -ERANGE;
-
-	*N1 = coef.N1;
-	*M1 = coef.M1;
-	if (N2 && M2) {
-		if (pll->vco2.max_freq) {
-			*N2 = coef.N2;
-			*M2 = coef.M2;
-		} else {
-			*N2 = 1;
-			*M2 = 1;
-		}
-	}
-	*log2P = coef.log2P;
-	return 0;
-}
-
-void *
-nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nv40_pm_state *info;
-	struct nvbios_pll pll;
-	int N1, N2, M1, M2, log2P;
-	int ret;
-
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	/* core/geometric clock */
-	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
-			    &N1, &M1, &N2, &M2, &log2P);
-	if (ret < 0)
-		goto out;
-
-	if (N2 == M2) {
-		info->npll_ctrl = 0x80000100 | (log2P << 16);
-		info->npll_coef = (N1 << 8) | M1;
-	} else {
-		info->npll_ctrl = 0xc0000000 | (log2P << 16);
-		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
-	}
-
-	/* use the second PLL for shader/rop clock, if it differs from core */
-	if (perflvl->shader && perflvl->shader != perflvl->core) {
-		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
-				    &N1, &M1, NULL, NULL, &log2P);
-		if (ret < 0)
-			goto out;
-
-		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
-		info->ctrl = 0x00000223;
-	} else {
-		info->spll = 0x00000000;
-		info->ctrl = 0x00000333;
-	}
-
-	/* memory clock */
-	if (!perflvl->memory) {
-		info->mpll_ctrl = 0x00000000;
-		goto out;
-	}
-
-	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
-			    &N1, &M1, &N2, &M2, &log2P);
-	if (ret < 0)
-		goto out;
-
-	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
-	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
-	if (N2 == M2) {
-		info->mpll_ctrl |= 0x00000100;
-		info->mpll_coef  = (N1 << 8) | M1;
-	} else {
-		info->mpll_ctrl |= 0x40000000;
-		info->mpll_coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
-	}
-
-out:
-	if (ret < 0) {
-		kfree(info);
-		info = ERR_PTR(ret);
-	}
-	return info;
-}
-
-static bool
-nv40_pm_gr_idle(void *data)
-{
-	struct drm_device *dev = data;
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
-	    (nv_rd32(device, 0x400760) & 0x0000000f))
-		return false;
-
-	if (nv_rd32(device, 0x400700))
-		return false;
-
-	return true;
-}
-
-int
-nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_fifo *pfifo = nouveau_fifo(device);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv40_pm_state *info = pre_state;
-	unsigned long flags;
-	struct bit_entry M;
-	u32 crtc_mask = 0;
-	u8 sr1[2];
-	int i, ret = -EAGAIN;
-
-	/* determine which CRTCs are active, fetch VGA_SR1 for each */
-	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
-		u32 cnt = 0;
-		do {
-			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
-				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
-				if (!(sr1[i] & 0x20))
-					crtc_mask |= (1 << i);
-				break;
-			}
-			udelay(1);
-		} while (cnt++ < 32);
-	}
-
-	/* halt and idle engines */
-	pfifo->pause(pfifo, &flags);
-
-	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
-		goto resume;
-
-	ret = 0;
-
-	/* set engine clocks */
-	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(device, 0x004004, info->npll_coef);
-	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
-	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
-	mdelay(5);
-	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
-
-	if (!info->mpll_ctrl)
-		goto resume;
-
-	/* wait for vblank start on active crtcs, disable memory access */
-	for (i = 0; i < 2; i++) {
-		if (!(crtc_mask & (1 << i)))
-			continue;
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
-	}
-
-	/* prepare ram for reclocking */
-	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
-
-	/* change the PLL of each memory partition */
-	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
-	switch (nv_device(drm->device)->chipset) {
-	case 0x40:
-	case 0x45:
-	case 0x41:
-	case 0x42:
-	case 0x47:
-		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
-		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x004048, info->mpll_coef);
-		nv_wr32(device, 0x004030, info->mpll_coef);
-	case 0x43:
-	case 0x49:
-	case 0x4b:
-		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x00403c, info->mpll_coef);
-	default:
-		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(device, 0x004024, info->mpll_coef);
-		break;
-	}
-	udelay(100);
-	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
-
-	/* re-enable normal operation of memory controller */
-	nv_wr32(device, 0x1002dc, 0x00000000);
-	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
-	udelay(100);
-
-	/* execute memory reset script from vbios */
-	if (!bit_table(dev, 'M', &M))
-		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
-
-	/* make sure we're in vblank (hopefully the same one as before), and
-	 * then re-enable crtc memory access
-	 */
-	for (i = 0; i < 2; i++) {
-		if (!(crtc_mask & (1 << i)))
-			continue;
-		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
-	}
-
-	/* resume engines */
-resume:
-	pfifo->start(pfifo, &flags);
-	kfree(info);
-	return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
deleted file mode 100644
index 4efc33f..0000000
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ /dev/null
@@ -1,855 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "dispnv04/hw.h"
-#include "nouveau_pm.h"
-#include "nouveau_hwsq.h"
-
-#include "nv50_display.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-enum clk_src {
-	clk_src_crystal,
-	clk_src_href,
-	clk_src_hclk,
-	clk_src_hclkm3,
-	clk_src_hclkm3d2,
-	clk_src_host,
-	clk_src_nvclk,
-	clk_src_sclk,
-	clk_src_mclk,
-	clk_src_vdec,
-	clk_src_dom6
-};
-
-static u32 read_clk(struct drm_device *, enum clk_src);
-
-static u32
-read_div(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	switch (nv_device(drm->device)->chipset) {
-	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
-	case 0x84:
-	case 0x86:
-	case 0x98:
-	case 0xa0:
-		return nv_rd32(device, 0x004700);
-	case 0x92:
-	case 0x94:
-	case 0x96:
-		return nv_rd32(device, 0x004800);
-	default:
-		return 0x00000000;
-	}
-}
-
-static u32
-read_pll_src(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 coef, ref = read_clk(dev, clk_src_crystal);
-	u32 rsel = nv_rd32(device, 0x00e18c);
-	int P, N, M, id;
-
-	switch (nv_device(drm->device)->chipset) {
-	case 0x50:
-	case 0xa0:
-		switch (base) {
-		case 0x4020:
-		case 0x4028: id = !!(rsel & 0x00000004); break;
-		case 0x4008: id = !!(rsel & 0x00000008); break;
-		case 0x4030: id = 0; break;
-		default:
-			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
-			return 0;
-		}
-
-		coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
-		ref *=  (coef & 0x01000000) ? 2 : 4;
-		P    =  (coef & 0x00070000) >> 16;
-		N    = ((coef & 0x0000ff00) >> 8) + 1;
-		M    = ((coef & 0x000000ff) >> 0) + 1;
-		break;
-	case 0x84:
-	case 0x86:
-	case 0x92:
-		coef = nv_rd32(device, 0x00e81c);
-		P    = (coef & 0x00070000) >> 16;
-		N    = (coef & 0x0000ff00) >> 8;
-		M    = (coef & 0x000000ff) >> 0;
-		break;
-	case 0x94:
-	case 0x96:
-	case 0x98:
-		rsel = nv_rd32(device, 0x00c050);
-		switch (base) {
-		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
-		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
-		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
-		case 0x4030: rsel = 3; break;
-		default:
-			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
-			return 0;
-		}
-
-		switch (rsel) {
-		case 0: id = 1; break;
-		case 1: return read_clk(dev, clk_src_crystal);
-		case 2: return read_clk(dev, clk_src_href);
-		case 3: id = 0; break;
-		}
-
-		coef =  nv_rd32(device, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
-		P   += (coef & 0x00070000) >> 16;
-		N    = (coef & 0x0000ff00) >> 8;
-		M    = (coef & 0x000000ff) >> 0;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	if (M)
-		return (ref * N / M) >> P;
-	return 0;
-}
-
-static u32
-read_pll_ref(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 src, mast = nv_rd32(device, 0x00c040);
-
-	switch (base) {
-	case 0x004028:
-		src = !!(mast & 0x00200000);
-		break;
-	case 0x004020:
-		src = !!(mast & 0x00400000);
-		break;
-	case 0x004008:
-		src = !!(mast & 0x00010000);
-		break;
-	case 0x004030:
-		src = !!(mast & 0x02000000);
-		break;
-	case 0x00e810:
-		return read_clk(dev, clk_src_crystal);
-	default:
-		NV_ERROR(drm, "bad pll 0x%06x\n", base);
-		return 0;
-	}
-
-	if (src)
-		return read_clk(dev, clk_src_href);
-	return read_pll_src(dev, base);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 base)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 mast = nv_rd32(device, 0x00c040);
-	u32 ctrl = nv_rd32(device, base + 0);
-	u32 coef = nv_rd32(device, base + 4);
-	u32 ref = read_pll_ref(dev, base);
-	u32 clk = 0;
-	int N1, N2, M1, M2;
-
-	if (base == 0x004028 && (mast & 0x00100000)) {
-		/* wtf, appears to only disable post-divider on nva0 */
-		if (nv_device(drm->device)->chipset != 0xa0)
-			return read_clk(dev, clk_src_dom6);
-	}
-
-	N2 = (coef & 0xff000000) >> 24;
-	M2 = (coef & 0x00ff0000) >> 16;
-	N1 = (coef & 0x0000ff00) >> 8;
-	M1 = (coef & 0x000000ff);
-	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
-		if ((ctrl & 0x40000100) == 0x40000000) {
-			if (M2)
-				clk = clk * N2 / M2;
-			else
-				clk = 0;
-		}
-	}
-
-	return clk;
-}
-
-static u32
-read_clk(struct drm_device *dev, enum clk_src src)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 mast = nv_rd32(device, 0x00c040);
-	u32 P = 0;
-
-	switch (src) {
-	case clk_src_crystal:
-		return device->crystal;
-	case clk_src_href:
-		return 100000; /* PCIE reference clock */
-	case clk_src_hclk:
-		return read_clk(dev, clk_src_href) * 27778 / 10000;
-	case clk_src_hclkm3:
-		return read_clk(dev, clk_src_hclk) * 3;
-	case clk_src_hclkm3d2:
-		return read_clk(dev, clk_src_hclk) * 3 / 2;
-	case clk_src_host:
-		switch (mast & 0x30000000) {
-		case 0x00000000: return read_clk(dev, clk_src_href);
-		case 0x10000000: break;
-		case 0x20000000: /* !0x50 */
-		case 0x30000000: return read_clk(dev, clk_src_hclk);
-		}
-		break;
-	case clk_src_nvclk:
-		if (!(mast & 0x00100000))
-			P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
-		switch (mast & 0x00000003) {
-		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
-		case 0x00000001: return read_clk(dev, clk_src_dom6);
-		case 0x00000002: return read_pll(dev, 0x004020) >> P;
-		case 0x00000003: return read_pll(dev, 0x004028) >> P;
-		}
-		break;
-	case clk_src_sclk:
-		P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
-		switch (mast & 0x00000030) {
-		case 0x00000000:
-			if (mast & 0x00000080)
-				return read_clk(dev, clk_src_host) >> P;
-			return read_clk(dev, clk_src_crystal) >> P;
-		case 0x00000010: break;
-		case 0x00000020: return read_pll(dev, 0x004028) >> P;
-		case 0x00000030: return read_pll(dev, 0x004020) >> P;
-		}
-		break;
-	case clk_src_mclk:
-		P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(device, 0x004008) & 0x00000200) {
-			switch (mast & 0x0000c000) {
-			case 0x00000000:
-				return read_clk(dev, clk_src_crystal) >> P;
-			case 0x00008000:
-			case 0x0000c000:
-				return read_clk(dev, clk_src_href) >> P;
-			}
-		} else {
-			return read_pll(dev, 0x004008) >> P;
-		}
-		break;
-	case clk_src_vdec:
-		P = (read_div(dev) & 0x00000700) >> 8;
-		switch (nv_device(drm->device)->chipset) {
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0xa0:
-			switch (mast & 0x00000c00) {
-			case 0x00000000:
-				if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
-					return read_clk(dev, clk_src_nvclk) >> P;
-				return read_clk(dev, clk_src_crystal) >> P;
-			case 0x00000400:
-				return 0;
-			case 0x00000800:
-				if (mast & 0x01000000)
-					return read_pll(dev, 0x004028) >> P;
-				return read_pll(dev, 0x004030) >> P;
-			case 0x00000c00:
-				return read_clk(dev, clk_src_nvclk) >> P;
-			}
-			break;
-		case 0x98:
-			switch (mast & 0x00000c00) {
-			case 0x00000000:
-				return read_clk(dev, clk_src_nvclk) >> P;
-			case 0x00000400:
-				return 0;
-			case 0x00000800:
-				return read_clk(dev, clk_src_hclkm3d2) >> P;
-			case 0x00000c00:
-				return read_clk(dev, clk_src_mclk) >> P;
-			}
-			break;
-		}
-		break;
-	case clk_src_dom6:
-		switch (nv_device(drm->device)->chipset) {
-		case 0x50:
-		case 0xa0:
-			return read_pll(dev, 0x00e810) >> 2;
-		case 0x84:
-		case 0x86:
-		case 0x92:
-		case 0x94:
-		case 0x96:
-		case 0x98:
-			P = (read_div(dev) & 0x00000007) >> 0;
-			switch (mast & 0x0c000000) {
-			case 0x00000000: return read_clk(dev, clk_src_href);
-			case 0x04000000: break;
-			case 0x08000000: return read_clk(dev, clk_src_hclk);
-			case 0x0c000000:
-				return read_clk(dev, clk_src_hclkm3) >> P;
-			}
-			break;
-		default:
-			break;
-		}
-	default:
-		break;
-	}
-
-	NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
-	return 0;
-}
-
-int
-nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	if (nv_device(drm->device)->chipset == 0xaa ||
-	    nv_device(drm->device)->chipset == 0xac)
-		return 0;
-
-	perflvl->core   = read_clk(dev, clk_src_nvclk);
-	perflvl->shader = read_clk(dev, clk_src_sclk);
-	perflvl->memory = read_clk(dev, clk_src_mclk);
-	if (nv_device(drm->device)->chipset != 0x50) {
-		perflvl->vdec = read_clk(dev, clk_src_vdec);
-		perflvl->dom6 = read_clk(dev, clk_src_dom6);
-	}
-
-	return 0;
-}
-
-struct nv50_pm_state {
-	struct nouveau_pm_level *perflvl;
-	struct hwsq_ucode eclk_hwsq;
-	struct hwsq_ucode mclk_hwsq;
-	u32 mscript;
-	u32 mmast;
-	u32 mctrl;
-	u32 mcoef;
-};
-
-static u32
-calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
-	 u32 clk, int *N1, int *M1, int *log2P)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nouveau_clock *pclk = nouveau_clock(device);
-	struct nouveau_pll_vals coef;
-	int ret;
-
-	ret = nvbios_pll_parse(bios, reg, pll);
-	if (ret)
-		return 0;
-
-	pll->vco2.max_freq = 0;
-	pll->refclk = read_pll_ref(dev, reg);
-	if (!pll->refclk)
-		return 0;
-
-	ret = pclk->pll_calc(pclk, pll, clk, &coef);
-	if (ret == 0)
-		return 0;
-
-	*N1 = coef.N1;
-	*M1 = coef.M1;
-	*log2P = coef.log2P;
-	return ret;
-}
-
-static inline u32
-calc_div(u32 src, u32 target, int *div)
-{
-	u32 clk0 = src, clk1 = src;
-	for (*div = 0; *div <= 7; (*div)++) {
-		if (clk0 <= target) {
-			clk1 = clk0 << (*div ? 1 : 0);
-			break;
-		}
-		clk0 >>= 1;
-	}
-
-	if (target - clk0 <= clk1 - target)
-		return clk0;
-	(*div)--;
-	return clk1;
-}
-
-static inline u32
-clk_same(u32 a, u32 b)
-{
-	return ((a / 1000) == (b / 1000));
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	if (nsec > 1000)
-		hwsq_usec(hwsq, (nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	if (mr <= 1)
-		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
-	if (mr <= 3)
-		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
-	return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-
-	if (mr <= 1) {
-		if (pfb->ram->ranks > 1)
-			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
-		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
-	} else
-	if (mr <= 3) {
-		if (pfb->ram->ranks > 1)
-			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
-		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nv50_pm_state *info = exec->priv;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	u32 ctrl = nv_rd32(device, 0x004008);
-
-	info->mmast = nv_rd32(device, 0x00c040);
-	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
-	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
-
-	hwsq_wr32(hwsq, 0xc040, info->mmast);
-	hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
-	if (info->mctrl & 0x80000000)
-		hwsq_wr32(hwsq, 0x400c, info->mcoef);
-	hwsq_wr32(hwsq, 0x4008, info->mctrl);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nv50_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	int i;
-
-	for (i = 0; i < 9; i++) {
-		u32 reg = 0x100220 + (i * 4);
-		u32 val = nv_rd32(device, reg);
-		if (val != perflvl->timing.reg[i])
-			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
-	}
-}
-
-static int
-calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
-	  struct nv50_pm_state *info)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	struct nvbios_pll pll;
-	int N, M, P;
-	int ret;
-
-	/* use pcie refclock if possible, otherwise use mpll */
-	info->mctrl  = nv_rd32(device, 0x004008);
-	info->mctrl &= ~0x81ff0200;
-	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
-		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
-	} else {
-		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
-		if (ret == 0)
-			return -EINVAL;
-
-		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
-		info->mctrl |= pll.bias_p << 19;
-		info->mcoef  = (N << 8) | M;
-	}
-
-	/* build the ucode which will reclock the memory for us */
-	hwsq_init(hwsq);
-	if (crtc_mask) {
-		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
-		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
-	}
-	if (nv_device(drm->device)->chipset >= 0x92)
-		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
-	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
-
-	ret = nouveau_mem_exec(&exec, perflvl);
-	if (ret)
-		return ret;
-
-	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
-	if (nv_device(drm->device)->chipset >= 0x92)
-		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
-	hwsq_fini(hwsq);
-	return 0;
-}
-
-void *
-nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_pm_state *info;
-	struct hwsq_ucode *hwsq;
-	struct nvbios_pll pll;
-	u32 out, mast, divs, ctrl;
-	int clk, ret = -EINVAL;
-	int N, M, P1, P2;
-
-	if (nv_device(drm->device)->chipset == 0xaa ||
-	    nv_device(drm->device)->chipset == 0xac)
-		return ERR_PTR(-ENODEV);
-
-	info = kmalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-	info->perflvl = perflvl;
-
-	/* memory: build hwsq ucode which we'll use to reclock memory.
-	 *         use pcie refclock if possible, otherwise use mpll */
-	info->mclk_hwsq.len = 0;
-	if (perflvl->memory) {
-		ret = calc_mclk(dev, perflvl, info);
-		if (ret)
-			goto error;
-		info->mscript = perflvl->memscript;
-	}
-
-	divs = read_div(dev);
-	mast = info->mmast;
-
-	/* start building HWSQ script for engine reclocking */
-	hwsq = &info->eclk_hwsq;
-	hwsq_init(hwsq);
-	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
-
-	/* vdec/dom6: switch to "safe" clocks temporarily */
-	if (perflvl->vdec) {
-		mast &= ~0x00000c00;
-		divs &= ~0x00000700;
-	}
-
-	if (perflvl->dom6) {
-		mast &= ~0x0c000000;
-		divs &= ~0x00000007;
-	}
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* vdec: avoid modifying xpll until we know exactly how the other
-	 * clock domains work, i suspect at least some of them can also be
-	 * tied to xpll...
-	 */
-	if (perflvl->vdec) {
-		/* see how close we can get using nvclk as a source */
-		clk = calc_div(perflvl->core, perflvl->vdec, &P1);
-
-		/* see how close we can get using xpll/hclk as a source */
-		if (nv_device(drm->device)->chipset != 0x98)
-			out = read_pll(dev, 0x004030);
-		else
-			out = read_clk(dev, clk_src_hclkm3d2);
-		out = calc_div(out, perflvl->vdec, &P2);
-
-		/* select whichever gets us closest */
-		if (abs((int)perflvl->vdec - clk) <=
-		    abs((int)perflvl->vdec - out)) {
-			if (nv_device(drm->device)->chipset != 0x98)
-				mast |= 0x00000c00;
-			divs |= P1 << 8;
-		} else {
-			mast |= 0x00000800;
-			divs |= P2 << 8;
-		}
-	}
-
-	/* dom6: nfi what this is, but we're limited to various combinations
-	 * of the host clock frequency
-	 */
-	if (perflvl->dom6) {
-		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
-			mast |= 0x00000000;
-		} else
-		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
-			mast |= 0x08000000;
-		} else {
-			clk = read_clk(dev, clk_src_hclk) * 3;
-			clk = calc_div(clk, perflvl->dom6, &P1);
-
-			mast |= 0x0c000000;
-			divs |= P1;
-		}
-	}
-
-	/* vdec/dom6: complete switch to new clocks */
-	switch (nv_device(drm->device)->chipset) {
-	case 0x92:
-	case 0x94:
-	case 0x96:
-		hwsq_wr32(hwsq, 0x004800, divs);
-		break;
-	default:
-		hwsq_wr32(hwsq, 0x004700, divs);
-		break;
-	}
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* core/shader: make sure sclk/nvclk are disconnected from their
-	 * PLLs (nvclk to dom6, sclk to hclk)
-	 */
-	if (nv_device(drm->device)->chipset < 0x92)
-		mast = (mast & ~0x001000b0) | 0x00100080;
-	else
-		mast = (mast & ~0x000000b3) | 0x00000081;
-
-	hwsq_wr32(hwsq, 0x00c040, mast);
-
-	/* core: for the moment at least, always use nvpll */
-	clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
-	if (clk == 0)
-		goto error;
-
-	ctrl  = nv_rd32(device, 0x004028) & ~0xc03f0100;
-	mast &= ~0x00100000;
-	mast |= 3;
-
-	hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
-	hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
-
-	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
-	 * very careful that the shader clock is at least twice the core, or
-	 * some chipsets will be very unhappy.  i expect most or all of these
-	 * cases will be handled by tying to nvclk, but it's possible there's
-	 * corners
-	 */
-	ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
-
-	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
-		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
-		hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
-	} else {
-		clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
-		if (clk == 0)
-			goto error;
-		ctrl |= 0x80000000;
-
-		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
-		hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
-		hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
-	}
-
-	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
-	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
-	hwsq_fini(hwsq);
-
-	return info;
-error:
-	kfree(info);
-	return ERR_PTR(ret);
-}
-
-static int
-prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 hwsq_data, hwsq_kick;
-	int i;
-
-	if (nv_device(drm->device)->chipset < 0x94) {
-		hwsq_data = 0x001400;
-		hwsq_kick = 0x00000003;
-	} else {
-		hwsq_data = 0x080000;
-		hwsq_kick = 0x00000001;
-	}
-	/* upload hwsq ucode */
-	nv_mask(device, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(device, 0x001304, 0x00000000);
-	if (nv_device(drm->device)->chipset >= 0x92)
-		nv_wr32(device, 0x001318, 0x00000000);
-	for (i = 0; i < hwsq->len / 4; i++)
-		nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
-	nv_mask(device, 0x001098, 0x00000018, 0x00000018);
-
-	/* launch, and wait for completion */
-	nv_wr32(device, 0x00130c, hwsq_kick);
-	if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
-		NV_ERROR(drm, "hwsq ucode exec timed out\n");
-		NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
-		for (i = 0; i < hwsq->len / 4; i++) {
-			NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
-				 nv_rd32(device, 0x001400 + (i * 4)));
-		}
-
-		return -EIO;
-	}
-
-	return 0;
-}
-
-int
-nv50_pm_clocks_set(struct drm_device *dev, void *data)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nv50_pm_state *info = data;
-	struct bit_entry M;
-	int ret = -EBUSY;
-
-	/* halt and idle execution engines */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
-		goto resume;
-	if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
-		goto resume;
-
-	/* program memory clock, if necessary - must come before engine clock
-	 * reprogramming due to how we construct the hwsq scripts in pre()
-	 */
-#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
-	if (info->mclk_hwsq.len) {
-		/* execute some scripts that do ??? from the vbios.. */
-		if (!bit_table(dev, 'M', &M) && M.version == 1) {
-			if (M.length >= 6)
-				nouveau_bios_init_exec(dev, ROM16(M.data[5]));
-			if (M.length >= 8)
-				nouveau_bios_init_exec(dev, ROM16(M.data[7]));
-			if (M.length >= 10)
-				nouveau_bios_init_exec(dev, ROM16(M.data[9]));
-			nouveau_bios_init_exec(dev, info->mscript);
-		}
-
-		ret = prog_hwsq(dev, &info->mclk_hwsq);
-		if (ret)
-			goto resume;
-	}
-
-	/* program engine clocks */
-	ret = prog_hwsq(dev, &info->eclk_hwsq);
-
-resume:
-	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
-	kfree(info);
-	return ret;
-}
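The deleted nv50 code above, and the nva3/nvc0 files removed below, all follow the same two-phase contract: a *_pm_clocks_pre() hook does the fallible work up front (table parsing, PLL/divider math) and returns a heap-allocated state object or an ERR_PTR, and *_pm_clocks_set() later programs the hardware from that object and frees it exactly once. A minimal userspace sketch of that shape, with invented names and a stand-in for the real PLL math, purely for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct reclock_state {
	unsigned core_khz;
	unsigned coef;
};

/* "pre" phase: all fallible work happens here, with no hardware access;
 * the result is a heap-allocated state object. */
static struct reclock_state *reclock_pre(unsigned core_khz)
{
	struct reclock_state *info;

	if (!core_khz) {
		errno = EINVAL;
		return NULL;
	}
	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;
	info->core_khz = core_khz;
	info->coef = core_khz / 27000;	/* stand-in for the real PLL maths */
	return info;
}

/* "set" phase: commit the precomputed values, then free the state object. */
static int reclock_set(struct reclock_state *info)
{
	printf("programming coef 0x%x for %u kHz\n", info->coef, info->core_khz);
	free(info);
	return 0;
}

int main(void)
{
	struct reclock_state *info = reclock_pre(405000);
	return info ? reclock_set(info) : 1;
}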
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
deleted file mode 100644
index 0d0ed59..0000000
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_clk(struct drm_device *, int, bool);
-static u32 read_pll(struct drm_device *, int, u32);
-
-static u32
-read_vco(struct drm_device *dev, int clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
-	if ((sctl & 0x00000030) != 0x00000030)
-		return read_pll(dev, 0x41, 0x00e820);
-	return read_pll(dev, 0x42, 0x00e8a0);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk, bool ignore_en)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	u32 sctl, sdiv, sclk;
-
-	/* refclk for the 0xe8xx plls is a fixed frequency */
-	if (clk >= 0x40) {
-		if (nv_device(drm->device)->chipset == 0xaf) {
-			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(device, 0x00471c) * 1000;
-		}
-
-		return device->crystal;
-	}
-
-	sctl = nv_rd32(device, 0x4120 + (clk * 4));
-	if (!ignore_en && !(sctl & 0x00000100))
-		return 0;
-
-	switch (sctl & 0x00003000) {
-	case 0x00000000:
-		return device->crystal;
-	case 0x00002000:
-		if (sctl & 0x00000040)
-			return 108000;
-		return 100000;
-	case 0x00003000:
-		sclk = read_vco(dev, clk);
-		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
-		return (sclk * 2) / sdiv;
-	default:
-		return 0;
-	}
-}
-
-static u32
-read_pll(struct drm_device *dev, int clk, u32 pll)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, pll + 0);
-	u32 sclk = 0, P = 1, N = 1, M = 1;
-
-	if (!(ctrl & 0x00000008)) {
-		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(device, pll + 4);
-			M = (coef & 0x000000ff) >> 0;
-			N = (coef & 0x0000ff00) >> 8;
-			P = (coef & 0x003f0000) >> 16;
-
-			/* no post-divider on these.. */
-			if ((pll & 0x00ff00) == 0x00e800)
-				P = 1;
-
-			sclk = read_clk(dev, 0x00 + clk, false);
-		}
-	} else {
-		sclk = read_clk(dev, 0x10 + clk, false);
-	}
-
-	if (M * P)
-		return sclk * N / (M * P);
-	return 0;
-}
-
-struct creg {
-	u32 clk;
-	u32 pll;
-};
-
-static int
-calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll limits;
-	u32 oclk, sclk, sdiv;
-	int P, N, M, diff;
-	int ret;
-
-	reg->pll = 0;
-	reg->clk = 0;
-	if (!khz) {
-		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
-		return 0;
-	}
-
-	switch (khz) {
-	case 27000:
-		reg->clk = 0x00000100;
-		return khz;
-	case 100000:
-		reg->clk = 0x00002100;
-		return khz;
-	case 108000:
-		reg->clk = 0x00002140;
-		return khz;
-	default:
-		sclk = read_vco(dev, clk);
-		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
-		/* if the clock has a PLL attached, and we can get a within
-		 * [-2, 3) MHz of a divider, we'll disable the PLL and use
-		 * the divider instead.
-		 *
-		 * divider can go as low as 2, limited here because NVIDIA
-		 * and the VBIOS on my NVA8 seem to prefer using the PLL
-		 * for 810MHz - is there a good reason?
-		 */
-		if (sdiv > 4) {
-			oclk = (sclk * 2) / sdiv;
-			diff = khz - oclk;
-			if (!pll || (diff >= -2000 && diff < 3000)) {
-				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
-				return oclk;
-			}
-		}
-
-		if (!pll) {
-			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
-			return -ERANGE;
-		}
-
-		break;
-	}
-
-	ret = nvbios_pll_parse(bios, pll, &limits);
-	if (ret)
-		return ret;
-
-	limits.refclk = read_clk(dev, clk - 0x10, true);
-	if (!limits.refclk)
-		return -EINVAL;
-
-	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
-	if (ret >= 0) {
-		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
-		reg->pll = (P << 16) | (N << 8) | M;
-	}
-
-	return ret;
-}
-
-static void
-prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	const u32 src0 = 0x004120 + (clk * 4);
-	const u32 src1 = 0x004160 + (clk * 4);
-	const u32 ctrl = pll + 0;
-	const u32 coef = pll + 4;
-
-	if (!reg->clk && !reg->pll) {
-		NV_DEBUG(drm, "no clock for %02x\n", clk);
-		return;
-	}
-
-	if (reg->pll) {
-		nv_mask(device, src0, 0x00000101, 0x00000101);
-		nv_wr32(device, coef, reg->pll);
-		nv_mask(device, ctrl, 0x00000015, 0x00000015);
-		nv_mask(device, ctrl, 0x00000010, 0x00000000);
-		nv_wait(device, ctrl, 0x00020000, 0x00020000);
-		nv_mask(device, ctrl, 0x00000010, 0x00000010);
-		nv_mask(device, ctrl, 0x00000008, 0x00000000);
-		nv_mask(device, src1, 0x00000100, 0x00000000);
-		nv_mask(device, src1, 0x00000001, 0x00000000);
-	} else {
-		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
-		nv_mask(device, ctrl, 0x00000018, 0x00000018);
-		udelay(20);
-		nv_mask(device, ctrl, 0x00000001, 0x00000000);
-		nv_mask(device, src0, 0x00000100, 0x00000000);
-		nv_mask(device, src0, 0x00000001, 0x00000000);
-	}
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct creg *reg)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-
-	if (!reg->clk) {
-		NV_DEBUG(drm, "no clock for %02x\n", clk);
-		return;
-	}
-
-	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
-}
-
-int
-nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	perflvl->core   = read_pll(dev, 0x00, 0x4200);
-	perflvl->shader = read_pll(dev, 0x01, 0x4220);
-	perflvl->memory = read_pll(dev, 0x02, 0x4000);
-	perflvl->unka0  = read_clk(dev, 0x20, false);
-	perflvl->vdec   = read_clk(dev, 0x21, false);
-	perflvl->daemon = read_clk(dev, 0x25, false);
-	perflvl->copy   = perflvl->core;
-	return 0;
-}
-
-struct nva3_pm_state {
-	struct nouveau_pm_level *perflvl;
-
-	struct creg nclk;
-	struct creg sclk;
-	struct creg vdec;
-	struct creg unka0;
-
-	struct creg mclk;
-	u8 *rammap;
-	u8  rammap_ver;
-	u8  rammap_len;
-	u8 *ramcfg;
-	u8  ramcfg_len;
-	u32 r004018;
-	u32 r100760;
-};
-
-void *
-nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nva3_pm_state *info;
-	u8 ramcfg_cnt;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
-	if (ret < 0)
-		goto out;
-
-	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
-	if (ret < 0)
-		goto out;
-
-	info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
-					   &info->rammap_ver,
-					   &info->rammap_len,
-					   &ramcfg_cnt, &info->ramcfg_len);
-	if (info->rammap_ver != 0x10 || info->rammap_len < 5)
-		info->rammap = NULL;
-
-	info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
-					   &info->rammap_ver,
-					   &info->ramcfg_len);
-	if (info->rammap_ver != 0x10)
-		info->ramcfg = NULL;
-
-	info->perflvl = perflvl;
-out:
-	if (ret < 0) {
-		kfree(info);
-		info = ERR_PTR(ret);
-	}
-	return info;
-}
-
-static bool
-nva3_pm_grcp_idle(void *data)
-{
-	struct drm_device *dev = data;
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	if (!(nv_rd32(device, 0x400304) & 0x00000001))
-		return true;
-	if (nv_rd32(device, 0x400308) == 0x0050001c)
-		return true;
-	return false;
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002d4, 0x00000001);
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002d0, 0x00000001);
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	volatile u32 post = nv_rd32(device, 0); (void)post;
-	udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	if (mr <= 1)
-		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
-	if (mr <= 3)
-		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
-	return 0;
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (mr <= 1) {
-		if (pfb->ram->ranks > 1)
-			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
-		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
-	} else
-	if (mr <= 3) {
-		if (pfb->ram->ranks > 1)
-			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
-		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nva3_pm_state *info = exec->priv;
-	u32 ctrl;
-
-	ctrl = nv_rd32(device, 0x004000);
-	if (!(ctrl & 0x00000008) && info->mclk.pll) {
-		nv_wr32(device, 0x004000, (ctrl |=  0x00000008));
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(device, 0x004018, 0x00001000);
-		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
-		nv_wr32(device, 0x004004, info->mclk.pll);
-		nv_wr32(device, 0x004000, (ctrl |=  0x00000001));
-		udelay(64);
-		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
-		udelay(20);
-	} else
-	if (!info->mclk.pll) {
-		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
-		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
-	}
-
-	if (info->rammap) {
-		if (info->ramcfg && (info->rammap[4] & 0x08)) {
-			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
-				      info->ramcfg[5];
-			u32 unk5a4 = ROM16(info->ramcfg[7]);
-			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
-				     (info->ramcfg[3] & 0x0f) << 16 |
-				     (info->ramcfg[9] & 0x0f) |
-				     0x80000000;
-			nv_wr32(device, 0x1005a0, unk5a0);
-			nv_wr32(device, 0x1005a4, unk5a4);
-			nv_wr32(device, 0x10f804, unk804);
-			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
-		} else {
-			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
-			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
-			nv_mask(device, 0x100760, 0x22222222, info->r100760);
-			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
-			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
-		}
-	}
-
-	if (info->mclk.pll) {
-		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
-		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
-	}
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nva3_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	int i;
-
-	for (i = 0; i < 9; i++)
-		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
-
-	if (info->ramcfg) {
-		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
-		nv_mask(device, 0x100200, 0x00001000, data);
-	}
-
-	if (info->ramcfg) {
-		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
-		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
-		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
-		if ( (info->ramcfg[2] & 0x20))
-			unk714 |= 0xf0000000;
-		if (!(info->ramcfg[2] & 0x04))
-			unk714 |= 0x00000010;
-		nv_wr32(device, 0x100714, unk714);
-
-		if (info->ramcfg[2] & 0x01)
-			unk71c |= 0x00000100;
-		nv_wr32(device, 0x10071c, unk71c);
-
-		if (info->ramcfg[2] & 0x02)
-			unk718 |= 0x00000100;
-		nv_wr32(device, 0x100718, unk718);
-
-		if (info->ramcfg[2] & 0x10)
-			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
-	}
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-	u32 ctrl;
-
-	/* XXX: where the fuck does 750MHz come from? */
-	if (info->perflvl->memory <= 750000) {
-		info->r004018 = 0x10000000;
-		info->r100760 = 0x22222222;
-	}
-
-	ctrl = nv_rd32(device, 0x004000);
-	if (ctrl & 0x00000008) {
-		if (info->mclk.pll) {
-			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
-			nv_wr32(device, 0x004004, info->mclk.pll);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
-			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
-			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
-			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
-			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
-		}
-	} else {
-		u32 ssel = 0x00000101;
-		if (info->mclk.clk)
-			ssel |= info->mclk.clk;
-		else
-			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-		nv_mask(device, 0x004168, 0x003f3141, ctrl);
-	}
-
-	if (info->ramcfg) {
-		if (info->ramcfg[2] & 0x10) {
-			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
-		} else {
-			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
-			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
-		}
-	}
-	if (info->rammap && !(info->rammap[4] & 0x02))
-		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
-	nv_wr32(device, 0x611200, 0x00003300);
-	if (!(info->ramcfg[2] & 0x10))
-		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
-
-	nouveau_mem_exec(&exec, info->perflvl);
-
-	nv_wr32(device, 0x611200, 0x00003330);
-	if (info->rammap && (info->rammap[4] & 0x02))
-		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
-	if (info->ramcfg) {
-		if (info->ramcfg[2] & 0x10) {
-			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
-			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
-		} else {
-			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
-		}
-	}
-
-	if (info->mclk.pll) {
-		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
-	} else {
-		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
-		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
-	}
-}
-
-int
-nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nva3_pm_state *info = pre_state;
-	int ret = -EAGAIN;
-
-	/* prevent any new grctx switches from starting */
-	nv_wr32(device, 0x400324, 0x00000000);
-	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
-	/* wait for any pending grctx switches to complete */
-	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
-		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
-		goto cleanup;
-	}
-	/* freeze PFIFO */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
-		NV_ERROR(drm, "pm: fifo didn't go idle\n");
-		goto cleanup;
-	}
-
-	prog_pll(dev, 0x00, 0x004200, &info->nclk);
-	prog_pll(dev, 0x01, 0x004220, &info->sclk);
-	prog_clk(dev, 0x20, &info->unka0);
-	prog_clk(dev, 0x21, &info->vdec);
-
-	if (info->mclk.clk || info->mclk.pll)
-		prog_mem(dev, info);
-
-	ret = 0;
-
-cleanup:
-	/* unfreeze PFIFO */
-	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
-	/* restore ctxprog to normal */
-	nv_wr32(device, 0x400324, 0x00000000);
-	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
-	/* unblock it if necessary */
-	if (nv_rd32(device, 0x400308) == 0x0050001c)
-		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
-	kfree(info);
-	return ret;
-}
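The calc_clk() helper in the nva3 code above only keeps the plain divider when a divider of the doubled VCO lands within roughly [-2, +3) MHz of the target and the divider itself is greater than 4; otherwise it falls through to the PLL path. A self-contained sketch of just that arithmetic (illustrative only, frequencies in kHz):

#include <stdio.h>

/* Return the divided clock if it falls inside the window calc_clk() accepts
 * (roughly -2..+3 MHz of the target), otherwise 0 to signal "use the PLL".
 * Targets are assumed to be well above 3 MHz. */
static unsigned try_divider(unsigned vco_khz, unsigned target_khz)
{
	unsigned sdiv = (vco_khz * 2) / (target_khz - 2999);
	unsigned oclk;
	int diff;

	if (sdiv > 65)
		sdiv = 65;
	if (sdiv <= 4)		/* the code refuses dividers this small */
		return 0;
	oclk = (vco_khz * 2) / sdiv;
	diff = (int)target_khz - (int)oclk;
	return (diff >= -2000 && diff < 3000) ? oclk : 0;
}

int main(void)
{
	printf("%u\n", try_divider(1620000, 405000));	/* divider is exact */
	printf("%u\n", try_divider(1620000, 500000));	/* 0: fall back to PLL */
	return 0;
}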
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
deleted file mode 100644
index 3b7041c..0000000
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ /dev/null
@@ -1,599 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "nouveau_drm.h"
-#include "nouveau_bios.h"
-#include "nouveau_pm.h"
-
-#include <subdev/bios/pll.h>
-#include <subdev/bios.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32 read_div(struct drm_device *, int, u32, u32);
-static u32 read_pll(struct drm_device *, u32);
-
-static u32
-read_vco(struct drm_device *dev, u32 dsrc)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssrc = nv_rd32(device, dsrc);
-	if (!(ssrc & 0x00000100))
-		return read_pll(dev, 0x00e800);
-	return read_pll(dev, 0x00e820);
-}
-
-static u32
-read_pll(struct drm_device *dev, u32 pll)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ctrl = nv_rd32(device, pll + 0);
-	u32 coef = nv_rd32(device, pll + 4);
-	u32 P = (coef & 0x003f0000) >> 16;
-	u32 N = (coef & 0x0000ff00) >> 8;
-	u32 M = (coef & 0x000000ff) >> 0;
-	u32 sclk, doff;
-
-	if (!(ctrl & 0x00000001))
-		return 0;
-
-	switch (pll & 0xfff000) {
-	case 0x00e000:
-		sclk = 27000;
-		P = 1;
-		break;
-	case 0x137000:
-		doff = (pll - 0x137000) / 0x20;
-		sclk = read_div(dev, doff, 0x137120, 0x137140);
-		break;
-	case 0x132000:
-		switch (pll) {
-		case 0x132000:
-			sclk = read_pll(dev, 0x132020);
-			break;
-		case 0x132020:
-			sclk = read_div(dev, 0, 0x137320, 0x137330);
-			break;
-		default:
-			return 0;
-		}
-		break;
-	default:
-		return 0;
-	}
-
-	return sclk * N / M / P;
-}
-
-static u32
-read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(device, dctl + (doff * 4));
-
-	switch (ssrc & 0x00000003) {
-	case 0:
-		if ((ssrc & 0x00030000) != 0x00030000)
-			return 27000;
-		return 108000;
-	case 2:
-		return 100000;
-	case 3:
-		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(dev, dsrc + (doff * 4));
-			u32 sdiv = (sctl & 0x0000003f) + 2;
-			return (sclk * 2) / sdiv;
-		}
-
-		return read_vco(dev, dsrc + (doff * 4));
-	default:
-		return 0;
-	}
-}
-
-static u32
-read_mem(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 ssel = nv_rd32(device, 0x1373f0);
-	if (ssel & 0x00000001)
-		return read_div(dev, 0, 0x137300, 0x137310);
-	return read_pll(dev, 0x132000);
-}
-
-static u32
-read_clk(struct drm_device *dev, int clk)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(device, 0x137100);
-	u32 sclk, sdiv;
-
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(dev, 0x137000 + (clk * 0x20));
-		else
-			sclk = read_pll(dev, 0x1370e0);
-		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
-	} else {
-		sclk = read_div(dev, clk, 0x137160, 0x1371d0);
-		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
-	}
-
-	if (sctl & 0x80000000)
-		return (sclk * 2) / sdiv;
-	return sclk;
-}
-
-int
-nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	perflvl->shader = read_clk(dev, 0x00);
-	perflvl->core   = perflvl->shader / 2;
-	perflvl->memory = read_mem(dev);
-	perflvl->rop    = read_clk(dev, 0x01);
-	perflvl->hub07  = read_clk(dev, 0x02);
-	perflvl->hub06  = read_clk(dev, 0x07);
-	perflvl->hub01  = read_clk(dev, 0x08);
-	perflvl->copy   = read_clk(dev, 0x09);
-	perflvl->daemon = read_clk(dev, 0x0c);
-	perflvl->vdec   = read_clk(dev, 0x0e);
-	return 0;
-}
-
-struct nvc0_pm_clock {
-	u32 freq;
-	u32 ssel;
-	u32 mdiv;
-	u32 dsrc;
-	u32 ddiv;
-	u32 coef;
-};
-
-struct nvc0_pm_state {
-	struct nouveau_pm_level *perflvl;
-	struct nvc0_pm_clock eng[16];
-	struct nvc0_pm_clock mem;
-};
-
-static u32
-calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
-{
-	u32 div = min((ref * 2) / freq, (u32)65);
-	if (div < 2)
-		div = 2;
-
-	*ddiv = div - 2;
-	return (ref * 2) / div;
-}
-
-static u32
-calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
-{
-	u32 sclk;
-
-	/* use one of the fixed frequencies if possible */
-	*ddiv = 0x00000000;
-	switch (freq) {
-	case  27000:
-	case 108000:
-		*dsrc = 0x00000000;
-		if (freq == 108000)
-			*dsrc |= 0x00030000;
-		return freq;
-	case 100000:
-		*dsrc = 0x00000002;
-		return freq;
-	default:
-		*dsrc = 0x00000003;
-		break;
-	}
-
-	/* otherwise, calculate the closest divider */
-	sclk = read_vco(dev, clk);
-	if (clk < 7)
-		sclk = calc_div(dev, clk, sclk, freq, ddiv);
-	return sclk;
-}
-
-static u32
-calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll limits;
-	int N, M, P, ret;
-
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
-	if (ret)
-		return 0;
-
-	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
-	if (!limits.refclk)
-		return 0;
-
-	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
-	if (ret <= 0)
-		return 0;
-
-	*coef = (P << 16) | (N << 8) | M;
-	return ret;
-}
-
-/* A (likely rather simplified and incomplete) view of the clock tree
- *
- * Key:
- *
- * S: source select
- * D: divider
- * P: pll
- * F: switch
- *
- * Engine clocks:
- *
- * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
- *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
- *
- * Not all registers exist for all clocks.  For example: clocks >= 8 don't
- * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
- * they have the divider at 1371d0, though the source selection at 137160
- * still exists.  You must use the divider at 137250 for these instead.
- *
- * Memory clock:
- *
- * TBD, read_mem() above is likely very wrong...
- *
- */
-
-static int
-calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
-{
-	u32 src0, div0, div1D, div1P = 0;
-	u32 clk0, clk1 = 0;
-
-	/* invalid clock domain */
-	if (!freq)
-		return 0;
-
-	/* first possible path, using only dividers */
-	clk0 = calc_src(dev, clk, freq, &src0, &div0);
-	clk0 = calc_div(dev, clk, clk0, freq, &div1D);
-
-	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk < 7)
-			clk1 = calc_pll(dev, clk, freq, &info->coef);
-		else
-			clk1 = read_pll(dev, 0x1370e0);
-		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
-	}
-
-	/* select the method which gets closest to target freq */
-	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
-		info->dsrc = src0;
-		if (div0) {
-			info->ddiv |= 0x80000000;
-			info->ddiv |= div0 << 8;
-			info->ddiv |= div0;
-		}
-		if (div1D) {
-			info->mdiv |= 0x80000000;
-			info->mdiv |= div1D;
-		}
-		info->ssel = 0;
-		info->freq = clk0;
-	} else {
-		if (div1P) {
-			info->mdiv |= 0x80000000;
-			info->mdiv |= div1P << 8;
-		}
-		info->ssel = (1 << clk);
-		info->freq = clk1;
-	}
-
-	return 0;
-}
-
-static int
-calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_bios *bios = nouveau_bios(device);
-	struct nvbios_pll pll;
-	int N, M, P, ret;
-	u32 ctrl;
-
-	/* mclk pll input freq comes from another pll, make sure it's on */
-	ctrl = nv_rd32(device, 0x132020);
-	if (!(ctrl & 0x00000001)) {
-		/* if not, program it to 567MHz.  nfi where this value comes
-		 * from - it looks like it's in the pll limits table for
-		 * 132000 but the binary driver ignores all my attempts to
-		 * change this value.
-		 */
-		nv_wr32(device, 0x137320, 0x00000103);
-		nv_wr32(device, 0x137330, 0x81200606);
-		nv_wait(device, 0x132020, 0x00010000, 0x00010000);
-		nv_wr32(device, 0x132024, 0x0001150f);
-		nv_mask(device, 0x132020, 0x00000001, 0x00000001);
-		nv_wait(device, 0x137390, 0x00020000, 0x00020000);
-		nv_mask(device, 0x132020, 0x00000004, 0x00000004);
-	}
-
-	/* for the moment, until the clock tree is better understood, use
-	 * pll mode for all clock frequencies
-	 */
-	ret = nvbios_pll_parse(bios, 0x132000, &pll);
-	if (ret == 0) {
-		pll.refclk = read_pll(dev, 0x132020);
-		if (pll.refclk) {
-			ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
-			if (ret > 0) {
-				info->coef = (P << 16) | (N << 8) | M;
-				return 0;
-			}
-		}
-	}
-
-	return -EINVAL;
-}
-
-void *
-nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nvc0_pm_state *info;
-	int ret;
-
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	/* NFI why this is still in the performance table, the ROPCs appear
-	 * to get their clock from clock 2 ("hub07", actually hub05 on this
-	 * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
-	 * are always the same freq with the binary driver even when the
-	 * performance table says they should differ.
-	 */
-	if (device->chipset == 0xd9)
-		perflvl->rop = 0;
-
-	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
-	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
-	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
-	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
-	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
-	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
-	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
-	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
-		kfree(info);
-		return ERR_PTR(ret);
-	}
-
-	if (perflvl->memory) {
-		ret = calc_mem(dev, &info->mem, perflvl->memory);
-		if (ret) {
-			kfree(info);
-			return ERR_PTR(ret);
-		}
-	}
-
-	info->perflvl = perflvl;
-	return info;
-}
-
-static void
-prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-
-	/* program dividers at 137160/1371d0 first */
-	if (clk < 7 && !info->ssel) {
-		nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
-	}
-
-	/* switch clock to non-pll mode */
-	nv_mask(device, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(device, 0x137100, (1 << clk), 0x00000000);
-
-	/* reprogram pll */
-	if (clk < 7) {
-		/* make sure it's disabled first... */
-		u32 base = 0x137000 + (clk * 0x20);
-		u32 ctrl = nv_rd32(device, base + 0x00);
-		if (ctrl & 0x00000001) {
-			nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
-			nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
-		}
-		/* program it to new values, if necessary */
-		if (info->ssel) {
-			nv_wr32(device, base + 0x04, info->coef);
-			nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
-			nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
-			nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
-		}
-	}
-
-	/* select pll/non-pll mode, and program final clock divider */
-	nv_mask(device, 0x137100, (1 << clk), info->ssel);
-	nv_wait(device, 0x137100, (1 << clk), info->ssel);
-	nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
-}
-
-static void
-mclk_precharge(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh(struct nouveau_mem_exec_func *exec)
-{
-}
-
-static void
-mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
-}
-
-static void
-mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
-{
-}
-
-static void
-mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
-{
-	udelay((nsec + 500) / 1000);
-}
-
-static u32
-mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
-		if (mr <= 1)
-			return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
-		return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
-	} else {
-		if (mr == 0)
-			return nv_rd32(device, 0x10f300 + (mr * 4));
-		else
-		if (mr <= 7)
-			return nv_rd32(device, 0x10f32c + (mr * 4));
-		return nv_rd32(device, 0x10f34c);
-	}
-}
-
-static void
-mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nouveau_fb *pfb = nouveau_fb(device);
-	if (pfb->ram->type != NV_MEM_TYPE_GDDR5) {
-		if (mr <= 1) {
-			nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
-			if (pfb->ram->ranks > 1)
-				nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
-		} else
-		if (mr <= 3) {
-			nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
-			if (pfb->ram->ranks > 1)
-				nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
-		}
-	} else {
-		if      (mr ==  0) nv_wr32(device, 0x10f300 + (mr * 4), data);
-		else if (mr <=  7) nv_wr32(device, 0x10f32c + (mr * 4), data);
-		else if (mr == 15) nv_wr32(device, 0x10f34c, data);
-	}
-}
-
-static void
-mclk_clock_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nvc0_pm_state *info = exec->priv;
-	u32 ctrl = nv_rd32(device, 0x132000);
-
-	nv_wr32(device, 0x137360, 0x00000001);
-	nv_wr32(device, 0x137370, 0x00000000);
-	nv_wr32(device, 0x137380, 0x00000000);
-	if (ctrl & 0x00000001)
-		nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
-
-	nv_wr32(device, 0x132004, info->mem.coef);
-	nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
-	nv_wait(device, 0x137390, 0x00000002, 0x00000002);
-	nv_wr32(device, 0x132018, 0x00005000);
-
-	nv_wr32(device, 0x137370, 0x00000001);
-	nv_wr32(device, 0x137380, 0x00000001);
-	nv_wr32(device, 0x137360, 0x00000000);
-}
-
-static void
-mclk_timing_set(struct nouveau_mem_exec_func *exec)
-{
-	struct nouveau_device *device = nouveau_dev(exec->dev);
-	struct nvc0_pm_state *info = exec->priv;
-	struct nouveau_pm_level *perflvl = info->perflvl;
-	int i;
-
-	for (i = 0; i < 5; i++)
-		nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
-}
-
-static void
-prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_mem_exec_func exec = {
-		.dev = dev,
-		.precharge = mclk_precharge,
-		.refresh = mclk_refresh,
-		.refresh_auto = mclk_refresh_auto,
-		.refresh_self = mclk_refresh_self,
-		.wait = mclk_wait,
-		.mrg = mclk_mrg,
-		.mrs = mclk_mrs,
-		.clock_set = mclk_clock_set,
-		.timing_set = mclk_timing_set,
-		.priv = info
-	};
-
-	if (device->chipset < 0xd0)
-		nv_wr32(device, 0x611200, 0x00003300);
-	else
-		nv_wr32(device, 0x62c000, 0x03030000);
-
-	nouveau_mem_exec(&exec, info->perflvl);
-
-	if (device->chipset < 0xd0)
-		nv_wr32(device, 0x611200, 0x00003330);
-	else
-		nv_wr32(device, 0x62c000, 0x03030300);
-}
-int
-nvc0_pm_clocks_set(struct drm_device *dev, void *data)
-{
-	struct nvc0_pm_state *info = data;
-	int i;
-
-	if (info->mem.coef)
-		prog_mem(dev, info);
-
-	for (i = 0; i < 16; i++) {
-		if (!info->eng[i].freq)
-			continue;
-		prog_clk(dev, i, &info->eng[i]);
-	}
-
-	kfree(info);
-	return 0;
-}
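The nvc0 helpers above store dividers as (div - 2) in a 6-bit field and treat the source as twice the VCO, which is why read_div()/read_clk() add 2 back and halve the product. A small round-trip sketch of that encoding, with no register access and invented function names:

#include <stdio.h>

/* Encode: pick the divider for a target and return the field value (div - 2). */
static unsigned calc_div_field(unsigned ref_khz, unsigned freq_khz, unsigned *out_khz)
{
	unsigned div = (ref_khz * 2) / freq_khz;

	if (div < 2)
		div = 2;
	if (div > 65)
		div = 65;		/* 6-bit field + 2 tops out at 65 */
	*out_khz = (ref_khz * 2) / div;
	return div - 2;
}

/* Decode: what read_div()/read_clk() do with the same field. */
static unsigned read_back(unsigned ref_khz, unsigned field)
{
	unsigned sdiv = (field & 0x3f) + 2;
	return (ref_khz * 2) / sdiv;
}

int main(void)
{
	unsigned out, field = calc_div_field(1620000, 405000, &out);
	printf("field=%u out=%u decoded=%u\n", field, out, read_back(1620000, field));
	return 0;
}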
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e7..6c220cd 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
 	depends on OMAP2_DSS
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2603d90..e7fa3cd 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -620,7 +620,6 @@
 		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 		.gem_prime_export = omap_gem_prime_export,
 		.gem_prime_import = omap_gem_prime_import,
-		.gem_init_object = omap_gem_init_object,
 		.gem_free_object = omap_gem_free_object,
 		.gem_vm_ops = &omap_gem_vm_ops,
 		.dumb_create = omap_gem_dumb_create,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 30b95b7..0784769 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -220,7 +220,6 @@
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 		uint32_t handle, uint64_t *offset);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 533f6eb..5aec3e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1274,11 +1274,6 @@
 	return ret;
 }
 
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
-	return -EINVAL;          /* unused */
-}
-
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 9263db1..cb85860 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -261,7 +261,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		return -EBUSY;
 	}
-	dev->irq_enabled = 1;
+	dev->irq_enabled = true;
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Before installing handler */
@@ -272,7 +272,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
@@ -283,7 +283,7 @@
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
-		dev->irq_enabled = 0;
+		dev->irq_enabled = false;
 		mutex_unlock(&dev->struct_mutex);
 		dispc_free_irq(dev);
 	}
@@ -294,11 +294,12 @@
 int omap_drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
-	int irq_enabled, i;
+	bool irq_enabled;
+	int i;
 
 	mutex_lock(&dev->struct_mutex);
 	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = 0;
+	dev->irq_enabled = false;
 	mutex_unlock(&dev->struct_mutex);
 
 	/*
@@ -307,9 +308,9 @@
 	if (dev->num_crtcs) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		for (i = 0; i < dev->num_crtcs; i++) {
-			DRM_WAKEUP(&dev->vbl_queue[i]);
-			dev->vblank_enabled[i] = 0;
-			dev->last_vblank[i] =
+			DRM_WAKEUP(&dev->vblank[i].queue);
+			dev->vblank[i].enabled = false;
+			dev->vblank[i].last =
 				dev->driver->get_vblank_counter(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c1279..037d324 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
         select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
         select DRM_TTM
 	help
 		QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 835caba..5e827c2 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -107,10 +107,17 @@
 		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
 				 " retrying\n");
 	}
-	drm_helper_hpd_irq_event(qdev->ddev);
+
+	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
+		/* notify that the monitor configuration changed, to
+		   adjust at the arbitrary resolution */
+		drm_kms_helper_hotplug_event(qdev->ddev);
+	}
 }
 
-static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+                                         unsigned *pwidth,
+                                         unsigned *pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct qxl_device *qdev = dev->dev_private;
@@ -126,11 +133,15 @@
 	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
 			    false);
 	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	*pwidth = head->width;
+	*pheight = head->height;
 	drm_mode_probed_add(connector, mode);
 	return 1;
 }
 
-static int qxl_add_common_modes(struct drm_connector *connector)
+static int qxl_add_common_modes(struct drm_connector *connector,
+                                unsigned pwidth,
+                                unsigned pheight)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode = NULL;
@@ -159,12 +170,9 @@
 	};
 
 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-		if (common_modes[i].w < 320 || common_modes[i].h < 200)
-			continue;
-
 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
 				    60, false, false, false);
-		if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+		if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
 			mode->type |= DRM_MODE_TYPE_PREFERRED;
 		drm_mode_probed_add(connector, mode);
 	}
@@ -720,16 +728,18 @@
 {
 	int ret = 0;
 	struct qxl_device *qdev = connector->dev->dev_private;
+	unsigned pwidth = 1024;
+	unsigned pheight = 768;
 
 	DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
 	/* TODO: what should we do here? only show the configured modes for the
 	 * device, or allow the full list, or both? */
 	if (qdev->monitors_config && qdev->monitors_config->count) {
-		ret = qxl_add_monitors_config_modes(connector);
+		ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
 		if (ret < 0)
 			return ret;
 	}
-	ret += qxl_add_common_modes(connector);
+	ret += qxl_add_common_modes(connector, pwidth, pheight);
 	return ret;
 }
 
@@ -793,7 +803,10 @@
 		     qdev->client_monitors_config->count > output->index &&
 		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+	if (!connected)
+		qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0);
+
 	return connected ? connector_status_connected
 			 : connector_status_disconnected;
 }
@@ -835,8 +848,21 @@
 	.destroy = qxl_enc_destroy,
 };
 
+static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
+{
+	if (qdev->hotplug_mode_update_property)
+		return 0;
+
+	qdev->hotplug_mode_update_property =
+		drm_property_create_range(qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
+					  "hotplug_mode_update", 0, 1);
+
+	return 0;
+}
+
 static int qdev_output_init(struct drm_device *dev, int num_output)
 {
+	struct qxl_device *qdev = dev->dev_private;
 	struct qxl_output *qxl_output;
 	struct drm_connector *connector;
 	struct drm_encoder *encoder;
@@ -863,6 +889,8 @@
 	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
 	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
 
+	drm_object_attach_property(&connector->base,
+				   qdev->hotplug_mode_update_property, 0);
 	drm_sysfs_connector_add(connector);
 	return 0;
 }
@@ -975,6 +1003,9 @@
 	qdev->ddev->mode_config.max_height = 8192;
 
 	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+
+	qxl_mode_create_hotplug_mode_update_property(qdev);
+
 	for (i = 0 ; i < qxl_num_crtc; ++i) {
 		qdev_crtc_init(qdev->ddev, i);
 		qdev_output_init(qdev->ddev, i);
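The qxl_display.c changes above stop hard-coding 1024x768 as the preferred common mode: the head size reported in monitors_config is threaded through as pwidth/pheight (still defaulting to 1024x768 when no config exists) and the common mode matching that size gets DRM_MODE_TYPE_PREFERRED. A toy sketch of that selection, outside the DRM mode structures:

#include <stdio.h>

struct mode {
	unsigned w, h;
	int preferred;
};

/* Flag the common mode matching the client-reported size as preferred;
 * the caller falls back to 1024x768 when no monitors config is present. */
static void mark_preferred(struct mode *modes, int n, unsigned pw, unsigned ph)
{
	for (int i = 0; i < n; i++)
		modes[i].preferred = (modes[i].w == pw && modes[i].h == ph);
}

int main(void)
{
	struct mode m[] = { { 1024, 768 }, { 1920, 1080 }, { 2560, 1440 } };

	mark_preferred(m, 3, 1920, 1080);
	for (int i = 0; i < 3; i++)
		printf("%ux%u%s\n", m[i].w, m[i].h,
		       m[i].preferred ? " (preferred)" : "");
	return 0;
}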
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 514118a..fee8748 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -225,7 +225,6 @@
 	.debugfs_init = qxl_debugfs_init,
 	.debugfs_cleanup = qxl_debugfs_takedown,
 #endif
-	.gem_init_object = qxl_gem_object_init,
 	.gem_free_object = qxl_gem_object_free,
 	.gem_open_object = qxl_gem_object_open,
 	.gem_close_object = qxl_gem_object_close,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f7c9add..7bda32f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -323,6 +323,8 @@
 	struct work_struct gc_work;
 
 	struct work_struct fb_work;
+
+	struct drm_property *hotplug_mode_update_property;
 };
 
 /* forward declaration for QXL_INFO_IO */
@@ -412,7 +414,6 @@
 				      struct qxl_surface *surf,
 				      struct qxl_bo **qobj,
 				      uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void qxl_gem_object_close(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 88722f2..f437b30 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -108,7 +108,7 @@
 	u32 x1, x2, y1, y2;
 
 	/* TODO: hard coding 32 bpp */
-	int stride = qfbdev->qfb.base.pitches[0] * 4;
+	int stride = qfbdev->qfb.base.pitches[0];
 
 	x1 = qfbdev->dirty.x1;
 	x2 = qfbdev->dirty.x2;
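The qxl_fb.c fix above drops the extra "* 4": under the usual DRM convention the framebuffer pitch is already a byte stride, so only the x coordinate needs the 32bpp scaling. A tiny sketch of the corrected offset arithmetic, assuming that byte-stride convention:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* pitch_bytes is a byte stride; scaling it by bytes-per-pixel again (the
 * removed "* 4") would overstate the dirty-region stride fourfold. */
static size_t dirty_byte_offset(uint32_t pitch_bytes, uint32_t x, uint32_t y)
{
	return (size_t)y * pitch_bytes + (size_t)x * 4;	/* 32bpp assumed */
}

int main(void)
{
	printf("%zu\n", dirty_byte_offset(1024 * 4, 10, 2));
	return 0;
}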
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 1648e41..b96f0c9 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -28,12 +28,6 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
-	/* we do nothings here */
-	return 0;
-}
-
 void qxl_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9e8da9e..e5ca498 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -120,7 +120,7 @@
 		    struct pci_dev *pdev,
 		    unsigned long flags)
 {
-	int r;
+	int r, sb;
 
 	qdev->dev = &pdev->dev;
 	qdev->ddev = ddev;
@@ -136,21 +136,39 @@
 	qdev->rom_base = pci_resource_start(pdev, 2);
 	qdev->rom_size = pci_resource_len(pdev, 2);
 	qdev->vram_base = pci_resource_start(pdev, 0);
-	qdev->surfaceram_base = pci_resource_start(pdev, 1);
-	qdev->surfaceram_size = pci_resource_len(pdev, 1);
 	qdev->io_base = pci_resource_start(pdev, 3);
 
 	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
-	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
-	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+
+	if (pci_resource_len(pdev, 4) > 0) {
+		/* 64bit surface bar present */
+		sb = 4;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+	if (qdev->surface_mapping == NULL) {
+		/* 64bit surface bar not present (or mapping failed) */
+		sb = 1;
+		qdev->surfaceram_base = pci_resource_start(pdev, sb);
+		qdev->surfaceram_size = pci_resource_len(pdev, sb);
+		qdev->surface_mapping =
+			io_mapping_create_wc(qdev->surfaceram_base,
+					     qdev->surfaceram_size);
+	}
+
+	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
 		 (unsigned long long)qdev->vram_base,
 		 (unsigned long long)pci_resource_end(pdev, 0),
 		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
 		 (int)pci_resource_len(pdev, 0) / 1024,
 		 (unsigned long long)qdev->surfaceram_base,
-		 (unsigned long long)pci_resource_end(pdev, 1),
+		 (unsigned long long)pci_resource_end(pdev, sb),
 		 (int)qdev->surfaceram_size / 1024 / 1024,
-		 (int)qdev->surfaceram_size / 1024);
+		 (int)qdev->surfaceram_size / 1024,
+		 (sb == 4) ? "64bit" : "32bit");
 
 	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
 	if (!qdev->rom) {
@@ -230,9 +248,13 @@
 	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
 		(unsigned long)qdev->surfaceram_base,
 		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
-	DRM_INFO("main mem slot %d [%lx,%x)\n",
-		qdev->main_mem_slot,
-		(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+	DRM_INFO("main mem slot %d [%lx,%x]\n",
+		 qdev->main_mem_slot,
+		 (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+	DRM_INFO("surface mem slot %d [%lx,%lx]\n",
+		 qdev->surfaces_mem_slot,
+		 (unsigned long)qdev->surfaceram_base,
+		 (unsigned long)qdev->surfaceram_size);
 
 
 	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
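The qxl_kms.c hunk above makes the driver prefer a 64-bit surface BAR at PCI resource 4 and fall back to the old 32-bit BAR 1 when it is absent (or when mapping it fails). A stripped-down sketch of that selection order, with illustrative struct and function names and no PCI or io_mapping calls:

#include <stdio.h>

struct bar {
	unsigned long long base;
	unsigned long long len;
};

/* Prefer the 64-bit surface BAR (index 4) when the device exposes one,
 * otherwise fall back to the legacy 32-bit BAR (index 1). */
static int pick_surface_bar(const struct bar bars[], int *index)
{
	if (bars[4].len > 0) {
		*index = 4;
		return 0;
	}
	*index = 1;
	return bars[1].len ? 0 : -1;
}

int main(void)
{
	struct bar bars[6] = { [1] = { 0xf0000000ULL, 64ULL << 20 } };
	int sb;

	if (pick_surface_bar(bars, &sb) == 0)
		printf("surface bar %d (%s)\n", sb, sb == 4 ? "64bit" : "32bit");
	return 0;
}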
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 037786d..c7e7e65 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -516,6 +516,8 @@
 		 (unsigned)qdev->vram_size / (1024 * 1024));
 	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
 		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	DRM_INFO("qxl: %uM of Surface memory size\n",
+		 (unsigned)qdev->surfaceram_size / (1024 * 1024));
 	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
 		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
 	r = qxl_ttm_debugfs_init(qdev);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index bf87f6d..86d9ee0 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1753,7 +1753,7 @@
 				if (pll != ATOM_PPLL_INVALID)
 					return pll;
 			}
-		} else {
+		} else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
 			/* use the same PPLL for all monitors with the same clock */
 			pll = radeon_get_shared_nondp_ppll(crtc);
 			if (pll != ATOM_PPLL_INVALID)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0088541..fb3ae07 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -690,8 +690,7 @@
 
 	/* set the lane count on the sink */
 	tmp = dp_info->dp_lane_count;
-	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
 		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
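The atombios_dp.c hunk above replaces the open-coded DPCD test with the drm_dp_enhanced_frame_cap() helper; both express the same condition, DPCD revision 1.1 or later plus the enhanced-framing bit in MAX_LANE_COUNT. A standalone restatement of that check, with the constants written out per the DisplayPort DPCD layout rather than the kernel macro names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DPCD_REV			0x000
#define DPCD_MAX_LANE_COUNT		0x002
#define DPCD_ENHANCED_FRAME_CAP		0x80

/* Same condition as the removed open-coded test and the helper that
 * replaces it: rev >= 1.1 and the enhanced-framing capability bit set. */
static bool enhanced_frame_cap(const uint8_t dpcd[])
{
	return dpcd[DPCD_REV] >= 0x11 &&
	       (dpcd[DPCD_MAX_LANE_COUNT] & DPCD_ENHANCED_FRAME_CAP);
}

int main(void)
{
	uint8_t dpcd[16] = { [DPCD_REV] = 0x12, [DPCD_MAX_LANE_COUNT] = 0x84 };

	printf("enhanced framing: %s\n", enhanced_frame_cap(dpcd) ? "yes" : "no");
	return 0;
}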
 
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 5e891b2..a42d615 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -213,7 +213,7 @@
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_atom_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
@@ -1662,19 +1662,11 @@
 			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 			/* enable the transmitter */
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		} else {
 			/* setup and enable the encoder and transmitter */
 			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			/* some dce3.x boards have a bug in their transmitter control table.
-			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
-			 * does the same thing and more.
-			 */
-			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-			    (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
 		}
 		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1692,16 +1684,11 @@
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
-		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+		if (ASIC_IS_DCE4(rdev)) {
 			/* disable the transmitter */
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-		} else if (ASIC_IS_DCE4(rdev)) {
-			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		} else {
 			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 		}
@@ -2410,6 +2397,15 @@
 
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
+	/* set up the FMT blocks */
+	if (ASIC_IS_DCE8(rdev))
+		dce8_program_fmt(encoder);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_program_fmt(encoder);
+	else if (ASIC_IS_DCE3(rdev))
+		dce3_program_fmt(encoder);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_program_fmt(encoder);
 }
 
 static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9cd2bc9..e854475 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -67,11 +67,6 @@
 extern int cik_sdma_resume(struct radeon_device *rdev);
 extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
 extern void cik_sdma_fini(struct radeon_device *rdev);
-extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
-				 struct radeon_ib *ib,
-				 uint64_t pe,
-				 uint64_t addr, unsigned count,
-				 uint32_t incr, uint32_t flags);
 static void cik_rlc_stop(struct radeon_device *rdev);
 static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
@@ -3097,6 +3092,85 @@
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 }
 
+/**
+ * cik_copy_cpdma - copy pages using the CP DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the CP DMA engine (CIK+).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_cpdma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.blit_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes, control;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0x1fffff)
+			cur_size_in_bytes = 0x1fffff;
+		size_in_bytes -= cur_size_in_bytes;
+		control = 0;
+		if (size_in_bytes == 0)
+			control |= PACKET3_DMA_DATA_CP_SYNC;
+		radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
+		radeon_ring_write(ring, control);
+		radeon_ring_write(ring, lower_32_bits(src_offset));
+		radeon_ring_write(ring, upper_32_bits(src_offset));
+		radeon_ring_write(ring, lower_32_bits(dst_offset));
+		radeon_ring_write(ring, upper_32_bits(dst_offset));
+		radeon_ring_write(ring, cur_size_in_bytes);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 /*
  * IB stuff
  */
@@ -4834,62 +4908,6 @@
 	}
 }
 
-/**
- * cik_vm_set_page - update the page tables using sDMA
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using CP or sDMA (CIK).
- */
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		/* CP */
-		while (count) {
-			ndw = 2 + count * 2;
-			if (ndw > 0x3FFE)
-				ndw = 0x3FFE;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-						    WRITE_DATA_DST_SEL(1));
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		/* DMA */
-		cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 /*
  * RLC
  * The RLC is a multi-purpose microengine that handles a
@@ -5556,7 +5574,7 @@
 		}
 
 		for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
-			dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+			dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
 		}
 		bo_offset += CP_ME_TABLE_SIZE;
 	}
@@ -5778,52 +5796,53 @@
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
-	buffer[count++] = 0x80000000;
-	buffer[count++] = 0x80000000;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
 
 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
 		for (ext = sect->section; ext->extent != NULL; ++ext) {
 			if (sect->id == SECT_CONTEXT) {
-				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
-				buffer[count++] = ext->reg_index - 0xa000;
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
 				for (i = 0; i < ext->reg_count; i++)
-					buffer[count++] = ext->extent[i];
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
 			} else {
 				return;
 			}
 		}
 	}
 
-	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
-	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (rdev->family) {
 	case CHIP_BONAIRE:
-		buffer[count++] = 0x16000012;
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_KAVERI:
-		buffer[count++] = 0x00000000; /* XXX */
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	case CHIP_KABINI:
-		buffer[count++] = 0x00000000; /* XXX */
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	default:
-		buffer[count++] = 0x00000000;
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	}
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
-	buffer[count++] = 0;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
 }
 
 static void cik_init_pg(struct radeon_device *rdev)
@@ -7118,7 +7137,7 @@
 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     CP_RB0_RPTR, CP_RB0_WPTR,
-			     RADEON_CP_PACKET2);
+			     PACKET3(PACKET3_NOP, 0x3FFF));
 	if (r)
 		return r;
 
@@ -7428,6 +7447,70 @@
 	rdev->bios = NULL;
 }
 
+void dce8_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
+		break;
+	case 10:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
+		break;
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* display watermark setup */
 /**
  * dce8_line_buffer_adjust - Set up the line buffer
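
A standalone sketch (not driver code) of the chunking arithmetic used by cik_copy_cpdma() above: BYTE_COUNT is a 21-bit field, so the copy is split into chunks of at most 0x1fffff bytes, each emitted as one 7-dword PACKET3_DMA_DATA packet, and the ring lock reserves num_loops * 7 + 18 dwords to leave headroom for the semaphore sync and fence packets. Constants are restated locally for illustration:

#define GPU_PAGE_SHIFT   12        /* 4 KiB GPU pages */
#define CP_DMA_MAX_BYTES 0x1fffff  /* 21-bit BYTE_COUNT */

static unsigned int cpdma_num_loops(unsigned int num_gpu_pages)
{
	unsigned int size = num_gpu_pages << GPU_PAGE_SHIFT;

	return (size + CP_DMA_MAX_BYTES - 1) / CP_DMA_MAX_BYTES;
}
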
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index b628606..8d84ebe 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "cikd.h"
 
 /* sdma */
@@ -653,11 +654,12 @@
 			  uint64_t addr, unsigned count,
 			  uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -669,16 +671,10 @@
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 			ib->ptr[ib->length_dw++] = ndw;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -689,7 +685,7 @@
 			if (ndw > 0x7FFFF)
 				ndw = 0x7FFFF;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -697,7 +693,7 @@
 			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 203d2a0..380cea3 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -906,6 +906,39 @@
 #define DPG_PIPE_STUTTER_CONTROL                          0x6cd4
 #       define STUTTER_ENABLE                             (1 << 0)
 
+/* DCE8 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_MODE             (1 << 1)
+#       define FMT_TRUNCATE_DEPTH(x)         ((x) << 4) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH(x)   ((x) << 11) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH(x)  ((x) << 17) /* 0 - 18bpp, 1 - 24bpp, 2 - 30bpp */
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 #define	GRBM_CNTL					0x8000
 #define		GRBM_READ_TIMEOUT(x)				((x) << 0)
 
@@ -1714,6 +1747,68 @@
 #              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
 #              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
 #define	PACKET3_DMA_DATA				0x50
+/* 1. header
+ * 2. CONTROL
+ * 3. SRC_ADDR_LO or DATA [31:0]
+ * 4. SRC_ADDR_HI [31:0]
+ * 5. DST_ADDR_LO [31:0]
+ * 6. DST_ADDR_HI [7:0]
+ * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+/* CONTROL */
+#              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
+#              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
+                /* 0 - DST_ADDR using DAS
+		 * 1 - GDS
+		 * 3 - DST_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
+                /* 0 - LRU
+		 * 1 - Stream
+		 * 2 - Bypass
+		 */
+#              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
+#              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
+                /* 0 - SRC_ADDR using SAS
+		 * 1 - GDS
+		 * 2 - DATA
+		 * 3 - SRC_ADDR using L2
+		 */
+#              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
+/* COMMAND */
+#              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
+#              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
+#              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
+#              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
 #define	PACKET3_AQUIRE_MEM				0x58
 #define	PACKET3_REWIND					0x59
 #define	PACKET3_LOAD_UCONFIG_REG			0x5E
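
As an illustration (not part of the patch) of how the CONTROL/COMMAND layout documented above is consumed, one iteration of the cik_copy_cpdma() loop added earlier in this series fills the seven dwords roughly as follows; buf stands in for the ring and the helper name is made up:

static void emit_dma_data(u32 *buf, u64 src, u64 dst, u32 bytes, bool last)
{
	buf[0] = PACKET3(PACKET3_DMA_DATA, 5);         /* header, 6 payload dwords follow */
	buf[1] = last ? PACKET3_DMA_DATA_CP_SYNC : 0;  /* CONTROL */
	buf[2] = lower_32_bits(src);                   /* SRC_ADDR_LO */
	buf[3] = upper_32_bits(src);                   /* SRC_ADDR_HI */
	buf[4] = lower_32_bits(dst);                   /* DST_ADDR_LO */
	buf[5] = upper_32_bits(dst);                   /* DST_ADDR_HI */
	buf[6] = bytes;                                /* COMMAND = 0, BYTE_COUNT */
}
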
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 9fcd338..ab92620 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -102,6 +102,49 @@
 	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
 }
 
+void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+				    struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0, offset;
+
+	if (!dig->afmt->pin)
+		return;
+
+	offset = dig->afmt->pin->offset;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
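
The DCE6 function above and the DCE4 variant added later in evergreen_hdmi.c pick the lipsync word the same way; a condensed sketch of that selection (helper name is illustrative; 255 means "sync not supported" per the register description):

static u32 lipsync_value(struct drm_connector *connector, bool interlaced)
{
	int sel = interlaced ? 1 : 0;

	if (!connector->latency_present[sel])
		return VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);

	return VIDEO_LIPSYNC(connector->video_latency[sel]) |
	       AUDIO_LIPSYNC(connector->audio_latency[sel]);
}
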
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b5c67a9..52f1ae1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1193,6 +1193,62 @@
 	}
 }
 
+void dce4_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS/eDP FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN);
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
+				FMT_RGB_RANDOM_ENABLE |
+				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
 {
 	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
@@ -3963,7 +4019,7 @@
 		if (rdev->family >= CHIP_TAHITI) {
 			/* SI */
 			for (i = 0; i < rdev->rlc.reg_list_size; i++)
-				dst_ptr[i] = src_ptr[i];
+				dst_ptr[i] = cpu_to_le32(src_ptr[i]);
 		} else {
 			/* ON/LN/TN */
 			/* format:
@@ -3977,10 +4033,10 @@
 				if (i < dws)
 					data |= (src_ptr[i] >> 2) << 16;
 				j = (((i - 1) * 3) / 2);
-				dst_ptr[j] = data;
+				dst_ptr[j] = cpu_to_le32(data);
 			}
 			j = ((i * 3) / 2);
-			dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+			dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.save_restore_obj);
 		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
@@ -4042,40 +4098,40 @@
 			cik_get_csb_buffer(rdev, dst_ptr);
 		} else if (rdev->family >= CHIP_TAHITI) {
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
-			dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
-			dst_ptr[2] = rdev->rlc.clear_state_size;
+			dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
+			dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
+			dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
 			si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
 		} else {
 			reg_list_hdr_blk_index = 0;
 			reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
 			data = upper_32_bits(reg_list_mc_addr);
-			dst_ptr[reg_list_hdr_blk_index] = data;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 			reg_list_hdr_blk_index++;
 			for (i = 0; cs_data[i].section != NULL; i++) {
 				for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
 					reg_num = cs_data[i].section[j].reg_count;
 					data = reg_list_mc_addr & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					data = 0x08000000 | (reg_num * 4);
-					dst_ptr[reg_list_hdr_blk_index] = data;
+					dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
 					reg_list_hdr_blk_index++;
 
 					for (k = 0; k < reg_num; k++) {
 						data = cs_data[i].section[j].extent[k];
-						dst_ptr[reg_list_blk_index + k] = data;
+						dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
 					}
 					reg_list_mc_addr += reg_num * 4;
 					reg_list_blk_index += reg_num;
 				}
 			}
-			dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
+			dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
 		}
 		radeon_bo_kunmap(rdev->rlc.clear_state_obj);
 		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
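
A note on the cpu_to_le32() wrapping above: the RLC fetches its save/restore and clear-state buffers in little-endian byte order, so values written through a CPU mapping need conversion on big-endian hosts; on little-endian hosts the call compiles away. A minimal sketch of the idea (helper name is illustrative):

static inline void rlc_write_word(__le32 *dst, u32 val)
{
	*dst = cpu_to_le32(val);   /* identity on LE, byte swap on BE */
}
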
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index fe1de85..a82b6f7 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -35,6 +35,8 @@
 extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
 extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
 extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode);
 
 /*
  * update the N and CTS parameters for a given pixel clock rate
@@ -58,6 +60,42 @@
 	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
 }
 
+static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
+					   struct drm_display_mode *mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	u32 tmp = 0;
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (connector->latency_present[1])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+				AUDIO_LIPSYNC(connector->audio_latency[1]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	} else {
+		if (connector->latency_present[0])
+			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
+				AUDIO_LIPSYNC(connector->audio_latency[0]);
+		else
+			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+	}
+	WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
+}
+
 static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 {
 	struct radeon_device *rdev = encoder->dev->dev_private;
@@ -71,8 +109,10 @@
 	return;
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -124,8 +164,10 @@
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -291,6 +333,7 @@
 	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 
 	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_SOURCE | /* select SW CTS value */
 	       HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
 
 	evergreen_hdmi_update_ACR(encoder, mode->clock);
@@ -323,8 +366,10 @@
 	if (ASIC_IS_DCE6(rdev)) {
 		dce6_afmt_select_pin(encoder);
 		dce6_afmt_write_sad_regs(encoder);
+		dce6_afmt_write_latency_fields(encoder, mode);
 	} else {
 		evergreen_hdmi_write_sad_regs(encoder);
+		dce4_afmt_write_latency_fields(encoder, mode);
 	}
 
 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 4f6d296..17f9907 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -750,6 +750,44 @@
  * bit6 = 192 kHz
  */
 
+#define AZ_CHANNEL_COUNT_CONTROL                          0x5fe4
+#       define HBR_CHANNEL_COUNT(x)                       (((x) & 0x7) << 0)
+#       define COMPRESSED_CHANNEL_COUNT(x)                (((x) & 0x7) << 4)
+/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
+ * 0   = use stream header
+ * 1-7 = channel count - 1
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC         0x5fe8
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR             0x5fec
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
+#       define DISPLAY0_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY_TYPE_NONE                   0
+#       define DISPLAY_TYPE_HDMI                   1
+#       define DISPLAY_TYPE_DP                     2
+#       define DISPLAY0_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY1_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY1_ID(x)                             (((x) & 0x3f) << 10)
+#       define DISPLAY2_TYPE(x)                           (((x) & 0x3) << 16)
+#       define DISPLAY2_ID(x)                             (((x) & 0x3f) << 18)
+#       define DISPLAY3_TYPE(x)                           (((x) & 0x3) << 24)
+#       define DISPLAY3_ID(x)                             (((x) & 0x3f) << 26)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
+#       define DISPLAY4_TYPE(x)                           (((x) & 0x3) << 0)
+#       define DISPLAY4_ID(x)                             (((x) & 0x3f) << 2)
+#       define DISPLAY5_TYPE(x)                           (((x) & 0x3) << 8)
+#       define DISPLAY5_ID(x)                             (((x) & 0x3f) << 10)
+#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER       0x5ffc
+#       define NUMBER_OF_DISPLAY_ID(x)                    (((x) & 0x7) << 0)
+
 #define AZ_HOT_PLUG_CONTROL                               0x5e78
 #       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
 #       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
@@ -1312,6 +1350,38 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* DCE4/5/6 FMT blocks */
+#define FMT_DYNAMIC_EXP_CNTL                 0x6fb4
+#       define FMT_DYNAMIC_EXP_EN            (1 << 0)
+#       define FMT_DYNAMIC_EXP_MODE          (1 << 4)
+        /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
+#define FMT_CONTROL                          0x6fb8
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6fc8
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x6fe4
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* ASYNC DMA */
 #define DMA_RB_RPTR                                       0xd008
 #define DMA_RB_WPTR                                       0xd00c
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 7139906..b419055 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2635,7 +2635,7 @@
 	pi->caps_sclk_ds = true;
 	pi->enable_auto_thermal_throttling = true;
 	pi->disable_nb_ps3_in_battery = false;
-	pi->bapm_enable = true;
+	pi->bapm_enable = false;
 	pi->voltage_drop_t = 0;
 	pi->caps_sclk_throttle_low_notification = false;
 	pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cac2866..11aab2a 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -174,11 +174,6 @@
 extern void evergreen_program_aspm(struct radeon_device *rdev);
 extern void sumo_rlc_fini(struct radeon_device *rdev);
 extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
-				   struct radeon_ib *ib,
-				   uint64_t pe,
-				   uint64_t addr, unsigned count,
-				   uint32_t incr, uint32_t flags);
 
 /* Firmware Names */
 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -2400,77 +2395,6 @@
 	       block, mc_id);
 }
 
-#define R600_ENTRY_VALID   (1 << 0)
-#define R600_PTE_SYSTEM    (1 << 1)
-#define R600_PTE_SNOOPED   (1 << 2)
-#define R600_PTE_READABLE  (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
-{
-	uint32_t r600_flags = 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
-	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
-		r600_flags |= R600_PTE_SYSTEM;
-		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
-	}
-	return r600_flags;
-}
-
-/**
- * cayman_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (cayman/TN).
- */
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		while (count) {
-			ndw = 1 + count * 2;
-			if (ndw > 0x3FFF)
-				ndw = 0x3FFF;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 /**
  * cayman_vm_flush - vm flush using the CP
  *
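
For reference, the translation dropped above boiled down to the mapping sketched below; after this series the callers pass R600_PTE_* hardware bits directly (see the cik_sdma.c, ni_dma.c and radeon.h hunks). The removed code spelled the valid bit R600_ENTRY_VALID, which matches the new R600_PTE_VALID:

static u32 vm_page_flags_to_pte(u32 flags)
{
	u32 pte = 0;

	if (flags & RADEON_VM_PAGE_VALID)
		pte |= R600_PTE_VALID;
	if (flags & RADEON_VM_PAGE_READABLE)
		pte |= R600_PTE_READABLE;
	if (flags & RADEON_VM_PAGE_WRITEABLE)
		pte |= R600_PTE_WRITEABLE;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		pte |= R600_PTE_SYSTEM;
		if (flags & RADEON_VM_PAGE_SNOOPED)
			pte |= R600_PTE_SNOOPED;
	}
	return pte;
}
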
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index dd6e968..bdeb65e 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "nid.h"
 
 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -245,8 +246,7 @@
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
- * @flags: access flags
- * @r600_flags: hw access flags 
+ * @flags: hw access flags 
  *
  * Update the page tables using the DMA (cayman/TN).
  */
@@ -256,11 +256,12 @@
 			    uint64_t addr, unsigned count,
 			    uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -271,16 +272,16 @@
 			ib->ptr[ib->length_dw++] = pe;
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
+				if (flags & R600_PTE_SYSTEM) {
 					value = radeon_vm_map_gart(rdev, addr);
 					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
+				} else if (flags & R600_PTE_VALID) {
 					value = addr;
 				} else {
 					value = 0;
 				}
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -291,7 +292,7 @@
 			if (ndw > 0xFFFFE)
 				ndw = 0xFFFFE;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -299,7 +300,7 @@
 			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d713330..784983d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1434,7 +1434,7 @@
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f9be220..4e609e8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -124,6 +124,59 @@
 	return 0;
 }
 
+void dce3_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	/* not needed for analog */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= FMT_SPATIAL_DITHER_EN;
+		else
+			tmp |= FMT_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
+}
+
 /* get temperature in millidegrees */
 int rv6xx_get_temp(struct radeon_device *rdev)
 {
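
The three FMT helpers added in this series (dce3/dce4/dce8_program_fmt) share the same shape: skip LVDS/eDP and analog encoders, then pick spatial dithering or truncation sized to the monitor bpc. A condensed sketch of the DCE3 selection above (function name is illustrative):

static u32 dce3_fmt_bits(int bpc, bool dither)
{
	switch (bpc) {
	case 6:
		return dither ? FMT_SPATIAL_DITHER_EN : FMT_TRUNCATE_EN;
	case 8:
		return dither ? (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH)
			      : (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
	default:
		return 0;	/* 10 bpc and up need no FMT programming */
	}
}
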
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 01a3ec8..5dceea6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@
 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
 	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
@@ -2328,13 +2328,8 @@
 	unsigned i;
 
 	kfree(parser->relocs);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 }
@@ -2391,13 +2386,12 @@
 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
 	parser.ib.length_dw = ib_chunk->length_dw;
 	*l = parser.ib.length_dw;
-	r = r600_cs_parse(&parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
+	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
+		r = -EFAULT;
 		r600_cs_parser_fini(&parser, r);
 		return r;
 	}
-	r = radeon_cs_finish_pages(&parser);
+	r = r600_cs_parse(&parser);
 	if (r) {
 		DRM_ERROR("Invalid command stream !\n");
 		r600_cs_parser_fini(&parser, r);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 06022e3..0977c30 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -313,8 +313,10 @@
 	return;
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
@@ -366,8 +368,10 @@
 	};
 
 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}
 
 	if (!radeon_connector) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 7b3c7b5..ebe3872 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1199,6 +1199,34 @@
 #       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
 #       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
 
+/* DCE3 FMT blocks */
+#define FMT_CONTROL                          0x6700
+#       define FMT_PIXEL_ENCODING            (1 << 16)
+        /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
+#define FMT_BIT_DEPTH_CONTROL                0x6710
+#       define FMT_TRUNCATE_EN               (1 << 0)
+#       define FMT_TRUNCATE_DEPTH            (1 << 4)
+#       define FMT_SPATIAL_DITHER_EN         (1 << 8)
+#       define FMT_SPATIAL_DITHER_MODE(x)    ((x) << 9)
+#       define FMT_SPATIAL_DITHER_DEPTH      (1 << 12)
+#       define FMT_FRAME_RANDOM_ENABLE       (1 << 13)
+#       define FMT_RGB_RANDOM_ENABLE         (1 << 14)
+#       define FMT_HIGHPASS_RANDOM_ENABLE    (1 << 15)
+#       define FMT_TEMPORAL_DITHER_EN        (1 << 16)
+#       define FMT_TEMPORAL_DITHER_DEPTH     (1 << 20)
+#       define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
+#       define FMT_TEMPORAL_LEVEL            (1 << 24)
+#       define FMT_TEMPORAL_DITHER_RESET     (1 << 25)
+#       define FMT_25FRC_SEL(x)              ((x) << 26)
+#       define FMT_50FRC_SEL(x)              ((x) << 28)
+#       define FMT_75FRC_SEL(x)              ((x) << 30)
+#define FMT_CLAMP_CONTROL                    0x672c
+#       define FMT_CLAMP_DATA_EN             (1 << 0)
+#       define FMT_CLAMP_COLOR_FORMAT(x)     ((x) << 16)
+#       define FMT_CLAMP_6BPC                0
+#       define FMT_CLAMP_8BPC                1
+#       define FMT_CLAMP_10BPC               2
+
 /* Power management */
 #define CG_SPLL_FUNC_CNTL                                 0x600
 #       define SPLL_RESET                                (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a400ac1..b9ee992 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -98,6 +98,7 @@
 extern int radeon_fastfb;
 extern int radeon_dpm;
 extern int radeon_aspm;
+extern int radeon_runtime_pm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -327,7 +328,6 @@
 	/* sync_seq is protected by ring emission lock */
 	uint64_t			sync_seq[RADEON_NUM_RINGS];
 	atomic64_t			last_seq;
-	unsigned long			last_activity;
 	bool				initialized;
 };
 
@@ -832,6 +832,12 @@
 #define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
 #define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
 
+#define R600_PTE_VALID		(1 << 0)
+#define R600_PTE_SYSTEM		(1 << 1)
+#define R600_PTE_SNOOPED	(1 << 2)
+#define R600_PTE_READABLE	(1 << 5)
+#define R600_PTE_WRITEABLE	(1 << 6)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
@@ -967,12 +973,8 @@
 struct radeon_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
-	int			kpage_idx[2];
-	uint32_t		*kpage[2];
 	uint32_t		*kdata;
 	void __user		*user_ptr;
-	int			last_copied_page;
-	int			last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -1007,8 +1009,15 @@
 	struct ww_acquire_ctx	ticket;
 };
 
-extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
-extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+
+	if (ibc->kdata)
+		return ibc->kdata[idx];
+	return p->ib.ptr[idx];
+}
+
 
 struct radeon_cs_packet {
 	unsigned	idx;
@@ -1272,8 +1281,8 @@
 struct radeon_clock_and_voltage_limits {
 	u32 sclk;
 	u32 mclk;
-	u32 vddc;
-	u32 vddci;
+	u16 vddc;
+	u16 vddci;
 };
 
 struct radeon_clock_array {
@@ -1675,8 +1684,6 @@
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-
-		u32 pt_ring_index;
 		void (*set_page)(struct radeon_device *rdev,
 				 struct radeon_ib *ib,
 				 uint64_t pe,
@@ -2170,6 +2177,7 @@
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
+	bool				needs_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
@@ -2212,6 +2220,9 @@
 	/* clock, powergating flags */
 	u32 cg_flags;
 	u32 pg_flags;
+
+	struct dev_pm_domain vga_pm_domain;
+	bool have_disp_power_ref;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -2673,8 +2684,8 @@
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev);
-extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
 					     const u32 *registers,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 8f7e045..d4b9167 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1622,8 +1622,7 @@
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1723,8 +1722,7 @@
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cayman_vm_set_page,
+		.set_page = &cayman_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1854,8 +1852,7 @@
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &si_vm_set_page,
+		.set_page = &si_dma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1879,7 +1876,7 @@
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
 	.copy = {
-		.blit = NULL,
+		.blit = &r600_copy_cpdma,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &si_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2000,8 +1997,7 @@
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2100,8 +2096,7 @@
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.set_page = &cik_vm_set_page,
+		.set_page = &cik_sdma_vm_set_page,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 70c29d5..f2833ee 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -581,17 +581,18 @@
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe,
+			    uint64_t addr, unsigned count,
+			    uint32_t incr, uint32_t flags);
+
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 int ni_dpm_init(struct radeon_device *rdev);
@@ -653,17 +654,17 @@
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
+void si_dma_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -705,6 +706,10 @@
 		 uint64_t src_offset, uint64_t dst_offset,
 		 unsigned num_gpu_pages,
 		 struct radeon_fence **fence);
+int cik_copy_cpdma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -731,11 +736,11 @@
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_vm_set_page(struct radeon_device *rdev,
-		     struct radeon_ib *ib,
-		     uint64_t pe,
-		     uint64_t addr, unsigned count,
-		     uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe,
+			  uint64_t addr, unsigned count,
+			  uint32_t incr, uint32_t flags);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index d96070b..6153ec189 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,6 +59,10 @@
 	u16 mux;
 } __packed;
 
+bool radeon_is_px(void) {
+	return radeon_atpx_priv.atpx_detected;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 061b227..c155d6f 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -499,7 +499,7 @@
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	fp2_gen_cntl = 0;
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
 	}
 
@@ -536,7 +536,7 @@
 		(RADEON_CRTC_SYNC_TRISTAT |
 		 RADEON_CRTC_DISPLAY_DIS)));
 
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
 	}
 
@@ -554,7 +554,7 @@
 		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
 	}
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+	if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
 		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
 	}
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 6456573..f60b310 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -31,6 +31,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#include <linux/pm_runtime.h>
+
 extern void
 radeon_combios_connected_scratch_regs(struct drm_connector *connector,
 				      struct drm_encoder *encoder,
@@ -411,6 +413,21 @@
 		}
 	}
 
+	if (property == rdev->mode_info.dither_property) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_connector->dither != val) {
+			radeon_connector->dither = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
 	if (property == rdev->mode_info.underscan_property) {
 		/* need to find digital encoder on connector */
 		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -626,6 +643,11 @@
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	if (encoder) {
 		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -651,6 +673,8 @@
 	/* check acpi lid status ??? */
 
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -750,6 +774,11 @@
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	bool dret = false;
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
+
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
 
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
@@ -790,9 +819,8 @@
 			 * detected a monitor via load.
 			 */
 			if (radeon_connector->detected_by_load)
-				return connector->status;
-			else
-				return ret;
+				ret = connector->status;
+			goto out;
 		}
 
 		if (radeon_connector->dac_load_detect && encoder) {
@@ -817,6 +845,11 @@
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -873,10 +906,15 @@
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	enum drm_connector_status ret = connector_status_disconnected;
+	int r;
 
 	if (!radeon_connector->dac_load_detect)
 		return ret;
 
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
 	encoder = radeon_best_single_encoder(connector);
 	if (!encoder)
 		ret = connector_status_disconnected;
@@ -887,6 +925,8 @@
 	if (ret == connector_status_connected)
 		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
 	radeon_connector_update_scratch_regs(connector, ret);
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
 	return ret;
 }
 
@@ -954,12 +994,18 @@
 	struct drm_encoder *encoder = NULL;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_mode_object *obj;
-	int i;
+	int i, r;
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret = false, broken_edid = false;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto exit;
+	}
 
 	if (radeon_connector->ddc_bus)
 		dret = radeon_ddc_probe(radeon_connector, false);
@@ -1110,6 +1156,11 @@
 
 	/* updated in get modes as well since we need to know if it's analog or digital */
 	radeon_connector_update_scratch_regs(connector, ret);
+
+exit:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1377,9 +1428,16 @@
 	enum drm_connector_status ret = connector_status_disconnected;
 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int r;
 
-	if (!force && radeon_check_hpd_status_unchanged(connector))
-		return connector->status;
+	r = pm_runtime_get_sync(connector->dev->dev);
+	if (r < 0)
+		return connector_status_disconnected;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector)) {
+		ret = connector->status;
+		goto out;
+	}
 
 	if (radeon_connector->edid) {
 		kfree(radeon_connector->edid);
@@ -1443,6 +1501,10 @@
 	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
+out:
+	pm_runtime_mark_last_busy(connector->dev->dev);
+	pm_runtime_put_autosuspend(connector->dev->dev);
+
 	return ret;
 }
 
@@ -1658,12 +1720,18 @@
 			drm_object_attach_property(&radeon_connector->base.base,
 						      rdev->mode_info.underscan_vborder_property,
 						      0);
+
+			drm_object_attach_property(&radeon_connector->base.base,
+						   rdev->mode_info.dither_property,
+						   RADEON_FMT_DITHER_DISABLE);
+
 			if (radeon_audio != 0)
 				drm_object_attach_property(&radeon_connector->base.base,
 							   rdev->mode_info.audio_property,
 							   (radeon_audio == 1) ?
 							   RADEON_AUDIO_AUTO :
 							   RADEON_AUDIO_DISABLE);
+
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
 			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1764,6 +1832,11 @@
 							   RADEON_AUDIO_AUTO :
 							   RADEON_AUDIO_DISABLE);
 			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+			}
 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 				radeon_connector->dac_load_detect = true;
 				drm_object_attach_property(&radeon_connector->base.base,
@@ -1811,6 +1884,11 @@
 							   RADEON_AUDIO_AUTO :
 							   RADEON_AUDIO_DISABLE);
 			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+			}
 			subpixel_order = SubPixelHorizontalRGB;
 			connector->interlace_allowed = true;
 			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1857,6 +1935,12 @@
 							   RADEON_AUDIO_AUTO :
 							   RADEON_AUDIO_DISABLE);
 			}
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							   rdev->mode_info.dither_property,
+							   RADEON_FMT_DITHER_DISABLE);
+			}
 			connector->interlace_allowed = true;
 			/* in theory with a DP to VGA converter... */
 			connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 80285e3..26ca223 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -212,9 +212,7 @@
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].kdata = NULL;
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
 		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
 			p->chunk_relocs_idx = i;
 		}
@@ -237,25 +235,31 @@
 				return -EINVAL;
 		}
 
-		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
-		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
-		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
-			size = p->chunks[i].length_dw * sizeof(uint32_t);
-			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
-			if (p->chunks[i].kdata == NULL) {
-				return -ENOMEM;
-			}
-			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
-					       p->chunks[i].user_ptr, size)) {
-				return -EFAULT;
-			}
-			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-				p->cs_flags = p->chunks[i].kdata[0];
-				if (p->chunks[i].length_dw > 1)
-					ring = p->chunks[i].kdata[1];
-				if (p->chunks[i].length_dw > 2)
-					priority = (s32)p->chunks[i].kdata[2];
-			}
+		size = p->chunks[i].length_dw;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		p->chunks[i].user_ptr = cdata;
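+		/* only relocs, flags and (on AGP) IB chunks are copied to kdata here */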
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+			continue;
+
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
+				continue;
+		}
+
+		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+		size *= sizeof(uint32_t);
+		if (p->chunks[i].kdata == NULL) {
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
+			return -EFAULT;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->cs_flags = p->chunks[i].kdata[0];
+			if (p->chunks[i].length_dw > 1)
+				ring = p->chunks[i].kdata[1];
+			if (p->chunks[i].length_dw > 2)
+				priority = (s32)p->chunks[i].kdata[2];
 		}
 	}
 
@@ -278,34 +282,6 @@
 		}
 	}
 
-	/* deal with non-vm */
-	if ((p->chunk_ib_idx != -1) &&
-	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
-	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
-		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
-			DRM_ERROR("cs IB too big: %d\n",
-				  p->chunks[p->chunk_ib_idx].length_dw);
-			return -EINVAL;
-		}
-		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
-			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
-			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
-				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
-				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
-				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
-				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
-				return -ENOMEM;
-			}
-		}
-		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
-		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
-		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
-		p->chunks[p->chunk_ib_idx].last_page_index =
-			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
-	}
-
 	return 0;
 }
 
@@ -339,13 +315,8 @@
 	kfree(parser->track);
 	kfree(parser->relocs);
 	kfree(parser->relocs_ptr);
-	for (i = 0; i < parser->nchunks; i++) {
-		kfree(parser->chunks[i].kdata);
-		if ((parser->rdev->flags & RADEON_IS_AGP)) {
-			kfree(parser->chunks[i].kpage[0]);
-			kfree(parser->chunks[i].kpage[1]);
-		}
-	}
+	for (i = 0; i < parser->nchunks; i++)
+		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
 	kfree(parser->chunks_array);
 	radeon_ib_free(parser->rdev, &parser->ib);
@@ -355,7 +326,6 @@
 static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 			      struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	int r;
 
 	if (parser->chunk_ib_idx == -1)
@@ -364,28 +334,11 @@
 	if (parser->cs_flags & RADEON_CS_USE_VM)
 		return 0;
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	/* Copy the packet into the IB, the parser will read from the
-	 * input memory (cached) and write to the IB (which can be
-	 * uncached).
-	 */
-	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   NULL, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
 	r = radeon_cs_parse(rdev, parser->ring, parser);
 	if (r || parser->parser_error) {
 		DRM_ERROR("Invalid command stream !\n");
 		return r;
 	}
-	r = radeon_cs_finish_pages(parser);
-	if (r) {
-		DRM_ERROR("Invalid command stream !\n");
-		return r;
-	}
 
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
@@ -423,7 +376,6 @@
 static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 				 struct radeon_cs_parser *parser)
 {
-	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
@@ -433,49 +385,13 @@
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
 
-	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
-		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
-		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
-			return -EINVAL;
-		}
-		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
-				   vm, ib_chunk->length_dw * 4);
-		if (r) {
-			DRM_ERROR("Failed to get const ib !\n");
-			return r;
-		}
-		parser->const_ib.is_const_ib = true;
-		parser->const_ib.length_dw = ib_chunk->length_dw;
-		/* Copy the packet into the IB */
-		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
-				       ib_chunk->length_dw * 4)) {
-			return -EFAULT;
-		}
+	if (parser->const_ib.length_dw) {
 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
 		if (r) {
 			return r;
 		}
 	}
 
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
-	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
-		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
-		return -EINVAL;
-	}
-	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
-			   vm, ib_chunk->length_dw * 4);
-	if (r) {
-		DRM_ERROR("Failed to get ib !\n");
-		return r;
-	}
-	parser->ib.length_dw = ib_chunk->length_dw;
-	/* Copy the packet into the IB */
-	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
-			       ib_chunk->length_dw * 4)) {
-		return -EFAULT;
-	}
 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
 	if (r) {
 		return r;
@@ -527,6 +443,62 @@
 	return r;
 }
 
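+/* allocate the IB (and const IB on SI and newer) and copy in the
+ * user command stream
+ */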
+static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_vm *vm = NULL;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM) {
+		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+		vm = &fpriv->vm;
+
+		if ((rdev->family >= CHIP_TAHITI) &&
+		    (parser->chunk_const_ib_idx != -1)) {
+			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+				return -EINVAL;
+			}
+			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+					   vm, ib_chunk->length_dw * 4);
+			if (r) {
+				DRM_ERROR("Failed to get const ib !\n");
+				return r;
+			}
+			parser->const_ib.is_const_ib = true;
+			parser->const_ib.length_dw = ib_chunk->length_dw;
+			if (DRM_COPY_FROM_USER(parser->const_ib.ptr,
+					       ib_chunk->user_ptr,
+					       ib_chunk->length_dw * 4))
+				return -EFAULT;
+		}
+
+		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+	}
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	if (ib_chunk->kdata)
+		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
+	else if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
+		return -EFAULT;
+	return 0;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
@@ -552,10 +524,15 @@
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
-	r = radeon_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS)
+
+	r = radeon_cs_ib_fill(rdev, &parser);
+	if (!r) {
+		r = radeon_cs_parser_relocs(&parser);
+		if (r && r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
+	}
+
+	if (r) {
 		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
@@ -579,97 +556,6 @@
 	return r;
 }
 
-int radeon_cs_finish_pages(struct radeon_cs_parser *p)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-
-	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
-		if (i == ibc->last_page_index) {
-			size = (ibc->length_dw * 4) % PAGE_SIZE;
-			if (size == 0)
-				size = PAGE_SIZE;
-		}
-		
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       size))
-			return -EFAULT;
-	}
-	return 0;
-}
-
-static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
-{
-	int new_page;
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	int i;
-	int size = PAGE_SIZE;
-	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
-		false : true;
-
-	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
-		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
-				       ibc->user_ptr + (i * PAGE_SIZE),
-				       PAGE_SIZE)) {
-			p->parser_error = -EFAULT;
-			return 0;
-		}
-	}
-
-	if (pg_idx == ibc->last_page_index) {
-		size = (ibc->length_dw * 4) % PAGE_SIZE;
-		if (size == 0)
-			size = PAGE_SIZE;
-	}
-
-	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-	if (copy1)
-		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
-
-	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
-			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
-			       size)) {
-		p->parser_error = -EFAULT;
-		return 0;
-	}
-
-	/* copy to IB for non single case */
-	if (!copy1)
-		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
-
-	ibc->last_copied_page = pg_idx;
-	ibc->kpage_idx[new_page] = pg_idx;
-
-	return new_page;
-}
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-	u32 pg_idx, pg_offset;
-	u32 idx_value = 0;
-	int new_page;
-
-	pg_idx = (idx * 4) / PAGE_SIZE;
-	pg_offset = (idx * 4) % PAGE_SIZE;
-
-	if (ibc->kpage_idx[0] == pg_idx)
-		return ibc->kpage[0][pg_offset/4];
-	if (ibc->kpage_idx[1] == pg_idx)
-		return ibc->kpage[1][pg_offset/4];
-
-	new_page = radeon_cs_update_pages(p, pg_idx);
-	if (new_page < 0) {
-		p->parser_error = new_page;
-		return 0;
-	}
-
-	idx_value = ibc->kpage[new_page][pg_offset/4];
-	return idx_value;
-}
-
 /**
  * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser:	parser structure holding parsing context.
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 841d0e0..61dbdd9 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -101,6 +101,12 @@
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
 /**
  * radeon_program_register_sequence - program an array of registers.
  *
@@ -1076,7 +1082,10 @@
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
+		return;
+
 	if (state == VGA_SWITCHEROO_ON) {
 		unsigned d3_delay = dev->pdev->d3_delay;
 
@@ -1087,7 +1096,7 @@
 		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
 			dev->pdev->d3_delay = 20;
 
-		radeon_resume_kms(dev);
+		radeon_resume_kms(dev, true, true);
 
 		dev->pdev->d3_delay = d3_delay;
 
@@ -1097,7 +1106,7 @@
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-		radeon_suspend_kms(dev, pmm);
+		radeon_suspend_kms(dev, true, true);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1147,6 +1156,7 @@
 {
 	int r, i;
 	int dma_bits;
+	bool runtime = false;
 
 	rdev->shutdown = false;
 	rdev->dev = &pdev->dev;
@@ -1293,7 +1303,14 @@
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
-	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
+
+	if (radeon_runtime_pm == 1)
+		runtime = true;
+	if ((radeon_runtime_pm == -1) && radeon_is_px())
+		runtime = true;
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+	if (runtime)
+		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
 
 	r = radeon_init(rdev);
 	if (r)
@@ -1383,7 +1400,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver suspend.
  */
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 {
 	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
@@ -1394,9 +1411,7 @@
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
 	}
-	if (state.event == PM_EVENT_PRETHAW) {
-		return 0;
-	}
+
 	rdev = dev->dev_private;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1455,14 +1470,17 @@
 	radeon_agp_suspend(rdev);
 
 	pci_save_state(dev->pdev);
-	if (state.event == PM_EVENT_SUSPEND) {
+	if (suspend) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
-	console_lock();
-	radeon_fbdev_set_suspend(rdev, 1);
-	console_unlock();
+
+	if (fbcon) {
+		console_lock();
+		radeon_fbdev_set_suspend(rdev, 1);
+		console_unlock();
+	}
 	return 0;
 }
 
@@ -1475,7 +1493,7 @@
  * Returns 0 for success or an error on failure.
  * Called at driver resume.
  */
-int radeon_resume_kms(struct drm_device *dev)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
@@ -1484,12 +1502,17 @@
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	console_lock();
-	pci_set_power_state(dev->pdev, PCI_D0);
-	pci_restore_state(dev->pdev);
-	if (pci_enable_device(dev->pdev)) {
-		console_unlock();
-		return -1;
+	if (fbcon) {
+		console_lock();
+	}
+	if (resume) {
+		pci_set_power_state(dev->pdev, PCI_D0);
+		pci_restore_state(dev->pdev);
+		if (pci_enable_device(dev->pdev)) {
+			if (fbcon)
+				console_unlock();
+			return -1;
+		}
 	}
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
@@ -1502,9 +1525,11 @@
 	radeon_pm_resume(rdev);
 	radeon_restore_bios_scratch_regs(rdev);
 
-	radeon_fbdev_set_suspend(rdev, 0);
-	console_unlock();
-
+	if (fbcon) {
+		radeon_fbdev_set_suspend(rdev, 0);
+		console_unlock();
+	}
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
@@ -1549,6 +1574,14 @@
 	int resched;
 
 	down_write(&rdev->exclusive_lock);
+
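+	/* bail out if the lockup was already handled by someone else */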
+	if (!rdev->needs_reset) {
+		up_write(&rdev->exclusive_lock);
+		return 0;
+	}
+
+	rdev->needs_reset = false;
+
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0d1aa05..7b25381 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -30,6 +30,7 @@
 #include "atom.h"
 #include <asm/div64.h>
 
+#include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
@@ -306,7 +307,7 @@
 	 */
 	if (update_pending &&
 	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
-							       &vpos, &hpos)) &&
+							       &vpos, &hpos, NULL, NULL)) &&
 	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
 	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
 		/* crtc didn't flip in this target vblank interval,
@@ -494,11 +495,55 @@
 	return r;
 }
 
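+/* drm_crtc_helper_set_config() wrapper that keeps a runtime PM
+ * reference while any crtc is enabled
+ */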
+static int
+radeon_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	bool active = false;
+	int ret;
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	dev = set->crtc->dev;
+
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_crtc_helper_set_config(set);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->enabled)
+			active = true;
+
+	pm_runtime_mark_last_busy(dev->dev);
+
+	rdev = dev->dev_private;
+	/* if we have active crtcs and we don't have a power ref,
+	   take the current one */
+	if (active && !rdev->have_disp_power_ref) {
+		rdev->have_disp_power_ref = true;
+		return ret;
+	}
+	/* if we have no active crtcs, then drop the power ref
+	   we got before */
+	if (!active && rdev->have_disp_power_ref) {
+		pm_runtime_put_autosuspend(dev->dev);
+		rdev->have_disp_power_ref = false;
+	}
+
+	/* drop the power reference we got coming in here */
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.cursor_set = radeon_crtc_cursor_set,
 	.cursor_move = radeon_crtc_cursor_move,
 	.gamma_set = radeon_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = radeon_crtc_set_config,
 	.destroy = radeon_crtc_destroy,
 	.page_flip = radeon_crtc_page_flip,
 };
@@ -1178,6 +1223,12 @@
 	{ RADEON_AUDIO_AUTO, "auto" },
 };
 
+/* XXX support different dither options? spatial, temporal, both, etc. */
+static struct drm_prop_enum_list radeon_dither_enum_list[] =
+{	{ RADEON_FMT_DITHER_DISABLE, "off" },
+	{ RADEON_FMT_DITHER_ENABLE, "on" },
+};
+
 static int radeon_modeset_create_props(struct radeon_device *rdev)
 {
 	int sz;
@@ -1234,6 +1285,12 @@
 					 "audio",
 					 radeon_audio_enum_list, sz);
 
+	sz = ARRAY_SIZE(radeon_dither_enum_list);
+	rdev->mode_info.dither_property =
+		drm_property_create_enum(rdev->ddev, 0,
+					 "dither",
+					 radeon_dither_enum_list, sz);
+
 	return 0;
 }
 
@@ -1539,12 +1596,17 @@
 }
 
 /*
- * Retrieve current video scanout position of crtc on a given gpu.
+ * Retrieve current video scanout position of crtc on a given gpu, and
+ * an optional accurate timestamp of when query happened.
  *
  * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
+ * \param *stime Target location for timestamp taken immediately before
+ *               scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ *               scanout position query. Can be NULL to skip timestamp.
  *
  * Returns vpos as a positive number while in active scanout area.
  * Returns vpos as a negative number inside vblank, counting the number
@@ -1560,7 +1622,8 @@
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos,
+			       ktime_t *stime, ktime_t *etime)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1568,6 +1631,12 @@
 
 	struct radeon_device *rdev = dev->dev_private;
 
+	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+	/* Get optional system timestamp before query. */
+	if (stime)
+		*stime = ktime_get();
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
@@ -1650,6 +1719,12 @@
 		}
 	}
 
+	/* Get optional system timestamp after query. */
+	if (etime)
+		*etime = ktime_get();
+
+	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
 	/* Decode into vertical and horizontal scanout position. */
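+/* report whether the ATPX handler detected a PowerXpress (PX) platform */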
 	*vpos = position & 0x1fff;
 	*hpos = (position >> 16) & 0x1fff;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9c14a1b..1aee3221 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,8 +36,9 @@
 #include <drm/drm_pciids.h>
 #include <linux/console.h>
 #include <linux/module.h>
-
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drm_crtc_helper.h"
 /*
  * KMS wrapper.
  * - 2.0.0 - initial interface
@@ -87,8 +88,8 @@
 				 struct drm_file *file_priv);
 void radeon_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
-int radeon_resume_kms(struct drm_device *dev);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
@@ -100,14 +101,14 @@
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-				      int *vpos, int *hpos);
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -137,9 +138,11 @@
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
+bool radeon_is_px(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_is_px(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -162,6 +165,7 @@
 int radeon_fastfb = 0;
 int radeon_dpm = -1;
 int radeon_aspm = -1;
+int radeon_runtime_pm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -223,6 +227,9 @@
 MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(aspm, radeon_aspm, int, 0444);
 
+MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+module_param_named(runpm, radeon_runtime_pm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -259,6 +266,7 @@
 	return 0;
 }
 
+
 static const struct file_operations radeon_driver_old_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -353,25 +361,144 @@
 	drm_put_dev(dev);
 }
 
-static int
-radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int radeon_pmops_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_suspend_kms(dev, state);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, true, true);
 }
 
-static int
-radeon_pci_resume(struct pci_dev *pdev)
+static int radeon_pmops_resume(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
-	return radeon_resume_kms(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, true, true);
 }
 
+static int radeon_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(drm_dev, false, true);
+}
+
+static int radeon_pmops_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+	drm_kms_helper_poll_disable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+
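+	/* driver suspend without fbcon; PCI power state is handled below */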
+	ret = radeon_suspend_kms(drm_dev, false, false);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3cold);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+
+	return 0;
+}
+
+static int radeon_pmops_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (radeon_runtime_pm == 0)
+		return -EINVAL;
+
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	ret = radeon_resume_kms(drm_dev, false, false);
+	drm_kms_helper_poll_enable(drm_dev);
+	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	return 0;
+}
+
+static int radeon_pmops_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct drm_crtc *crtc;
+
+	if (radeon_runtime_pm == 0)
+		return -EBUSY;
+
+	/* are we PX enabled? */
+	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+		return -EBUSY;
+	}
+
+	list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
+		if (crtc->enabled) {
+			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+	return 1;
+}
+
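+/* ioctl wrapper that wakes the GPU via runtime PM before calling drm_ioctl() */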
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	long ret;
+
+	dev = file_priv->minor->dev;
+	ret = pm_runtime_get_sync(dev->dev);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+	return ret;
+}
+
+static const struct dev_pm_ops radeon_pm_ops = {
+	.suspend = radeon_pmops_suspend,
+	.resume = radeon_pmops_resume,
+	.freeze = radeon_pmops_freeze,
+	.thaw = radeon_pmops_thaw,
+	.poweroff = radeon_pmops_freeze,
+	.restore = radeon_pmops_resume,
+	.runtime_suspend = radeon_pmops_runtime_suspend,
+	.runtime_resume = radeon_pmops_runtime_resume,
+	.runtime_idle = radeon_pmops_runtime_idle,
+};
+
 static const struct file_operations radeon_driver_kms_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = radeon_drm_ioctl,
 	.mmap = radeon_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
@@ -380,6 +507,15 @@
 #endif
 };
 
+
+static void
+radeon_pci_shutdown(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	radeon_driver_unload_kms(dev);
+}
+
 static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
@@ -392,8 +528,6 @@
 	.postclose = radeon_driver_postclose_kms,
 	.lastclose = radeon_driver_lastclose_kms,
 	.unload = radeon_driver_unload_kms,
-	.suspend = radeon_suspend_kms,
-	.resume = radeon_resume_kms,
 	.get_vblank_counter = radeon_get_vblank_counter_kms,
 	.enable_vblank = radeon_enable_vblank_kms,
 	.disable_vblank = radeon_disable_vblank_kms,
@@ -408,7 +542,6 @@
 	.irq_uninstall = radeon_driver_irq_uninstall_kms,
 	.irq_handler = radeon_driver_irq_handler_kms,
 	.ioctls = radeon_ioctls_kms,
-	.gem_init_object = radeon_gem_object_init,
 	.gem_free_object = radeon_gem_object_free,
 	.gem_open_object = radeon_gem_object_open,
 	.gem_close_object = radeon_gem_object_close,
@@ -451,8 +584,8 @@
 	.id_table = pciidlist,
 	.probe = radeon_pci_probe,
 	.remove = radeon_pci_remove,
-	.suspend = radeon_pci_suspend,
-	.resume = radeon_pci_resume,
+	.driver.pm = &radeon_pm_ops,
+	.shutdown = radeon_pci_shutdown,
 };
 
 static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index b369d42..543dcfa 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
 #define DRIVER_MINOR		33
 #define DRIVER_PATCHLEVEL	0
 
+long radeon_drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg);
+
 /* The rest of the file is DEPRECATED! */
 #ifdef CONFIG_DRM_RADEON_UMS
 
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index ddb8f8e..281d14c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -190,10 +190,8 @@
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
-	if (wake) {
-		rdev->fence_drv[ring].last_activity = jiffies;
+	if (wake)
 		wake_up_all(&rdev->fence_queue);
-	}
 }
 
 /**
@@ -212,13 +210,13 @@
 }
 
 /**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
  *
  * @rdev: radeon device pointer
  * @seq: sequence number
  * @ring: ring index the fence is associated with
  *
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequence number is >= the requested
  * sequence number (all asics).
  * Returns true if the fence has signaled (current fence value
  * is >= requested value) or false if it has not (current fence
@@ -263,113 +261,131 @@
 }
 
 /**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
  *
  * @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
+ * @seq: sequence numbers
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
  * @intr: use interruptable sleep
  * @lock_ring: whether the ring should be locked or not
  *
- * Wait for the requested sequence number to be written (all asics).
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
  * @intr selects whether to use interruptable (true) or non-interruptable
  * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
  * Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
  */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
-				 unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+				 bool intr, bool lock_ring)
 {
-	unsigned long timeout, last_activity;
-	uint64_t seq;
-	unsigned i;
+	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
-	int r;
+	int i, r;
 
-	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
-		if (!rdev->ring[ring].ready) {
-			return -EBUSY;
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+
+		/* Save current sequence values, used to check for GPU lockups */
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
+			radeon_irq_kms_sw_irq_get(rdev, i);
 		}
 
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = rdev->fence_drv[ring].last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
-		/* Save current last activity valuee, used to check for GPU lockups */
-		last_activity = rdev->fence_drv[ring].last_activity;
-
-		trace_radeon_fence_wait_begin(rdev->ddev, seq);
-		radeon_irq_kms_sw_irq_get(rdev, ring);
 		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
-                } else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
+			r = wait_event_interruptible_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
 		}
-		radeon_irq_kms_sw_irq_put(rdev, ring);
-		if (unlikely(r < 0)) {
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			radeon_irq_kms_sw_irq_put(rdev, i);
+			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
+		}
+
+		if (unlikely(r < 0))
 			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, seq);
 
 		if (unlikely(!signaled)) {
+			if (rdev->needs_reset)
+				return -EDEADLK;
+
 			/* we were interrupted for some reason and fence
 			 * isn't signaled yet, resume waiting */
-			if (r) {
+			if (r)
 				continue;
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+					break;
 			}
 
-			/* check if sequence value has changed since last_activity */
-			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+			if (i != RADEON_NUM_RINGS)
 				continue;
-			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_lock(&rdev->ring_lock);
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+					break;
 			}
 
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != rdev->fence_drv[ring].last_activity) {
-				if (lock_ring) {
-					mutex_unlock(&rdev->ring_lock);
-				}
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+			if (i < RADEON_NUM_RINGS) {
 				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
-					 target_seq, seq);
+				dev_warn(rdev->dev, "GPU lockup (waiting for "
+					 "0x%016llx last fence id 0x%016llx on"
+					 " ring %d)\n",
+					 target_seq[i], last_seq[i], i);
 
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				if (lock_ring) {
+				/* remember that we need a reset */
+				rdev->needs_reset = true;
+				if (lock_ring)
 					mutex_unlock(&rdev->ring_lock);
-				}
+				wake_up_all(&rdev->fence_queue);
 				return -EDEADLK;
 			}
 
-			if (lock_ring) {
+			if (lock_ring)
 				mutex_unlock(&rdev->ring_lock);
-			}
 		}
 	}
 	return 0;
@@ -388,6 +404,7 @@
  */
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
 	if (fence == NULL) {
@@ -395,150 +412,18 @@
 		return -EINVAL;
 	}
 
-	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
-				  fence->ring, intr, true);
-	if (r) {
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
+	if (r)
 		return r;
-	}
+
 	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
 	return 0;
 }
 
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
-				     u64 *target_seq, bool intr)
-{
-	unsigned long timeout, last_activity, tmp;
-	unsigned i, ring = RADEON_NUM_RINGS;
-	bool signaled;
-	int r;
-
-	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (!target_seq[i]) {
-			continue;
-		}
-
-		/* use the most recent one as indicator */
-		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
-			last_activity = rdev->fence_drv[i].last_activity;
-		}
-
-		/* For lockup detection just pick the lowest ring we are
-		 * actively waiting for
-		 */
-		if (i < ring) {
-			ring = i;
-		}
-	}
-
-	/* nothing to wait for ? */
-	if (ring == RADEON_NUM_RINGS) {
-		return -ENOENT;
-	}
-
-	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
-		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-
-		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_get(rdev, i);
-			}
-		}
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		}
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_put(rdev, i);
-			}
-		}
-		if (unlikely(r < 0)) {
-			return r;
-		}
-		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
-
-		if (unlikely(!signaled)) {
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r) {
-				continue;
-			}
-
-			mutex_lock(&rdev->ring_lock);
-			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
-					tmp = rdev->fence_drv[i].last_activity;
-				}
-			}
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != tmp) {
-				last_activity = tmp;
-				mutex_unlock(&rdev->ring_lock);
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
-					 target_seq[ring]);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				mutex_unlock(&rdev->ring_lock);
-				return -EDEADLK;
-			}
-			mutex_unlock(&rdev->ring_lock);
-		}
-	}
-	return 0;
-}
-
 /**
  * radeon_fence_wait_any - wait for a fence to signal on any ring
  *
@@ -557,7 +442,7 @@
 			  bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS];
-	unsigned i;
+	unsigned i, num_rings = 0;
 	int r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -567,15 +452,19 @@
 			continue;
 		}
 
-		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
-			/* something was allready signaled */
-			return 0;
-		}
-
 		seq[i] = fences[i]->seq;
+		++num_rings;
+
+		/* test if something was already signaled */
+		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+			return 0;
 	}
 
-	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	/* nothing to wait for ? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = radeon_fence_wait_seq(rdev, seq, intr, true);
 	if (r) {
 		return r;
 	}
@@ -594,15 +483,15 @@
  */
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq;
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 
-	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
-	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false, false);
 }
 
 /**
@@ -617,14 +506,18 @@
  */
 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
-	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+	if (!seq[ring])
+		return 0;
+
+	r = radeon_fence_wait_seq(rdev, seq, false, false);
 	if (r) {
-		if (r == -EDEADLK) {
+		if (r == -EDEADLK)
 			return -EDEADLK;
-		}
+
 		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
 			ring, r);
 	}
@@ -826,7 +719,6 @@
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
-	rdev->fence_drv[ring].last_activity = jiffies;
 	rdev->fence_drv[ring].initialized = false;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b990b1a..8a83b89 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -607,8 +607,8 @@
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	unsigned pd_size, pts_size;
-	u64 *pd_addr;
+	unsigned pd_size, pd_entries, pts_size;
+	struct radeon_ib ib;
 	int r;
 
 	if (vm == NULL) {
@@ -619,8 +619,10 @@
 		return 0;
 	}
 
-retry:
 	pd_size = radeon_vm_directory_size(rdev);
+	pd_entries = radeon_vm_num_pdes(rdev);
+
+retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
 			     &vm->page_directory, pd_size,
 			     RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -637,9 +639,31 @@
 	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
 	/* Initially clear the page directory */
-	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
-	memset(pd_addr, 0, pd_size);
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+			  NULL, pd_entries * 2 + 64);
+	if (r) {
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return r;
+	}
 
+	ib.length_dw = 0;
+
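+	/* clear the PD entries via the DMA ring instead of a CPU memset */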
+	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+				0, pd_entries, 0, 0);
+
+	radeon_ib_sync_to(&ib, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return r;
+	}
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
+	radeon_fence_unref(&vm->last_flush);
+
+	/* allocate page table array */
 	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
 	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
@@ -914,6 +938,26 @@
 }
 
 /**
+ * radeon_vm_page_flags - translate page flags to what the hw uses
+ *
+ * @flags: flags coming from userspace
+ *
+ * Translate the flags the userspace ABI uses to hw flags.
+ */
+static uint32_t radeon_vm_page_flags(uint32_t flags)
+{
+	uint32_t hw_flags = 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		hw_flags |= R600_PTE_SYSTEM;
+		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return hw_flags;
+}
+
+/**
  * radeon_vm_update_pdes - make sure that page directory is valid
  *
  * @rdev: radeon_device pointer
@@ -974,7 +1018,11 @@
 			if (count) {
 				radeon_asic_vm_set_page(rdev, ib, last_pde,
 							last_pt, count, incr,
-							RADEON_VM_PAGE_VALID);
+							R600_PTE_VALID);
+
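+				/* also clear the page tables behind the new PDEs */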
+				count *= RADEON_VM_PTE_COUNT;
+				radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+							count, 0, 0);
 			}
 
 			count = 1;
@@ -987,8 +1035,11 @@
 
 	if (count) {
 		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
-					incr, RADEON_VM_PAGE_VALID);
+					incr, R600_PTE_VALID);
 
+		count *= RADEON_VM_PTE_COUNT;
+		radeon_asic_vm_set_page(rdev, ib, last_pt, 0,
+					count, 0, 0);
 	}
 
 	return 0;
@@ -1082,7 +1133,6 @@
 			    struct radeon_bo *bo,
 			    struct ttm_mem_reg *mem)
 {
-	unsigned ridx = rdev->asic->vm.pt_ring_index;
 	struct radeon_ib ib;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
@@ -1151,11 +1201,14 @@
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
+	/* reserve space for clearing new page tables */
+	ndw += npdes * 2 * RADEON_VM_PTE_COUNT;
+
 	/* update too big for an IB */
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
 	ib.length_dw = 0;
 
 	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1165,7 +1218,7 @@
 	}
 
 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
-			      addr, bo_va->flags);
+			      addr, radeon_vm_page_flags(bo_va->flags));
 
 	radeon_ib_sync_to(&ib, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dce99c8..805c5e5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -29,13 +29,6 @@
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index c180df8..bdb0f93 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -418,7 +418,7 @@
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
 
-	ret = drm_ioctl(filp, cmd, arg);
+	ret = radeon_drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index cc9e848..ec6240b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,8 @@
 #include "radeon.h"
 #include "atom.h"
 
+#include <linux/pm_runtime.h>
+
 #define RADEON_WAIT_IDLE_TIMEOUT 200
 
 /**
@@ -47,8 +49,12 @@
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct radeon_device *rdev = dev->dev_private;
+	irqreturn_t ret;
 
-	return radeon_irq_process(rdev);
+	ret = radeon_irq_process(rdev);
+	if (ret == IRQ_HANDLED)
+		pm_runtime_mark_last_busy(dev->dev);
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 61580dd..bb87105 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,7 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
-
+#include <linux/pm_runtime.h>
 /**
  * radeon_driver_unload_kms - Main unload function for KMS.
  *
@@ -50,9 +50,14 @@
 
 	if (rdev == NULL)
 		return 0;
+
 	if (rdev->rmmio == NULL)
 		goto done_free;
+
+	pm_runtime_get_sync(dev->dev);
+
 	radeon_acpi_fini(rdev);
+
 	radeon_modeset_fini(rdev);
 	radeon_device_fini(rdev);
 
@@ -125,9 +130,20 @@
 				"Error during ACPI methods call\n");
 	}
 
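+	/* enable runtime PM with a 5 second autosuspend delay */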
+	if (radeon_runtime_pm != 0) {
+		pm_runtime_use_autosuspend(dev->dev);
+		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+		pm_runtime_set_active(dev->dev);
+		pm_runtime_allow(dev->dev);
+		pm_runtime_mark_last_busy(dev->dev);
+		pm_runtime_put_autosuspend(dev->dev);
+	}
+
 out:
 	if (r)
 		radeon_driver_unload_kms(dev);
+
 	return r;
 }
 
@@ -191,7 +207,7 @@
 
 	switch (info->request) {
 	case RADEON_INFO_DEVICE_ID:
-		*value = dev->pci_device;
+		*value = dev->pdev->device;
 		break;
 	case RADEON_INFO_NUM_GB_PIPES:
 		*value = rdev->num_gb_pipes;
@@ -475,9 +491,14 @@
 int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct radeon_device *rdev = dev->dev_private;
+	int r;
 
 	file_priv->driver_priv = NULL;
 
+	r = pm_runtime_get_sync(dev->dev);
+	if (r < 0)
+		return r;
+
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
@@ -506,6 +527,9 @@
 
 		file_priv->driver_priv = fpriv;
 	}
+
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
 	return 0;
 }
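
The radeon_kms.c hunks above bracket driver load and open with runtime-PM references so the GPU is resumed while the ioctl path runs, and the autosuspend timer is re-armed on the way out. A minimal sketch of that pattern, assuming a hypothetical my_open()/my_do_open_work() pair (not the driver's own code, but real pm_runtime_* calls):

#include <linux/pm_runtime.h>

/*
 * Sketch only: hold a runtime-PM reference for the duration of an
 * operation, then re-arm the autosuspend timer instead of suspending
 * immediately.  my_do_open_work() is a hypothetical helper.
 */
static int my_open(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume the device if needed */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}

	ret = my_do_open_work(dev);

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference, suspend later */
	return ret;
}
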
 
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 62cd512..c89971d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -392,7 +392,7 @@
 	props.type = BACKLIGHT_RAW;
 	snprintf(bl_name, sizeof(bl_name),
 		 "radeon_bl%d", dev->primary->index);
-	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+	bd = backlight_device_register(bl_name, drm_connector->kdev,
 				       pdata, &radeon_backlight_ops, &props);
 	if (IS_ERR(bd)) {
 		DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ef63d3f..3f0dd66 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -249,6 +249,8 @@
 	struct drm_property *underscan_vborder_property;
 	/* audio */
 	struct drm_property *audio_property;
+	/* FMT dithering */
+	struct drm_property *dither_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
@@ -479,6 +481,11 @@
 	RADEON_AUDIO_AUTO = 2
 };
 
+enum radeon_connector_dither {
+	RADEON_FMT_DITHER_DISABLE = 0,
+	RADEON_FMT_DITHER_ENABLE = 1,
+};
+
 struct radeon_connector {
 	struct drm_connector base;
 	uint32_t connector_id;
@@ -498,6 +505,7 @@
 	struct radeon_router router;
 	struct radeon_i2c_chan *router_bus;
 	enum radeon_connector_audio audio;
+	enum radeon_connector_dither dither;
 };
 
 struct radeon_framebuffer {
@@ -758,7 +766,8 @@
 				   int x, int y);
 
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-				      int *vpos, int *hpos);
+				      int *vpos, int *hpos, ktime_t *stime,
+				      ktime_t *etime);
 
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
@@ -850,6 +859,12 @@
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode);
 
+/* fmt blocks */
+void avivo_program_fmt(struct drm_encoder *encoder);
+void dce3_program_fmt(struct drm_encoder *encoder);
+void dce4_program_fmt(struct drm_encoder *encoder);
+void dce8_program_fmt(struct drm_encoder *encoder);
+
 /* fbdev layer */
 int radeon_fbdev_init(struct radeon_device *rdev);
 void radeon_fbdev_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4f6b7fc..00bdcd3 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -508,17 +508,21 @@
 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
 		level = RADEON_DPM_FORCED_LEVEL_AUTO;
 	} else {
-		mutex_unlock(&rdev->pm.mutex);
 		count = -EINVAL;
 		goto fail;
 	}
 	if (rdev->asic->dpm.force_performance_level) {
+		if (rdev->pm.dpm.thermal_active) {
+			count = -EINVAL;
+			goto fail;
+		}
 		ret = radeon_dpm_force_performance_level(rdev, level);
 		if (ret)
 			count = -EINVAL;
 	}
-	mutex_unlock(&rdev->pm.mutex);
 fail:
+	mutex_unlock(&rdev->pm.mutex);
+
 	return count;
 }
 
@@ -881,11 +885,12 @@
 		}
 	}
 
-	printk("switching from power state:\n");
-	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
-	printk("switching to power state:\n");
-	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
-
+	if (radeon_dpm == 1) {
+		printk("switching from power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+		printk("switching to power state:\n");
+		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+	}
 	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
@@ -918,12 +923,16 @@
 	radeon_dpm_post_set_power_state(rdev);
 
 	if (rdev->asic->dpm.force_performance_level) {
-		if (rdev->pm.dpm.thermal_active)
+		if (rdev->pm.dpm.thermal_active) {
+			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 			/* force low perf level for thermal */
 			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
-		else
-			/* otherwise, enable auto */
-			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+			/* save the user's level */
+			rdev->pm.dpm.forced_level = level;
+		} else {
+			/* otherwise, user selected level */
+			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
+		}
 	}
 
 done:
@@ -1179,7 +1188,8 @@
 	mutex_lock(&rdev->pm.mutex);
 	radeon_dpm_init(rdev);
 	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
-	radeon_dpm_print_power_states(rdev);
+	if (radeon_dpm == 1)
+		radeon_dpm_print_power_states(rdev);
 	radeon_dpm_setup_asic(rdev);
 	ret = radeon_dpm_enable(rdev);
 	mutex_unlock(&rdev->pm.mutex);
@@ -1241,6 +1251,23 @@
 	case CHIP_RV670:
 	case CHIP_RS780:
 	case CHIP_RS880:
+	case CHIP_CAYMAN:
+	case CHIP_ARUBA:
+	case CHIP_BONAIRE:
+	case CHIP_KABINI:
+	case CHIP_KAVERI:
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 1)
+			rdev->pm.pm_method = PM_METHOD_DPM;
+		else
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		break;
 	case CHIP_RV770:
 	case CHIP_RV730:
 	case CHIP_RV710:
@@ -1256,16 +1283,11 @@
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
-	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
-	case CHIP_BONAIRE:
-	case CHIP_KABINI:
-	case CHIP_KAVERI:
 		/* DPM requires the RLC, RV770+ dGPU requires SMC */
 		if (!rdev->rlc_fw)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1273,10 +1295,10 @@
 			 (!(rdev->flags & RADEON_IS_IGP)) &&
 			 (!rdev->smc_fw))
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
-		else if (radeon_dpm == 1)
-			rdev->pm.pm_method = PM_METHOD_DPM;
-		else
+		else if (radeon_dpm == 0)
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else
+			rdev->pm.pm_method = PM_METHOD_DPM;
 		break;
 	default:
 		/* default to profile method */
@@ -1468,7 +1490,7 @@
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL);
 			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
 			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
 				in_vbl = false;
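
The radeon_pm.c hunk above saves the user's forced DPM level before throttling for a thermal event and restores it afterwards, instead of unconditionally switching back to AUTO. A standalone model of that save/restore (illustrative types only, not driver code):

#include <stdio.h>

/*
 * radeon_dpm_force_performance_level() records the level it forced, so the
 * thermal path has to save the user's selection before forcing LOW and put
 * it back afterwards.  The enum and struct here are illustrative only.
 */
enum forced_level { LEVEL_AUTO, LEVEL_LOW, LEVEL_HIGH };

struct dpm_state {
	enum forced_level forced_level;	/* last level that was forced */
	int thermal_active;
};

static void force_performance_level(struct dpm_state *dpm, enum forced_level level)
{
	/* ...program the hardware... */
	dpm->forced_level = level;	/* like the real function, records what it forced */
}

static void post_set_power_state(struct dpm_state *dpm)
{
	if (dpm->thermal_active) {
		enum forced_level level = dpm->forced_level;

		force_performance_level(dpm, LEVEL_LOW);	/* throttle */
		dpm->forced_level = level;	/* remember the user's choice */
	} else {
		force_performance_level(dpm, dpm->forced_level);
	}
}

int main(void)
{
	struct dpm_state dpm = { .forced_level = LEVEL_HIGH, .thermal_active = 1 };

	post_set_power_state(&dpm);
	printf("during thermal event: remembered level %d\n", dpm.forced_level);

	dpm.thermal_active = 0;
	post_set_power_state(&dpm);
	printf("after thermal event:  forced level %d\n", dpm.forced_level);
	return 0;
}
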
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f7e3678..811bca6 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -47,6 +47,30 @@
 		      __entry->fences)
 );
 
+TRACE_EVENT(radeon_vm_set_page,
+	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
+		     uint32_t incr, uint32_t flags),
+	    TP_ARGS(pe, addr, count, incr, flags),
+	    TP_STRUCT__entry(
+			     __field(u64, pe)
+			     __field(u64, addr)
+			     __field(u32, count)
+			     __field(u32, incr)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->pe = pe;
+			   __entry->addr = addr;
+			   __entry->count = count;
+			   __entry->incr = incr;
+			   __entry->flags = flags;
+			   ),
+	    TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+		      __entry->pe, __entry->addr, __entry->incr,
+		      __entry->flags, __entry->count)
+);
+
 DECLARE_EVENT_CLASS(radeon_fence_request,
 
 	    TP_PROTO(struct drm_device *dev, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 308eff5..ab0a172 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -240,6 +240,8 @@
 		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;
 
+			radeon_uvd_note_usage(rdev);
+
 			r = radeon_uvd_get_destroy_msg(rdev,
 				R600_RING_TYPE_UVD_INDEX, handle, &fence);
 			if (r) {
@@ -620,7 +622,7 @@
 	if (r) 
 		goto err;
 
-	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 	if (r)
 		goto err;
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6acba80..76cc8d3 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -153,6 +153,70 @@
 	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
 }
 
+void avivo_program_fmt(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 0;
+	u32 tmp = 0;
+	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		bpc = radeon_get_monitor_bpc(connector);
+		dither = radeon_connector->dither;
+	}
+
+	/* LVDS FMT is set up by atom */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return;
+
+	if (bpc == 0)
+		return;
+
+	switch (bpc) {
+	case 6:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+		else
+			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+		break;
+	case 8:
+		if (dither == RADEON_FMT_DITHER_ENABLE)
+			/* XXX sort out optimal dither settings */
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
+		else
+			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
+				AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
+		break;
+	case 10:
+	default:
+		/* not needed */
+		break;
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+		WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 5811d27..26633a0 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -407,9 +407,9 @@
 	WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
 }
 
-static u64 rv6xx_clocks_per_unit(u32 unit)
+static u32 rv6xx_clocks_per_unit(u32 unit)
 {
-	u64 tmp = 1 << (2 * unit);
+	u32 tmp = 1 << (2 * unit);
 
 	return tmp;
 }
@@ -417,7 +417,7 @@
 static u32 rv6xx_scale_count_given_unit(struct radeon_device *rdev,
 					u32 unscaled_count, u32 unit)
 {
-	u32 count_per_unit = (u32)rv6xx_clocks_per_unit(unit);
+	u32 count_per_unit = rv6xx_clocks_per_unit(unit);
 
 	return (unscaled_count + count_per_unit - 1) / count_per_unit;
 }
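
The rv6xx_dpm.c change narrows rv6xx_clocks_per_unit() to u32, since each unit covers 4^unit clocks and counts are then scaled with a round-up division. A standalone illustration of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* Each "unit" covers 4^unit clocks, so u32 is plenty for the unit values used. */
static uint32_t clocks_per_unit(uint32_t unit)
{
	return 1u << (2 * unit);		/* 1, 4, 16, 64, ... */
}

static uint32_t scale_count_given_unit(uint32_t unscaled_count, uint32_t unit)
{
	uint32_t count_per_unit = clocks_per_unit(unit);

	/* round up: any partial unit still needs a full count */
	return (unscaled_count + count_per_unit - 1) / count_per_unit;
}

int main(void)
{
	assert(scale_count_given_unit(100, 0) == 100);	/* 1 clock per unit */
	assert(scale_count_given_unit(100, 2) == 7);	/* 16 clocks per unit: ceil(100/16) */
	assert(scale_count_given_unit(17, 2) == 2);
	return 0;
}
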
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d96f7cb..6a64cca 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -78,11 +78,6 @@
 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void si_dma_vm_set_page(struct radeon_device *rdev,
-			       struct radeon_ib *ib,
-			       uint64_t pe,
-			       uint64_t addr, unsigned count,
-			       uint32_t incr, uint32_t flags);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
 					 bool enable);
 static void si_fini_pg(struct radeon_device *rdev);
@@ -4673,61 +4668,6 @@
 	       block, mc_id);
 }
 
-/**
- * si_vm_set_page - update the page tables using the CP
- *
- * @rdev: radeon_device pointer
- * @ib: indirect buffer to fill with commands
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
- *
- * Update the page tables using the CP (SI).
- */
-void si_vm_set_page(struct radeon_device *rdev,
-		    struct radeon_ib *ib,
-		    uint64_t pe,
-		    uint64_t addr, unsigned count,
-		    uint32_t incr, uint32_t flags)
-{
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-	uint64_t value;
-	unsigned ndw;
-
-	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
-		while (count) {
-			ndw = 2 + count * 2;
-			if (ndw > 0x3FFE)
-				ndw = 0x3FFE;
-
-			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
-			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
-					WRITE_DATA_DST_SEL(1));
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= r600_flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		/* DMA */
-		si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
-	}
-}
-
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
 	struct radeon_ring *ring = &rdev->ring[ridx];
@@ -5372,52 +5312,53 @@
 	if (buffer == NULL)
 		return;
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
-	buffer[count++] = 0x80000000;
-	buffer[count++] = 0x80000000;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
 
 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
 		for (ext = sect->section; ext->extent != NULL; ++ext) {
 			if (sect->id == SECT_CONTEXT) {
-				buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
-				buffer[count++] = ext->reg_index - 0xa000;
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
 				for (i = 0; i < ext->reg_count; i++)
-					buffer[count++] = ext->extent[i];
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
 			} else {
 				return;
 			}
 		}
 	}
 
-	buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
-	buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (rdev->family) {
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
-		buffer[count++] = 0x2a00126a;
+		buffer[count++] = cpu_to_le32(0x2a00126a);
 		break;
 	case CHIP_VERDE:
-		buffer[count++] = 0x0000124a;
+		buffer[count++] = cpu_to_le32(0x0000124a);
 		break;
 	case CHIP_OLAND:
-		buffer[count++] = 0x00000082;
+		buffer[count++] = cpu_to_le32(0x00000082);
 		break;
 	case CHIP_HAINAN:
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	default:
-		buffer[count++] = 0x00000000;
+		buffer[count++] = cpu_to_le32(0x00000000);
 		break;
 	}
 
-	buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
-	buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
-	buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
-	buffer[count++] = 0;
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
 }
 
 static void si_init_pg(struct radeon_device *rdev)
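
The si.c hunk converts every dword of the clear-state buffer with cpu_to_le32(), so the buffer the CP fetches is little-endian regardless of host endianness. A user-space analogue of that conversion, using glibc's htole32() from <endian.h> in place of the kernel's cpu_to_le32():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Command/state buffers consumed by the GPU are little-endian, so every
 * dword is converted as the buffer is built.  htole32() is a no-op on
 * little-endian hosts and byte-swaps on big-endian ones.
 */
int main(void)
{
	uint32_t buffer[4];
	unsigned int count = 0;

	buffer[count++] = htole32(0x80000000);
	buffer[count++] = htole32(0x2a00126a);

	for (unsigned int i = 0; i < count; i++)
		printf("dw%u = 0x%08x\n", i, buffer[i]);
	return 0;
}
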
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 49909d2..8e8f461 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_trace.h"
 #include "sid.h"
 
 u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
@@ -75,11 +76,12 @@
 			uint64_t addr, unsigned count,
 			uint32_t incr, uint32_t flags)
 {
-	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 	uint64_t value;
 	unsigned ndw;
 
-	if (flags & RADEON_VM_PAGE_SYSTEM) {
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if (flags & R600_PTE_SYSTEM) {
 		while (count) {
 			ndw = count * 2;
 			if (ndw > 0xFFFFE)
@@ -90,16 +92,10 @@
 			ib->ptr[ib->length_dw++] = pe;
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & RADEON_VM_PAGE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & RADEON_VM_PAGE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
 				addr += incr;
-				value |= r600_flags;
+				value |= flags;
 				ib->ptr[ib->length_dw++] = value;
 				ib->ptr[ib->length_dw++] = upper_32_bits(value);
 			}
@@ -110,7 +106,7 @@
 			if (ndw > 0xFFFFE)
 				ndw = 0xFFFFE;
 
-			if (flags & RADEON_VM_PAGE_VALID)
+			if (flags & R600_PTE_VALID)
 				value = addr;
 			else
 				value = 0;
@@ -118,7 +114,7 @@
 			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 			ib->ptr[ib->length_dw++] = pe; /* dst addr */
 			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+			ib->ptr[ib->length_dw++] = flags; /* mask */
 			ib->ptr[ib->length_dw++] = 0;
 			ib->ptr[ib->length_dw++] = value; /* value */
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
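
The si_dma.c hunk drops the per-call flag translation: the caller now passes already-encoded R600_PTE_* flags, and each entry is simply the 4 KiB-aligned page address OR'd with those flags. A standalone model of that packing (the flag values below are illustrative placeholders, not the hardware encoding):

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1u << 0)	/* placeholder bit */
#define PTE_SYSTEM	(1u << 1)	/* placeholder bit */

static uint64_t pack_pte(uint64_t addr, uint32_t flags)
{
	uint64_t value = addr & 0xFFFFFFFFFFFFF000ULL;	/* 4K-aligned page address */

	return value | flags;				/* access flags in the low bits */
}

int main(void)
{
	printf("pte = 0x%016llx\n",
	       (unsigned long long)pack_pte(0x123456789abcdef0ULL,
					    PTE_VALID | PTE_SYSTEM));
	return 0;
}
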
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2332aa1..0b00c79 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3589,7 +3589,12 @@
 		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
 	}
 
-	si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0);
+	/* Setting this to false forces the performance state to low if the crtcs are
+	 * disabled.  This can be a problem on PowerXpress systems, or when the card is
+	 * used for offscreen rendering or compute with no crtcs enabled.  Set it to
+	 * true for now so that performance scales even if the displays are off.
+	 */
+	si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/);
 }
 
 static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
@@ -4553,7 +4558,7 @@
 		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
 
 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
-		table->systemFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
+		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
 
 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
 		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7e2e0ea..b322acc 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -478,7 +478,7 @@
 #define		STATE3_MASK				(0x1f << 15)
 #define		STATE3_SHIFT				15
 
-#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x2808
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x28e8
 #define		TRAIN_DONE_D0      			(1 << 30)
 #define		TRAIN_DONE_D1      			(1 << 31)
 
@@ -683,6 +683,51 @@
  * bit5 = 176.4 kHz
  * bit6 = 192 kHz
  */
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC         0x37
+#       define VIDEO_LIPSYNC(x)                           (((x) & 0xff) << 0)
+#       define AUDIO_LIPSYNC(x)                           (((x) & 0xff) << 8)
+/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
+ * 0   = invalid
+ * x   = legal delay value
+ * 255 = sync not supported
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR             0x38
+#       define HBR_CAPABLE                                (1 << 0) /* enabled by default */
+
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0               0x3a
+#       define MANUFACTURER_ID(x)                        (((x) & 0xffff) << 0)
+#       define PRODUCT_ID(x)                             (((x) & 0xffff) << 16)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1               0x3b
+#       define SINK_DESCRIPTION_LEN(x)                   (((x) & 0xff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2               0x3c
+#       define PORT_ID0(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3               0x3d
+#       define PORT_ID1(x)                               (((x) & 0xffffffff) << 0)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4               0x3e
+#       define DESCRIPTION0(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION1(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION2(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION3(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5               0x3f
+#       define DESCRIPTION4(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION5(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION6(x)                           (((x) & 0xff) << 16)
+#       define DESCRIPTION7(x)                           (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6               0x40
+#       define DESCRIPTION8(x)                           (((x) & 0xff) << 0)
+#       define DESCRIPTION9(x)                           (((x) & 0xff) << 8)
+#       define DESCRIPTION10(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION11(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7               0x41
+#       define DESCRIPTION12(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION13(x)                          (((x) & 0xff) << 8)
+#       define DESCRIPTION14(x)                          (((x) & 0xff) << 16)
+#       define DESCRIPTION15(x)                          (((x) & 0xff) << 24)
+#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8               0x42
+#       define DESCRIPTION16(x)                          (((x) & 0xff) << 0)
+#       define DESCRIPTION17(x)                          (((x) & 0xff) << 8)
+
 #define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL          0x54
 #       define AUDIO_ENABLED                             (1 << 31)
 
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9..d8e835a 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  Choose this option if you have an R-Car chipset.
 	  If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d1..2ee44ca 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,7 +1,9 @@
 config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
 	depends on DRM && (ARM || SUPERH)
+	select BACKLIGHT_CLASS_DEVICE
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	help
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/drm/tegra/Kconfig
similarity index 86%
rename from drivers/gpu/host1x/drm/Kconfig
rename to drivers/gpu/drm/tegra/Kconfig
index 69853a4..8961ba6 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,7 +1,10 @@
 config DRM_TEGRA
 	bool "NVIDIA Tegra DRM"
+	depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
 	depends on DRM
+	select TEGRA_HOST1X
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -13,6 +16,11 @@
 
 if DRM_TEGRA
 
+config DRM_TEGRA_DEBUG
+	bool "NVIDIA Tegra DRM debug support"
+	help
+	  Say yes here to enable debugging support.
+
 config DRM_TEGRA_STAGING
 	bool "Enable HOST1X interface"
 	depends on STAGING
@@ -21,9 +29,4 @@
 
 	  If unsure, choose N.
 
-config DRM_TEGRA_DEBUG
-	bool "NVIDIA Tegra DRM debug support"
-	help
-	  Say yes here to enable debugging support.
-
 endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 0000000..edc76ab
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,15 @@
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+	bus.o \
+	drm.o \
+	gem.o \
+	fb.o \
+	dc.o \
+	output.o \
+	rgb.o \
+	hdmi.o \
+	gr2d.o \
+	gr3d.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644
index 0000000..565f8f7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/bus.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static int drm_host1x_set_busid(struct drm_device *dev,
+				struct drm_master *master)
+{
+	const char *device = dev_name(dev->dev);
+	const char *driver = dev->driver->name;
+	const char *bus = dev->dev->bus->name;
+	int length;
+
+	master->unique_len = strlen(bus) + 1 + strlen(device);
+	master->unique_size = master->unique_len;
+
+	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+	if (!master->unique)
+		return -ENOMEM;
+
+	snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
+
+	length = strlen(driver) + 1 + master->unique_len;
+
+	dev->devname = kmalloc(length + 1, GFP_KERNEL);
+	if (!dev->devname)
+		return -ENOMEM;
+
+	snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
+
+	return 0;
+}
+
+static struct drm_bus drm_host1x_bus = {
+	.bus_type = DRIVER_BUS_HOST1X,
+	.set_busid = drm_host1x_set_busid,
+};
+
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
+{
+	struct drm_device *drm;
+	int ret;
+
+	INIT_LIST_HEAD(&driver->device_list);
+	driver->bus = &drm_host1x_bus;
+
+	drm = drm_dev_alloc(driver, &device->dev);
+	if (!drm)
+		return -ENOMEM;
+
+	ret = drm_dev_register(drm, 0);
+	if (ret)
+		goto err_free;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+err_free:
+	drm_dev_free(drm);
+	return ret;
+}
+
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
+{
+	struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
+
+	drm_put_dev(tegra->drm);
+}
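
drm_host1x_set_busid() above builds the unique bus ID as "<bus>:<device>" and the devname as "<driver>@<unique>", each allocated to exactly the length needed. A user-space sketch of that string construction (the device name used here is made up for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *bus = "host1x", *device = "50000000.host1x", *driver = "tegra";
	size_t unique_len = strlen(bus) + 1 + strlen(device);
	char *unique = malloc(unique_len + 1);
	char *devname;

	if (!unique)
		return 1;
	snprintf(unique, unique_len + 1, "%s:%s", bus, device);	/* "bus:device" */

	devname = malloc(strlen(driver) + 1 + unique_len + 1);
	if (!devname) {
		free(unique);
		return 1;
	}
	sprintf(devname, "%s@%s", driver, unique);		/* "driver@unique" */

	printf("unique:  %s\ndevname: %s\n", unique, devname);
	free(devname);
	free(unique);
	return 0;
}
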
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/drm/tegra/dc.c
similarity index 92%
rename from drivers/gpu/host1x/drm/dc.c
rename to drivers/gpu/drm/tegra/dc.c
index b1a05ad..ae1cb31e 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -8,13 +8,9 @@
  */
 
 #include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
 
-#include "host1x_client.h"
 #include "dc.h"
 #include "drm.h"
 #include "gem.h"
@@ -51,6 +47,8 @@
 	window.dst.h = crtc_h;
 	window.format = tegra_dc_format(fb->pixel_format);
 	window.bits_per_pixel = fb->bits_per_pixel;
+	window.bottom_up = tegra_fb_is_bottom_up(fb);
+	window.tiled = tegra_fb_is_tiled(fb);
 
 	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
 		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@
 
 static void tegra_plane_destroy(struct drm_plane *plane)
 {
+	struct tegra_plane *p = to_tegra_plane(plane);
+
 	tegra_plane_disable(plane);
 	drm_plane_cleanup(plane);
+	kfree(p);
 }
 
 static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@
 	for (i = 0; i < 2; i++) {
 		struct tegra_plane *plane;
 
-		plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+		plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 		if (!plane)
 			return -ENOMEM;
 
@@ -133,8 +134,10 @@
 		err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
 				     &tegra_plane_funcs, plane_formats,
 				     ARRAY_SIZE(plane_formats), false);
-		if (err < 0)
+		if (err < 0) {
+			kfree(plane);
 			return err;
+		}
 	}
 
 	return 0;
@@ -145,6 +148,7 @@
 {
 	unsigned int format = tegra_dc_format(fb->pixel_format);
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+	unsigned int h_offset = 0, v_offset = 0;
 	unsigned long value;
 
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@
 	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
 	tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
 
+	if (tegra_fb_is_tiled(fb)) {
+		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	} else {
+		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+	/* make sure bottom-up buffers are properly displayed */
+	if (tegra_fb_is_bottom_up(fb)) {
+		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+		value |= INVERT_V;
+		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+		v_offset += fb->height - 1;
+	} else {
+		value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+		value &= ~INVERT_V;
+		tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+	}
+
+	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
 	value = GENERAL_UPDATE | WIN_A_UPDATE;
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
@@ -255,14 +285,26 @@
 	return 0;
 }
 
+static void drm_crtc_clear(struct drm_crtc *crtc)
+{
+	memset(crtc, 0, sizeof(*crtc));
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	drm_crtc_clear(crtc);
+}
+
 static const struct drm_crtc_funcs tegra_crtc_funcs = {
 	.page_flip = tegra_dc_page_flip,
 	.set_config = drm_crtc_helper_set_config,
-	.destroy = drm_crtc_cleanup,
+	.destroy = tegra_dc_destroy,
 };
 
 static void tegra_crtc_disable(struct drm_crtc *crtc)
 {
+	struct tegra_dc *dc = to_tegra_dc(crtc);
 	struct drm_device *drm = crtc->dev;
 	struct drm_plane *plane;
 
@@ -277,6 +319,8 @@
 			}
 		}
 	}
+
+	drm_vblank_off(drm, dc->pipe);
 }
 
 static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@
 		tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
 	}
 
+	if (window->bottom_up)
+		v_offset += window->src.h - 1;
+
 	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
 	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
+	if (window->tiled) {
+		value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+			DC_WIN_BUFFER_ADDR_MODE_TILE;
+	} else {
+		value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+			DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
 	value = WIN_ENABLE;
 
 	if (yuv) {
@@ -512,6 +569,9 @@
 		value |= COLOR_EXPAND;
 	}
 
+	if (window->bottom_up)
+		value |= INVERT_V;
+
 	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
 
 	/*
@@ -1041,30 +1101,30 @@
 	return 0;
 }
 
-static int tegra_dc_drm_init(struct host1x_client *client,
-			     struct drm_device *drm)
+static int tegra_dc_init(struct host1x_client *client)
 {
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
 
-	dc->pipe = drm->mode_config.num_crtc;
+	dc->pipe = tegra->drm->mode_config.num_crtc;
 
-	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+	drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
 	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
-	err = tegra_dc_rgb_init(drm, dc);
+	err = tegra_dc_rgb_init(tegra->drm, dc);
 	if (err < 0 && err != -ENODEV) {
 		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
 		return err;
 	}
 
-	err = tegra_dc_add_planes(drm, dc);
+	err = tegra_dc_add_planes(tegra->drm, dc);
 	if (err < 0)
 		return err;
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_dc_debugfs_init(dc, drm->primary);
+		err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
 		if (err < 0)
 			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
 	}
@@ -1080,7 +1140,7 @@
 	return 0;
 }
 
-static int tegra_dc_drm_exit(struct host1x_client *client)
+static int tegra_dc_exit(struct host1x_client *client)
 {
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	int err;
@@ -1103,13 +1163,12 @@
 }
 
 static const struct host1x_client_ops dc_client_ops = {
-	.drm_init = tegra_dc_drm_init,
-	.drm_exit = tegra_dc_drm_exit,
+	.init = tegra_dc_init,
+	.exit = tegra_dc_exit,
 };
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct resource *regs;
 	struct tegra_dc *dc;
 	int err;
@@ -1153,7 +1212,7 @@
 		return err;
 	}
 
-	err = host1x_register_client(host1x, &dc->client);
+	err = host1x_client_register(&dc->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
@@ -1167,17 +1226,22 @@
 
 static int tegra_dc_remove(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_dc *dc = platform_get_drvdata(pdev);
 	int err;
 
-	err = host1x_unregister_client(host1x, &dc->client);
+	err = host1x_client_unregister(&dc->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
 			err);
 		return err;
 	}
 
+	err = tegra_dc_rgb_remove(dc);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+		return err;
+	}
+
 	clk_disable_unprepare(dc->clk);
 
 	return 0;
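
The dc.c changes add bottom-up framebuffer support: for such buffers the window's vertical start offset points at the last line (height - 1) and the INVERT_V bit makes the hardware scan upwards. A standalone model of that offset calculation (plain flag instead of the real register bit):

#include <stdbool.h>
#include <stdio.h>

struct window {
	unsigned int height;
	bool bottom_up;
};

static void program_offsets(const struct window *win,
			    unsigned int *v_offset, bool *invert_v)
{
	*v_offset = 0;
	*invert_v = false;

	if (win->bottom_up) {
		*v_offset += win->height - 1;	/* start at the bottom line */
		*invert_v = true;		/* walk the buffer upwards */
	}
}

int main(void)
{
	struct window win = { .height = 1080, .bottom_up = true };
	unsigned int v_offset;
	bool invert_v;

	program_offsets(&win, &v_offset, &invert_v);
	printf("v_offset=%u invert_v=%d\n", v_offset, invert_v);
	return 0;
}
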
diff --git a/drivers/gpu/host1x/drm/dc.h b/drivers/gpu/drm/tegra/dc.h
similarity index 97%
rename from drivers/gpu/host1x/drm/dc.h
rename to drivers/gpu/drm/tegra/dc.h
index 79eaec9..91bbda2 100644
--- a/drivers/gpu/host1x/drm/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -302,6 +302,7 @@
 #define DC_WIN_CSC_KVB				0x618
 
 #define DC_WIN_WIN_OPTIONS			0x700
+#define INVERT_V     (1 <<  2)
 #define COLOR_EXPAND (1 <<  6)
 #define CSC_ENABLE   (1 << 18)
 #define WIN_ENABLE   (1 << 30)
@@ -365,6 +366,10 @@
 #define DC_WIN_BUF_STRIDE			0x70b
 #define DC_WIN_UV_BUF_STRIDE			0x70c
 #define DC_WIN_BUFFER_ADDR_MODE			0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR		(0 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE		(1 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV	(0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV		(1 << 16)
 #define DC_WIN_DV_CONTROL			0x70e
 
 #define DC_WIN_BLEND_NOKEY			0x70f
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 0000000..28e1781
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/host1x.h>
+
+#include "drm.h"
+#include "gem.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct tegra_drm_file {
+	struct list_head contexts;
+};
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct host1x_device *device = to_host1x_device(drm->dev);
+	struct tegra_drm *tegra;
+	int err;
+
+	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+	if (!tegra)
+		return -ENOMEM;
+
+	dev_set_drvdata(drm->dev, tegra);
+	mutex_init(&tegra->clients_lock);
+	INIT_LIST_HEAD(&tegra->clients);
+	drm->dev_private = tegra;
+	tegra->drm = drm;
+
+	drm_mode_config_init(drm);
+
+	err = host1x_device_init(device);
+	if (err < 0)
+		return err;
+
+	/*
+	 * We don't use the drm_irq_install() helpers provided by the DRM
+	 * core, so we need to set this manually in order to allow the
+	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+	 */
+	drm->irq_enabled = true;
+
+	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (err < 0)
+		return err;
+
+	err = tegra_drm_fb_init(drm);
+	if (err < 0)
+		return err;
+
+	drm_kms_helper_poll_init(drm);
+
+	return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+	struct host1x_device *device = to_host1x_device(drm->dev);
+	int err;
+
+	drm_kms_helper_poll_fini(drm);
+	tegra_drm_fb_exit(drm);
+	drm_vblank_cleanup(drm);
+	drm_mode_config_cleanup(drm);
+
+	err = host1x_device_exit(device);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+	struct tegra_drm_file *fpriv;
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fpriv->contexts);
+	filp->driver_priv = fpriv;
+
+	return 0;
+}
+
+static void tegra_drm_context_free(struct tegra_drm_context *context)
+{
+	context->client->ops->close_channel(context);
+	kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+	struct tegra_drm *tegra = drm->dev_private;
+
+	tegra_fbdev_restore_mode(tegra->fbdev);
+}
+
+static struct host1x_bo *
+host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+{
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, handle);
+	if (!gem)
+		return NULL;
+
+	mutex_lock(&drm->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&drm->struct_mutex);
+
+	bo = to_tegra_bo(gem);
+	return &bo->base;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+		     struct drm_tegra_submit *args, struct drm_device *drm,
+		     struct drm_file *file)
+{
+	unsigned int num_cmdbufs = args->num_cmdbufs;
+	unsigned int num_relocs = args->num_relocs;
+	unsigned int num_waitchks = args->num_waitchks;
+	struct drm_tegra_cmdbuf __user *cmdbufs =
+		(void * __user)(uintptr_t)args->cmdbufs;
+	struct drm_tegra_reloc __user *relocs =
+		(void * __user)(uintptr_t)args->relocs;
+	struct drm_tegra_waitchk __user *waitchks =
+		(void * __user)(uintptr_t)args->waitchks;
+	struct drm_tegra_syncpt syncpt;
+	struct host1x_job *job;
+	int err;
+
+	/* We don't yet support more than one syncpt_incr struct per submit */
+	if (args->num_syncpts != 1)
+		return -EINVAL;
+
+	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+			       args->num_relocs, args->num_waitchks);
+	if (!job)
+		return -ENOMEM;
+
+	job->num_relocs = args->num_relocs;
+	job->num_waitchk = args->num_waitchks;
+	job->client = (u32)args->context;
+	job->class = context->client->base.class;
+	job->serialize = true;
+
+	while (num_cmdbufs) {
+		struct drm_tegra_cmdbuf cmdbuf;
+		struct host1x_bo *bo;
+
+		err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+		if (err)
+			goto fail;
+
+		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+		if (!bo) {
+			err = -ENOENT;
+			goto fail;
+		}
+
+		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+		num_cmdbufs--;
+		cmdbufs++;
+	}
+
+	err = copy_from_user(job->relocarray, relocs,
+			     sizeof(*relocs) * num_relocs);
+	if (err)
+		goto fail;
+
+	while (num_relocs--) {
+		struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+		struct host1x_bo *cmdbuf, *target;
+
+		cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+		target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+		reloc->cmdbuf = cmdbuf;
+		reloc->target = target;
+
+		if (!reloc->target || !reloc->cmdbuf) {
+			err = -ENOENT;
+			goto fail;
+		}
+	}
+
+	err = copy_from_user(job->waitchk, waitchks,
+			     sizeof(*waitchks) * num_waitchks);
+	if (err)
+		goto fail;
+
+	err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
+			     sizeof(syncpt));
+	if (err)
+		goto fail;
+
+	job->is_addr_reg = context->client->ops->is_addr_reg;
+	job->syncpt_incrs = syncpt.incrs;
+	job->syncpt_id = syncpt.id;
+	job->timeout = 10000;
+
+	if (args->timeout && args->timeout < 10000)
+		job->timeout = args->timeout;
+
+	err = host1x_job_pin(job, context->client->base.dev);
+	if (err)
+		goto fail;
+
+	err = host1x_job_submit(job);
+	if (err)
+		goto fail_submit;
+
+	args->fence = job->syncpt_end;
+
+	host1x_job_put(job);
+	return 0;
+
+fail_submit:
+	host1x_job_unpin(job);
+fail:
+	host1x_job_put(job);
+	return err;
+}
+
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
+{
+	return (struct tegra_drm_context *)(uintptr_t)context;
+}
+
+static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
+					struct tegra_drm_context *context)
+{
+	struct tegra_drm_context *ctx;
+
+	list_for_each_entry(ctx, &file->contexts, list)
+		if (ctx == context)
+			return true;
+
+	return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct drm_tegra_gem_create *args = data;
+	struct tegra_bo *bo;
+
+	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
+					 &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+			  struct drm_file *file)
+{
+	struct drm_tegra_gem_mmap *args = data;
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, args->handle);
+	if (!gem)
+		return -EINVAL;
+
+	bo = to_tegra_bo(gem);
+
+	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
+
+	drm_gem_object_unreference(gem);
+
+	return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_read *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	args->value = host1x_syncpt_read_min(sp);
+	return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_incr *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host1x, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	return host1x_syncpt_incr(sp);
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+	struct drm_tegra_syncpt_wait *args = data;
+	struct host1x_syncpt *sp;
+
+	sp = host1x_syncpt_get(host1x, args->id);
+	if (!sp)
+		return -EINVAL;
+
+	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+				  &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+			      struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct tegra_drm *tegra = drm->dev_private;
+	struct drm_tegra_open_channel *args = data;
+	struct tegra_drm_context *context;
+	struct tegra_drm_client *client;
+	int err = -ENODEV;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
+	list_for_each_entry(client, &tegra->clients, list)
+		if (client->base.class == args->client) {
+			err = client->ops->open_channel(client, context);
+			if (err)
+				break;
+
+			list_add(&context->list, &fpriv->contexts);
+			args->context = (uintptr_t)context;
+			context->client = client;
+			return 0;
+		}
+
+	kfree(context);
+	return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+			       struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_close_channel *args = data;
+	struct tegra_drm_context *context;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -EINVAL;
+
+	list_del(&context->list);
+	tegra_drm_context_free(context);
+
+	return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_get_syncpt *args = data;
+	struct tegra_drm_context *context;
+	struct host1x_syncpt *syncpt;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	if (args->index >= context->client->base.num_syncpts)
+		return -EINVAL;
+
+	syncpt = context->client->base.syncpts[args->index];
+	args->id = host1x_syncpt_id(syncpt);
+
+	return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+			struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_submit *args = data;
+	struct tegra_drm_context *context;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	return context->client->ops->submit(context, args, drm, file);
+}
+
+static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
+				 struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct drm_tegra_get_syncpt_base *args = data;
+	struct tegra_drm_context *context;
+	struct host1x_syncpt_base *base;
+	struct host1x_syncpt *syncpt;
+
+	context = tegra_drm_get_context(args->context);
+
+	if (!tegra_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	if (args->syncpt >= context->client->base.num_syncpts)
+		return -EINVAL;
+
+	syncpt = context->client->base.syncpts[args->syncpt];
+
+	base = host1x_syncpt_get_base(syncpt);
+	if (!base)
+		return -ENXIO;
+
+	args->id = host1x_syncpt_base_id(base);
+
+	return 0;
+}
+#endif
+
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = tegra_drm_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+		struct tegra_dc *dc = to_tegra_dc(crtc);
+
+		if (dc->pipe == pipe)
+			return crtc;
+	}
+
+	return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	/* TODO: implement real hardware counter using syncpoints */
+	return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+
+	if (!crtc)
+		return -ENODEV;
+
+	tegra_dc_enable_vblank(dc);
+
+	return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+
+	if (crtc)
+		tegra_dc_disable_vblank(dc);
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+	struct tegra_drm_file *fpriv = file->driver_priv;
+	struct tegra_drm_context *context, *tmp;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+		tegra_dc_cancel_page_flip(crtc, file);
+
+	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+		tegra_drm_context_free(context);
+
+	kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)s->private;
+	struct drm_device *drm = node->minor->dev;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&drm->mode_config.fb_lock);
+
+	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+			   fb->base.id, fb->width, fb->height, fb->depth,
+			   fb->bits_per_pixel,
+			   atomic_read(&fb->refcount.refcount));
+	}
+
+	mutex_unlock(&drm->mode_config.fb_lock);
+
+	return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(tegra_debugfs_list,
+					ARRAY_SIZE(tegra_debugfs_list),
+					minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(tegra_debugfs_list,
+				 ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM,
+	.load = tegra_drm_load,
+	.unload = tegra_drm_unload,
+	.open = tegra_drm_open,
+	.preclose = tegra_drm_preclose,
+	.lastclose = tegra_drm_lastclose,
+
+	.get_vblank_counter = tegra_drm_get_vblank_counter,
+	.enable_vblank = tegra_drm_enable_vblank,
+	.disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = tegra_debugfs_init,
+	.debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+	.gem_free_object = tegra_bo_free_object,
+	.gem_vm_ops = &tegra_bo_vm_ops,
+	.dumb_create = tegra_bo_dumb_create,
+	.dumb_map_offset = tegra_bo_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+
+	.ioctls = tegra_drm_ioctls,
+	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+	.fops = &tegra_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+			      struct tegra_drm_client *client)
+{
+	mutex_lock(&tegra->clients_lock);
+	list_add_tail(&client->list, &tegra->clients);
+	mutex_unlock(&tegra->clients_lock);
+
+	return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+				struct tegra_drm_client *client)
+{
+	mutex_lock(&tegra->clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&tegra->clients_lock);
+
+	return 0;
+}
+
+static int host1x_drm_probe(struct host1x_device *device)
+{
+	return drm_host1x_init(&tegra_drm_driver, device);
+}
+
+static int host1x_drm_remove(struct host1x_device *device)
+{
+	drm_host1x_exit(&tegra_drm_driver, device);
+
+	return 0;
+}
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+	{ .compatible = "nvidia,tegra20-dc", },
+	{ .compatible = "nvidia,tegra20-hdmi", },
+	{ .compatible = "nvidia,tegra20-gr2d", },
+	{ .compatible = "nvidia,tegra20-gr3d", },
+	{ .compatible = "nvidia,tegra30-dc", },
+	{ .compatible = "nvidia,tegra30-hdmi", },
+	{ .compatible = "nvidia,tegra30-gr2d", },
+	{ .compatible = "nvidia,tegra30-gr3d", },
+	{ .compatible = "nvidia,tegra114-hdmi", },
+	{ .compatible = "nvidia,tegra114-gr3d", },
+	{ /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+	.name = "drm",
+	.probe = host1x_drm_probe,
+	.remove = host1x_drm_remove,
+	.subdevs = host1x_drm_subdevs,
+};
+
+static int __init host1x_drm_init(void)
+{
+	int err;
+
+	err = host1x_driver_register(&host1x_drm_driver);
+	if (err < 0)
+		return err;
+
+	err = platform_driver_register(&tegra_dc_driver);
+	if (err < 0)
+		goto unregister_host1x;
+
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_dc;
+
+	err = platform_driver_register(&tegra_gr2d_driver);
+	if (err < 0)
+		goto unregister_hdmi;
+
+	err = platform_driver_register(&tegra_gr3d_driver);
+	if (err < 0)
+		goto unregister_gr2d;
+
+	return 0;
+
+unregister_gr2d:
+	platform_driver_unregister(&tegra_gr2d_driver);
+unregister_hdmi:
+	platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+	platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+	host1x_driver_unregister(&host1x_drm_driver);
+	return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+	platform_driver_unregister(&tegra_gr3d_driver);
+	platform_driver_unregister(&tegra_gr2d_driver);
+	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_dc_driver);
+	host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
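
host1x_drm_init() above registers several platform drivers in order and, on failure, unwinds the ones already registered in reverse order via goto labels. A standalone sketch of that unwind pattern, with hypothetical register_a/b/c stand-ins:

#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* simulate a failure */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int init_all(void)
{
	int err;

	err = register_a();
	if (err < 0)
		return err;

	err = register_b();
	if (err < 0)
		goto unreg_a;

	err = register_c();
	if (err < 0)
		goto unreg_b;

	return 0;

unreg_b:
	unregister_b();		/* undo in reverse order of registration */
unreg_a:
	unregister_a();
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}
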
diff --git a/drivers/gpu/host1x/drm/drm.h b/drivers/gpu/drm/tegra/drm.h
similarity index 72%
rename from drivers/gpu/host1x/drm/drm.h
rename to drivers/gpu/drm/tegra/drm.h
index 02ce020..fdfe259 100644
--- a/drivers/gpu/host1x/drm/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -10,14 +10,14 @@
 #ifndef HOST1X_DRM_H
 #define HOST1X_DRM_H 1
 
+#include <uapi/drm/tegra_drm.h>
+#include <linux/host1x.h>
+
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fixed.h>
-#include <uapi/drm/tegra_drm.h>
-
-#include "host1x.h"
 
 struct tegra_fb {
 	struct drm_framebuffer base;
@@ -30,17 +30,8 @@
 	struct tegra_fb *fb;
 };
 
-struct host1x_drm {
+struct tegra_drm {
 	struct drm_device *drm;
-	struct device *dev;
-	void __iomem *regs;
-	struct clk *clk;
-	int syncpt;
-	int irq;
-
-	struct mutex drm_clients_lock;
-	struct list_head drm_clients;
-	struct list_head drm_active;
 
 	struct mutex clients_lock;
 	struct list_head clients;
@@ -48,66 +39,60 @@
 	struct tegra_fbdev *fbdev;
 };
 
-struct host1x_client;
+struct tegra_drm_client;
 
-struct host1x_drm_context {
-	struct host1x_client *client;
+struct tegra_drm_context {
+	struct tegra_drm_client *client;
 	struct host1x_channel *channel;
 	struct list_head list;
 };
 
-struct host1x_client_ops {
-	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
-	int (*drm_exit)(struct host1x_client *client);
-	int (*open_channel)(struct host1x_client *client,
-			    struct host1x_drm_context *context);
-	void (*close_channel)(struct host1x_drm_context *context);
-	int (*submit)(struct host1x_drm_context *context,
+struct tegra_drm_client_ops {
+	int (*open_channel)(struct tegra_drm_client *client,
+			    struct tegra_drm_context *context);
+	void (*close_channel)(struct tegra_drm_context *context);
+	int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+	int (*submit)(struct tegra_drm_context *context,
 		      struct drm_tegra_submit *args, struct drm_device *drm,
 		      struct drm_file *file);
 };
 
-struct host1x_drm_file {
-	struct list_head contexts;
-};
+int tegra_drm_submit(struct tegra_drm_context *context,
+		     struct drm_tegra_submit *args, struct drm_device *drm,
+		     struct drm_file *file);
 
-struct host1x_client {
-	struct host1x_drm *host1x;
-	struct device *dev;
-
-	const struct host1x_client_ops *ops;
-
-	enum host1x_class class;
-	struct host1x_channel *channel;
-
-	struct host1x_syncpt **syncpts;
-	unsigned int num_syncpts;
-
+struct tegra_drm_client {
+	struct host1x_client base;
 	struct list_head list;
+
+	const struct tegra_drm_client_ops *ops;
 };
 
-extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x_drm *host1x);
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_drm_client, base);
+}
 
-extern int host1x_register_client(struct host1x_drm *host1x,
-				  struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x_drm *host1x,
-				    struct host1x_client *client);
+extern int tegra_drm_register_client(struct tegra_drm *tegra,
+				     struct tegra_drm_client *client);
+extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
+				       struct tegra_drm_client *client);
+
+extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+extern int tegra_drm_exit(struct tegra_drm *tegra);
 
 struct tegra_output;
 
 struct tegra_dc {
 	struct host1x_client client;
-	spinlock_t lock;
-
-	struct host1x_drm *host1x;
 	struct device *dev;
+	spinlock_t lock;
 
 	struct drm_crtc base;
 	int pipe;
 
 	struct clk *clk;
-
 	void __iomem *regs;
 	int irq;
 
@@ -123,7 +108,8 @@
 	struct drm_pending_vblank_event *event;
 };
 
-static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
 {
 	return container_of(client, struct tegra_dc, client);
 }
@@ -162,6 +148,8 @@
 	unsigned int format;
 	unsigned int stride[2];
 	unsigned long base[3];
+	bool bottom_up;
+	bool tiled;
 };
 
 /* from dc.c */
@@ -249,23 +237,34 @@
 	return output ? -ENOSYS : -EINVAL;
 }
 
+/* from bus.c */
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
+
 /* from rgb.c */
 extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
 extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
 extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
 
 /* from output.c */
-extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_probe(struct tegra_output *output);
+extern int tegra_output_remove(struct tegra_output *output);
 extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
 				    unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
 extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct drm_driver tegra_drm_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
 
 #endif /* HOST1X_DRM_H */
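The layering introduced here deserves a note: each engine driver embeds a struct tegra_drm_client, which in turn embeds a struct host1x_client as its base, and both levels are recovered with container_of(). A minimal sketch of the pattern, illustrative only (the gr2d/gr3d drivers added later in this series are the real users; the name "foo" is hypothetical):

  struct foo {
  	struct tegra_drm_client client;
  	/* engine-specific state ... */
  };

  static inline struct foo *to_foo(struct tegra_drm_client *client)
  {
  	return container_of(client, struct foo, client);
  }

  static int foo_init(struct host1x_client *client)
  {
  	/* first hop: host1x_client -> tegra_drm_client */
  	struct tegra_drm_client *drm = host1x_to_drm_client(client);
  	/* second hop: tegra_drm_client -> driver structure */
  	struct foo *foo = to_foo(drm);

  	(void)foo;
  	return 0;
  }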
diff --git a/drivers/gpu/host1x/drm/fb.c b/drivers/gpu/drm/tegra/fb.c
similarity index 92%
rename from drivers/gpu/host1x/drm/fb.c
rename to drivers/gpu/drm/tegra/fb.c
index 979a3e3..490f771 100644
--- a/drivers/gpu/host1x/drm/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -10,8 +10,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-
 #include "drm.h"
 #include "gem.h"
 
@@ -36,6 +34,26 @@
 	return fb->planes[index];
 }
 
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+		return true;
+
+	return false;
+}
+
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	if (fb->planes[0]->flags & TEGRA_BO_TILED)
+		return true;
+
+	return false;
+}
+
 static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@
 
 	size = cmd.pitches[0] * cmd.height;
 
-	bo = tegra_bo_create(drm, size);
+	bo = tegra_bo_create(drm, size, 0);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
@@ -323,10 +341,10 @@
 
 static void tegra_fb_output_poll_changed(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 
-	if (host1x->fbdev)
-		drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+	if (tegra->fbdev)
+		drm_fb_helper_hotplug_event(&tegra->fbdev->base);
 }
 
 static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@
 
 int tegra_drm_fb_init(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 	struct tegra_fbdev *fbdev;
 
 	drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@
 	if (IS_ERR(fbdev))
 		return PTR_ERR(fbdev);
 
-	host1x->fbdev = fbdev;
+	tegra->fbdev = fbdev;
 
 	return 0;
 }
 
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
-	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_drm *tegra = drm->dev_private;
 
-	tegra_fbdev_free(host1x->fbdev);
+	tegra_fbdev_free(tegra->fbdev);
 }
 
 void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
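The two new helpers simply report the flags of the framebuffer's first plane. Their natural consumer is the window setup path in the display controller, which is not part of the hunks shown here; a hedged sketch, assuming the structure extended in the drm.h hunk above is struct tegra_dc_window:

  /* illustrative only; the real caller would live in dc.c */
  static void example_window_from_fb(struct tegra_dc_window *window,
  				   struct drm_framebuffer *fb)
  {
  	window->bottom_up = tegra_fb_is_bottom_up(fb);
  	window->tiled = tegra_fb_is_tiled(fb);
  }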
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/drm/tegra/gem.c
similarity index 86%
rename from drivers/gpu/host1x/drm/gem.c
rename to drivers/gpu/drm/tegra/gem.c
index 59623de..28a9cbc 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -18,25 +18,18 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/tegra_drm.h>
 
 #include "gem.h"
 
-static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
 {
 	return container_of(bo, struct tegra_bo, base);
 }
 
 static void tegra_bo_put(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 	struct drm_device *drm = obj->gem.dev;
 
 	mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@
 
 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->paddr;
 }
@@ -57,7 +50,7 @@
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->vaddr;
 }
@@ -68,7 +61,7 @@
 
 static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
 	return obj->vaddr + page * PAGE_SIZE;
 }
@@ -80,7 +73,7 @@
 
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
-	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 	struct drm_device *drm = obj->gem.dev;
 
 	mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@
 	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+				 unsigned long flags)
 {
 	struct tegra_bo *bo;
 	int err;
@@ -135,6 +129,12 @@
 	if (err)
 		goto err_mmap;
 
+	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+		bo->flags |= TEGRA_BO_TILED;
+
+	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+		bo->flags |= TEGRA_BO_BOTTOM_UP;
+
 	return bo;
 
 err_mmap:
@@ -149,14 +149,15 @@
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-					    struct drm_device *drm,
-					    unsigned int size,
-					    unsigned int *handle)
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned long flags,
+					     unsigned int *handle)
 {
 	struct tegra_bo *bo;
 	int ret;
 
-	bo = tegra_bo_create(drm, size);
+	bo = tegra_bo_create(drm, size, flags);
 	if (IS_ERR(bo))
 		return bo;
 
@@ -178,7 +179,6 @@
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
 	drm_gem_free_mmap_offset(gem);
-
 	drm_gem_object_release(gem);
 	tegra_bo_destroy(gem->dev, bo);
 
@@ -197,8 +197,8 @@
 	if (args->size < args->pitch * args->height)
 		args->size = args->pitch * args->height;
 
-	bo = tegra_bo_create_with_handle(file, drm, args->size,
-					    &args->handle);
+	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+					 &args->handle);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/drm/tegra/gem.h
similarity index 84%
rename from drivers/gpu/host1x/drm/gem.h
rename to drivers/gpu/drm/tegra/gem.h
index 492533a..7674000 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -19,14 +19,18 @@
 #ifndef __HOST1X_GEM_H
 #define __HOST1X_GEM_H
 
+#include <linux/host1x.h>
+
 #include <drm/drm.h>
 #include <drm/drmP.h>
 
-#include "host1x_bo.h"
+#define TEGRA_BO_TILED     (1 << 0)
+#define TEGRA_BO_BOTTOM_UP (1 << 1)
 
 struct tegra_bo {
 	struct drm_gem_object gem;
 	struct host1x_bo base;
+	unsigned long flags;
 	dma_addr_t paddr;
 	void *vaddr;
 };
@@ -38,11 +42,13 @@
 
 extern const struct host1x_bo_ops tegra_bo_ops;
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+				 unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-					    struct drm_device *drm,
-					    unsigned int size,
-					    unsigned int *handle);
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned long flags,
+					     unsigned int *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 			 struct drm_mode_create_dumb *args);
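With the new flags parameter, the userspace creation flags from the UAPI header (DRM_TEGRA_GEM_CREATE_TILED, DRM_TEGRA_GEM_CREATE_BOTTOM_UP) are translated into the driver-internal TEGRA_BO_* bits at allocation time. A minimal sketch of a creation path using the extended helper; the wrapper is hypothetical and the real GEM-create ioctl handler is not in the hunks shown here:

  static int example_create_tiled_bo(struct drm_device *drm,
  				   struct drm_file *file,
  				   unsigned int size,
  				   unsigned int *handle)
  {
  	struct tegra_bo *bo;

  	bo = tegra_bo_create_with_handle(file, drm, size,
  					 DRM_TEGRA_GEM_CREATE_TILED,
  					 handle);
  	if (IS_ERR(bo))
  		return PTR_ERR(bo);

  	/* bo->flags now has TEGRA_BO_TILED set */
  	return 0;
  }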
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644
index 0000000..7ec4259
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
+struct gr2d {
+	struct tegra_drm_client client;
+	struct host1x_channel *channel;
+	struct clk *clk;
+
+	DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+	return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct gr2d *gr2d = to_gr2d(drm);
+
+	gr2d->channel = host1x_channel_request(client->dev);
+	if (!gr2d->channel)
+		return -ENOMEM;
+
+	client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+	if (!client->syncpts[0]) {
+		host1x_channel_free(gr2d->channel);
+		return -ENOMEM;
+	}
+
+	return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct gr2d *gr2d = to_gr2d(drm);
+	int err;
+
+	err = tegra_drm_unregister_client(tegra, drm);
+	if (err < 0)
+		return err;
+
+	host1x_syncpt_free(client->syncpts[0]);
+	host1x_channel_free(gr2d->channel);
+
+	return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+	.init = gr2d_init,
+	.exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+			     struct tegra_drm_context *context)
+{
+	struct gr2d *gr2d = to_gr2d(client);
+
+	context->channel = host1x_channel_get(gr2d->channel);
+	if (!context->channel)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+	host1x_channel_put(context->channel);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+	struct gr2d *gr2d = dev_get_drvdata(dev);
+
+	switch (class) {
+	case HOST1X_CLASS_HOST1X:
+		if (offset == 0x2b)
+			return 1;
+
+		break;
+
+	case HOST1X_CLASS_GR2D:
+	case HOST1X_CLASS_GR2D_SB:
+		if (offset >= GR2D_NUM_REGS)
+			break;
+
+		if (test_bit(offset, gr2d->addr_regs))
+			return 1;
+
+		break;
+	}
+
+	return 0;
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+	.open_channel = gr2d_open_channel,
+	.close_channel = gr2d_close_channel,
+	.is_addr_reg = gr2d_is_addr_reg,
+	.submit = tegra_drm_submit,
+};
+
+static const struct of_device_id gr2d_match[] = {
+	{ .compatible = "nvidia,tegra30-gr2d" },
+	{ .compatible = "nvidia,tegra20-gr2d" },
+	{ },
+};
+
+static const u32 gr2d_addr_regs[] = {
+	GR2D_UA_BASE_ADDR,
+	GR2D_VA_BASE_ADDR,
+	GR2D_PAT_BASE_ADDR,
+	GR2D_DSTA_BASE_ADDR,
+	GR2D_DSTB_BASE_ADDR,
+	GR2D_DSTC_BASE_ADDR,
+	GR2D_SRCA_BASE_ADDR,
+	GR2D_SRCB_BASE_ADDR,
+	GR2D_SRC_BASE_ADDR_SB,
+	GR2D_DSTA_BASE_ADDR_SB,
+	GR2D_DSTB_BASE_ADDR_SB,
+	GR2D_UA_BASE_ADDR_SB,
+	GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct host1x_syncpt **syncpts;
+	struct gr2d *gr2d;
+	unsigned int i;
+	int err;
+
+	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+	if (!gr2d)
+		return -ENOMEM;
+
+	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+	if (!syncpts)
+		return -ENOMEM;
+
+	gr2d->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(gr2d->clk)) {
+		dev_err(dev, "cannot get clock\n");
+		return PTR_ERR(gr2d->clk);
+	}
+
+	err = clk_prepare_enable(gr2d->clk);
+	if (err) {
+		dev_err(dev, "cannot turn on clock\n");
+		return err;
+	}
+
+	INIT_LIST_HEAD(&gr2d->client.base.list);
+	gr2d->client.base.ops = &gr2d_client_ops;
+	gr2d->client.base.dev = dev;
+	gr2d->client.base.class = HOST1X_CLASS_GR2D;
+	gr2d->client.base.syncpts = syncpts;
+	gr2d->client.base.num_syncpts = 1;
+
+	INIT_LIST_HEAD(&gr2d->client.list);
+	gr2d->client.ops = &gr2d_ops;
+
+	err = host1x_client_register(&gr2d->client.base);
+	if (err < 0) {
+		dev_err(dev, "failed to register host1x client: %d\n", err);
+		clk_disable_unprepare(gr2d->clk);
+		return err;
+	}
+
+	/* initialize address register map */
+	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+		set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+	platform_set_drvdata(pdev, gr2d);
+
+	return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+	struct gr2d *gr2d = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_client_unregister(&gr2d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_disable_unprepare(gr2d->clk);
+
+	return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+	.driver = {
+		.name = "tegra-gr2d",
+		.of_match_table = gr2d_match,
+	},
+	.probe = gr2d_probe,
+	.remove = gr2d_remove,
+};
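The addr_regs bitmap combined with the new is_addr_reg() hook gives the core a per-engine way to ask whether a given register takes a buffer address, which is exactly what a command-stream firewall needs when deciding which words to validate or relocate. The submit-path firewall itself is not in these hunks; the sketch below only illustrates how the hook would be consulted (the helper name is hypothetical):

  static bool example_word_is_address(struct tegra_drm_client *client,
  				    u32 class, u32 offset)
  {
  	if (!client->ops->is_addr_reg)
  		return false;

  	return client->ops->is_addr_reg(client->base.dev, class,
  					offset) != 0;
  }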
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644
index 0000000..4d7304f
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr2d.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR		0x1a
+#define GR2D_VA_BASE_ADDR		0x1b
+#define GR2D_PAT_BASE_ADDR		0x26
+#define GR2D_DSTA_BASE_ADDR		0x2b
+#define GR2D_DSTB_BASE_ADDR		0x2c
+#define GR2D_DSTC_BASE_ADDR		0x2d
+#define GR2D_SRCA_BASE_ADDR		0x31
+#define GR2D_SRCB_BASE_ADDR		0x32
+#define GR2D_SRC_BASE_ADDR_SB		0x48
+#define GR2D_DSTA_BASE_ADDR_SB		0x49
+#define GR2D_DSTB_BASE_ADDR_SB		0x4a
+#define GR2D_UA_BASE_ADDR_SB		0x4b
+#define GR2D_VA_BASE_ADDR_SB		0x4c
+
+#define GR2D_NUM_REGS			0x4d
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644
index 0000000..4cec8f5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+struct gr3d {
+	struct tegra_drm_client client;
+	struct host1x_channel *channel;
+	struct clk *clk_secondary;
+	struct clk *clk;
+
+	DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+	return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	unsigned long flags = HOST1X_SYNCPT_HAS_BASE;
+	struct gr3d *gr3d = to_gr3d(drm);
+
+	gr3d->channel = host1x_channel_request(client->dev);
+	if (!gr3d->channel)
+		return -ENOMEM;
+
+	client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+	if (!client->syncpts[0]) {
+		host1x_channel_free(gr3d->channel);
+		return -ENOMEM;
+	}
+
+	return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+	struct tegra_drm_client *drm = host1x_to_drm_client(client);
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+	struct gr3d *gr3d = to_gr3d(drm);
+	int err;
+
+	err = tegra_drm_unregister_client(tegra, drm);
+	if (err < 0)
+		return err;
+
+	host1x_syncpt_free(client->syncpts[0]);
+	host1x_channel_free(gr3d->channel);
+
+	return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+	.init = gr3d_init,
+	.exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+			     struct tegra_drm_context *context)
+{
+	struct gr3d *gr3d = to_gr3d(client);
+
+	context->channel = host1x_channel_get(gr3d->channel);
+	if (!context->channel)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+	host1x_channel_put(context->channel);
+}
+
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+	struct gr3d *gr3d = dev_get_drvdata(dev);
+
+	switch (class) {
+	case HOST1X_CLASS_HOST1X:
+		if (offset == 0x2b)
+			return 1;
+
+		break;
+
+	case HOST1X_CLASS_GR3D:
+		if (offset >= GR3D_NUM_REGS)
+			break;
+
+		if (test_bit(offset, gr3d->addr_regs))
+			return 1;
+
+		break;
+	}
+
+	return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+	.open_channel = gr3d_open_channel,
+	.close_channel = gr3d_close_channel,
+	.is_addr_reg = gr3d_is_addr_reg,
+	.submit = tegra_drm_submit,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+	{ .compatible = "nvidia,tegra114-gr3d" },
+	{ .compatible = "nvidia,tegra30-gr3d" },
+	{ .compatible = "nvidia,tegra20-gr3d" },
+	{ }
+};
+
+static const u32 gr3d_addr_regs[] = {
+	GR3D_IDX_ATTRIBUTE( 0),
+	GR3D_IDX_ATTRIBUTE( 1),
+	GR3D_IDX_ATTRIBUTE( 2),
+	GR3D_IDX_ATTRIBUTE( 3),
+	GR3D_IDX_ATTRIBUTE( 4),
+	GR3D_IDX_ATTRIBUTE( 5),
+	GR3D_IDX_ATTRIBUTE( 6),
+	GR3D_IDX_ATTRIBUTE( 7),
+	GR3D_IDX_ATTRIBUTE( 8),
+	GR3D_IDX_ATTRIBUTE( 9),
+	GR3D_IDX_ATTRIBUTE(10),
+	GR3D_IDX_ATTRIBUTE(11),
+	GR3D_IDX_ATTRIBUTE(12),
+	GR3D_IDX_ATTRIBUTE(13),
+	GR3D_IDX_ATTRIBUTE(14),
+	GR3D_IDX_ATTRIBUTE(15),
+	GR3D_IDX_INDEX_BASE,
+	GR3D_QR_ZTAG_ADDR,
+	GR3D_QR_CTAG_ADDR,
+	GR3D_QR_CZ_ADDR,
+	GR3D_TEX_TEX_ADDR( 0),
+	GR3D_TEX_TEX_ADDR( 1),
+	GR3D_TEX_TEX_ADDR( 2),
+	GR3D_TEX_TEX_ADDR( 3),
+	GR3D_TEX_TEX_ADDR( 4),
+	GR3D_TEX_TEX_ADDR( 5),
+	GR3D_TEX_TEX_ADDR( 6),
+	GR3D_TEX_TEX_ADDR( 7),
+	GR3D_TEX_TEX_ADDR( 8),
+	GR3D_TEX_TEX_ADDR( 9),
+	GR3D_TEX_TEX_ADDR(10),
+	GR3D_TEX_TEX_ADDR(11),
+	GR3D_TEX_TEX_ADDR(12),
+	GR3D_TEX_TEX_ADDR(13),
+	GR3D_TEX_TEX_ADDR(14),
+	GR3D_TEX_TEX_ADDR(15),
+	GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+	GR3D_GLOBAL_SURFADDR( 0),
+	GR3D_GLOBAL_SURFADDR( 1),
+	GR3D_GLOBAL_SURFADDR( 2),
+	GR3D_GLOBAL_SURFADDR( 3),
+	GR3D_GLOBAL_SURFADDR( 4),
+	GR3D_GLOBAL_SURFADDR( 5),
+	GR3D_GLOBAL_SURFADDR( 6),
+	GR3D_GLOBAL_SURFADDR( 7),
+	GR3D_GLOBAL_SURFADDR( 8),
+	GR3D_GLOBAL_SURFADDR( 9),
+	GR3D_GLOBAL_SURFADDR(10),
+	GR3D_GLOBAL_SURFADDR(11),
+	GR3D_GLOBAL_SURFADDR(12),
+	GR3D_GLOBAL_SURFADDR(13),
+	GR3D_GLOBAL_SURFADDR(14),
+	GR3D_GLOBAL_SURFADDR(15),
+	GR3D_GLOBAL_SPILLSURFADDR,
+	GR3D_GLOBAL_SURFOVERADDR( 0),
+	GR3D_GLOBAL_SURFOVERADDR( 1),
+	GR3D_GLOBAL_SURFOVERADDR( 2),
+	GR3D_GLOBAL_SURFOVERADDR( 3),
+	GR3D_GLOBAL_SURFOVERADDR( 4),
+	GR3D_GLOBAL_SURFOVERADDR( 5),
+	GR3D_GLOBAL_SURFOVERADDR( 6),
+	GR3D_GLOBAL_SURFOVERADDR( 7),
+	GR3D_GLOBAL_SURFOVERADDR( 8),
+	GR3D_GLOBAL_SURFOVERADDR( 9),
+	GR3D_GLOBAL_SURFOVERADDR(10),
+	GR3D_GLOBAL_SURFOVERADDR(11),
+	GR3D_GLOBAL_SURFOVERADDR(12),
+	GR3D_GLOBAL_SURFOVERADDR(13),
+	GR3D_GLOBAL_SURFOVERADDR(14),
+	GR3D_GLOBAL_SURFOVERADDR(15),
+	GR3D_GLOBAL_SAMP01SURFADDR( 0),
+	GR3D_GLOBAL_SAMP01SURFADDR( 1),
+	GR3D_GLOBAL_SAMP01SURFADDR( 2),
+	GR3D_GLOBAL_SAMP01SURFADDR( 3),
+	GR3D_GLOBAL_SAMP01SURFADDR( 4),
+	GR3D_GLOBAL_SAMP01SURFADDR( 5),
+	GR3D_GLOBAL_SAMP01SURFADDR( 6),
+	GR3D_GLOBAL_SAMP01SURFADDR( 7),
+	GR3D_GLOBAL_SAMP01SURFADDR( 8),
+	GR3D_GLOBAL_SAMP01SURFADDR( 9),
+	GR3D_GLOBAL_SAMP01SURFADDR(10),
+	GR3D_GLOBAL_SAMP01SURFADDR(11),
+	GR3D_GLOBAL_SAMP01SURFADDR(12),
+	GR3D_GLOBAL_SAMP01SURFADDR(13),
+	GR3D_GLOBAL_SAMP01SURFADDR(14),
+	GR3D_GLOBAL_SAMP01SURFADDR(15),
+	GR3D_GLOBAL_SAMP23SURFADDR( 0),
+	GR3D_GLOBAL_SAMP23SURFADDR( 1),
+	GR3D_GLOBAL_SAMP23SURFADDR( 2),
+	GR3D_GLOBAL_SAMP23SURFADDR( 3),
+	GR3D_GLOBAL_SAMP23SURFADDR( 4),
+	GR3D_GLOBAL_SAMP23SURFADDR( 5),
+	GR3D_GLOBAL_SAMP23SURFADDR( 6),
+	GR3D_GLOBAL_SAMP23SURFADDR( 7),
+	GR3D_GLOBAL_SAMP23SURFADDR( 8),
+	GR3D_GLOBAL_SAMP23SURFADDR( 9),
+	GR3D_GLOBAL_SAMP23SURFADDR(10),
+	GR3D_GLOBAL_SAMP23SURFADDR(11),
+	GR3D_GLOBAL_SAMP23SURFADDR(12),
+	GR3D_GLOBAL_SAMP23SURFADDR(13),
+	GR3D_GLOBAL_SAMP23SURFADDR(14),
+	GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct host1x_syncpt **syncpts;
+	struct gr3d *gr3d;
+	unsigned int i;
+	int err;
+
+	gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+	if (!gr3d)
+		return -ENOMEM;
+
+	syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+	if (!syncpts)
+		return -ENOMEM;
+
+	gr3d->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(gr3d->clk)) {
+		dev_err(&pdev->dev, "cannot get clock\n");
+		return PTR_ERR(gr3d->clk);
+	}
+
+	if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
+		gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
+		if (IS_ERR(gr3d->clk_secondary)) {
+			dev_err(&pdev->dev, "cannot get secondary clock\n");
+			return PTR_ERR(gr3d->clk_secondary);
+		}
+	}
+
+	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to power up 3D unit\n");
+		return err;
+	}
+
+	if (gr3d->clk_secondary) {
+		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
+							gr3d->clk_secondary);
+		if (err < 0) {
+			dev_err(&pdev->dev,
+				"failed to power up secondary 3D unit\n");
+			return err;
+		}
+	}
+
+	INIT_LIST_HEAD(&gr3d->client.base.list);
+	gr3d->client.base.ops = &gr3d_client_ops;
+	gr3d->client.base.dev = &pdev->dev;
+	gr3d->client.base.class = HOST1X_CLASS_GR3D;
+	gr3d->client.base.syncpts = syncpts;
+	gr3d->client.base.num_syncpts = 1;
+
+	INIT_LIST_HEAD(&gr3d->client.list);
+	gr3d->client.ops = &gr3d_ops;
+
+	err = host1x_client_register(&gr3d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	/* initialize address register map */
+	for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+		set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+	platform_set_drvdata(pdev, gr3d);
+
+	return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+	struct gr3d *gr3d = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_client_unregister(&gr3d->client.base);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	if (gr3d->clk_secondary) {
+		tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
+		clk_disable_unprepare(gr3d->clk_secondary);
+	}
+
+	tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+	clk_disable_unprepare(gr3d->clk);
+
+	return 0;
+}
+
+struct platform_driver tegra_gr3d_driver = {
+	.driver = {
+		.name = "tegra-gr3d",
+		.of_match_table = tegra_gr3d_match,
+	},
+	.probe = gr3d_probe,
+	.remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644
index 0000000..0c30a13
--- /dev/null
+++ b/drivers/gpu/drm/tegra/gr3d.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x)		(0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE		0x121
+#define GR3D_QR_ZTAG_ADDR		0x415
+#define GR3D_QR_CTAG_ADDR		0x417
+#define GR3D_QR_CZ_ADDR			0x419
+#define GR3D_TEX_TEX_ADDR(x)		(0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS	0x904
+#define GR3D_GLOBAL_SURFADDR(x)		(0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR	0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x)	(0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x)	(0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x)	(0xe60 + (x))
+
+#define GR3D_NUM_REGS			0xe88
+
+#endif
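As a quick sanity check of the macros: GR3D_IDX_ATTRIBUTE(3) expands to 0x100 + 3 * 2 = 0x106, GR3D_TEX_TEX_ADDR(15) to 0x71f, and GR3D_GLOBAL_SAMP23SURFADDR(15) to 0xe6f; all of these fall below GR3D_NUM_REGS (0xe88), so every entry of gr3d_addr_regs[] fits in the DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS) used by gr3d.c.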
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
similarity index 83%
rename from drivers/gpu/host1x/drm/hdmi.c
rename to drivers/gpu/drm/tegra/hdmi.c
index 644d95c..0cd9bc2 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -8,21 +8,33 @@
  */
 
 #include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/gpio.h>
-#include <linux/hdmi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
 #include <linux/clk/tegra.h>
-
-#include <drm/drm_edid.h>
+#include <linux/debugfs.h>
+#include <linux/hdmi.h>
+#include <linux/regulator/consumer.h>
 
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
-#include "host1x_client.h"
+
+struct tmds_config {
+	unsigned int pclk;
+	u32 pll0;
+	u32 pll1;
+	u32 pe_current;
+	u32 drive_current;
+	u32 peak_current;
+};
+
+struct tegra_hdmi_config {
+	const struct tmds_config *tmds;
+	unsigned int num_tmds;
+
+	unsigned long fuse_override_offset;
+	unsigned long fuse_override_value;
+
+	bool has_sor_io_peak_current;
+};
 
 struct tegra_hdmi {
 	struct host1x_client client;
@@ -38,6 +50,8 @@
 	struct clk *clk_parent;
 	struct clk *clk;
 
+	const struct tegra_hdmi_config *config;
+
 	unsigned int audio_source;
 	unsigned int audio_freq;
 	bool stereo;
@@ -143,15 +157,7 @@
 	{         0,     0,      0,     0 },
 };
 
-struct tmds_config {
-	unsigned int pclk;
-	u32 pll0;
-	u32 pll1;
-	u32 pe_current;
-	u32 drive_current;
-};
-
-static const struct tmds_config tegra2_tmds_config[] = {
+static const struct tmds_config tegra20_tmds_config[] = {
 	{ /* slow pixel clock modes */
 		.pclk = 27000000,
 		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@
 	},
 };
 
-static const struct tmds_config tegra3_tmds_config[] = {
+static const struct tmds_config tegra30_tmds_config[] = {
 	{ /* 480p modes */
 		.pclk = 27000000,
 		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@
 	},
 };
 
+static const struct tmds_config tegra114_tmds_config[] = {
+	{ /* 480p/576p / 25.2MHz/27MHz modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_0_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 720p / 74.25MHz modes */
+		.pclk = 74250000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+			SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_15_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 1080p / 148.5MHz modes */
+		.pclk = 148500000,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+			SOR_PLL_TMDS_TERMADJ(0),
+		.pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_10_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+	}, { /* 225/297MHz modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+			SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+		.pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+			| SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+			PE_CURRENT3(PE_CURRENT_0_mA_T114),
+		.drive_current =
+			DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+			DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+		.peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+			PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+	},
+};
+
 static const struct tegra_hdmi_audio_config *
 tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
 {
@@ -511,7 +596,7 @@
 
 	err = hdmi_audio_infoframe_init(&frame);
 	if (err < 0) {
-		dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+		dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
 			err);
 		return;
 	}
@@ -531,7 +616,7 @@
 	 * contain 7 bytes. Including the 3 byte header only the first 10
 	 * bytes can be programmed.
 	 */
-	tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+	tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
 
 	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
 			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@
 	tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
 	tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
 
-	value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
-	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+	tegra_hdmi_writel(hdmi, tmds->drive_current,
+			  HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+	value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+	value |= hdmi->config->fuse_override_value;
+	tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+	if (hdmi->config->has_sor_io_peak_current)
+		tegra_hdmi_writel(hdmi, tmds->peak_current,
+				  HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+	struct edid *edid;
+
+	if (!output->connector.edid_blob_ptr)
+		return false;
+
+	edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+	return drm_detect_hdmi_monitor(edid);
 }
 
 static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@
 	struct tegra_hdmi *hdmi = to_hdmi(output);
 	struct device_node *node = hdmi->dev->of_node;
 	unsigned int pulse_start, div82, pclk;
-	const struct tmds_config *tmds;
-	unsigned int num_tmds;
 	unsigned long value;
 	int retries = 1000;
 	int err;
 
+	hdmi->dvi = !tegra_output_is_hdmi(output);
+
 	pclk = mode->clock * 1000;
 	h_sync_width = mode->hsync_end - mode->hsync_start;
 	h_back_porch = mode->htotal - mode->hsync_end;
 	h_front_porch = mode->hsync_start - mode->hdisplay;
 
-	err = regulator_enable(hdmi->vdd);
-	if (err < 0) {
-		dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
-		return err;
-	}
-
 	err = regulator_enable(hdmi->pll);
 	if (err < 0) {
 		dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@
 	tegra_hdmi_setup_stereo_infoframe(hdmi);
 
 	/* TMDS CONFIG */
-	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-		num_tmds = ARRAY_SIZE(tegra3_tmds_config);
-		tmds = tegra3_tmds_config;
-	} else {
-		num_tmds = ARRAY_SIZE(tegra2_tmds_config);
-		tmds = tegra2_tmds_config;
-	}
-
-	for (i = 0; i < num_tmds; i++) {
-		if (pclk <= tmds[i].pclk) {
-			tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+	for (i = 0; i < hdmi->config->num_tmds; i++) {
+		if (pclk <= hdmi->config->tmds[i].pclk) {
+			tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
 			break;
 		}
 	}
@@ -824,7 +915,6 @@
 	tegra_periph_reset_assert(hdmi->clk);
 	clk_disable(hdmi->clk);
 	regulator_disable(hdmi->pll);
-	regulator_disable(hdmi->vdd);
 
 	return 0;
 }
@@ -1055,6 +1145,7 @@
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
 	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+	DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
 
 #undef DUMP_REG
 
@@ -1122,24 +1213,31 @@
 	return 0;
 }
 
-static int tegra_hdmi_drm_init(struct host1x_client *client,
-			       struct drm_device *drm)
+static int tegra_hdmi_init(struct host1x_client *client)
 {
+	struct tegra_drm *tegra = dev_get_drvdata(client->parent);
 	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
 	int err;
 
+	err = regulator_enable(hdmi->vdd);
+	if (err < 0) {
+		dev_err(client->dev, "failed to enable VDD regulator: %d\n",
+			err);
+		return err;
+	}
+
 	hdmi->output.type = TEGRA_OUTPUT_HDMI;
 	hdmi->output.dev = client->dev;
 	hdmi->output.ops = &hdmi_ops;
 
-	err = tegra_output_init(drm, &hdmi->output);
+	err = tegra_output_init(tegra->drm, &hdmi->output);
 	if (err < 0) {
 		dev_err(client->dev, "output setup failed: %d\n", err);
 		return err;
 	}
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-		err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+		err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
 		if (err < 0)
 			dev_err(client->dev, "debugfs setup failed: %d\n", err);
 	}
@@ -1147,7 +1245,7 @@
 	return 0;
 }
 
-static int tegra_hdmi_drm_exit(struct host1x_client *client)
+static int tegra_hdmi_exit(struct host1x_client *client)
 {
 	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
 	int err;
@@ -1171,25 +1269,63 @@
 		return err;
 	}
 
+	regulator_disable(hdmi->vdd);
+
 	return 0;
 }
 
 static const struct host1x_client_ops hdmi_client_ops = {
-	.drm_init = tegra_hdmi_drm_init,
-	.drm_exit = tegra_hdmi_drm_exit,
+	.init = tegra_hdmi_init,
+	.exit = tegra_hdmi_exit,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+	.tmds = tegra20_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+	.tmds = tegra30_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+	.tmds = tegra114_tmds_config,
+	.num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+	.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+	.fuse_override_value = 1 << 31,
+	.has_sor_io_peak_current = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+	{ .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+	{ .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+	{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+	{ },
 };
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	const struct of_device_id *match;
 	struct tegra_hdmi *hdmi;
 	struct resource *regs;
 	int err;
 
+	match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
+	if (!match)
+		return -ENODEV;
+
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
 	if (!hdmi)
 		return -ENOMEM;
 
+	hdmi->config = match->data;
 	hdmi->dev = &pdev->dev;
 	hdmi->audio_source = AUTO;
 	hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@
 
 	hdmi->output.dev = &pdev->dev;
 
-	err = tegra_output_parse_dt(&hdmi->output);
+	err = tegra_output_probe(&hdmi->output);
 	if (err < 0)
 		return err;
 
@@ -1252,11 +1388,11 @@
 
 	hdmi->irq = err;
 
-	hdmi->client.ops = &hdmi_client_ops;
 	INIT_LIST_HEAD(&hdmi->client.list);
+	hdmi->client.ops = &hdmi_client_ops;
 	hdmi->client.dev = &pdev->dev;
 
-	err = host1x_register_client(host1x, &hdmi->client);
+	err = host1x_client_register(&hdmi->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
@@ -1270,29 +1406,28 @@
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
 	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
 	int err;
 
-	err = host1x_unregister_client(host1x, &hdmi->client);
+	err = host1x_client_unregister(&hdmi->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
 			err);
 		return err;
 	}
 
+	err = tegra_output_remove(&hdmi->output);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+		return err;
+	}
+
 	clk_unprepare(hdmi->clk_parent);
 	clk_unprepare(hdmi->clk);
 
 	return 0;
 }
 
-static struct of_device_id tegra_hdmi_of_match[] = {
-	{ .compatible = "nvidia,tegra30-hdmi", },
-	{ .compatible = "nvidia,tegra20-hdmi", },
-	{ },
-};
-
 struct platform_driver tegra_hdmi_driver = {
 	.driver = {
 		.name = "tegra-hdmi",
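The TMDS table is scanned in order and the first entry whose pclk is greater than or equal to the mode's pixel clock wins: on Tegra114 a 74.25 MHz 720p mode selects the second entry, a 148.5 MHz 1080p mode the third, and anything faster falls through to the UINT_MAX catch-all. The per-SoC table itself now arrives through the .data pointer of the matched of_device_id, so supporting a new chip means adding a tegra_hdmi_config and a match-table entry rather than scattering of_device_is_compatible() checks through the enable path.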
diff --git a/drivers/gpu/host1x/drm/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
similarity index 72%
rename from drivers/gpu/host1x/drm/hdmi.h
rename to drivers/gpu/drm/tegra/hdmi.h
index 52ac36e..0aebc48 100644
--- a/drivers/gpu/host1x/drm/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -233,7 +233,10 @@
 #define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
 #define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
 #define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
-#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) <<  0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) <<  8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
 
 #define DRIVE_CURRENT_1_500_mA  0x00
 #define DRIVE_CURRENT_1_875_mA  0x01
@@ -299,6 +302,79 @@
 #define DRIVE_CURRENT_24_375_mA 0x3d
 #define DRIVE_CURRENT_24_750_mA 0x3e
 
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
 #define HDMI_NV_PDISP_AUDIO_DEBUG0				0x7f
 #define HDMI_NV_PDISP_AUDIO_DEBUG1				0x80
 #define HDMI_NV_PDISP_AUDIO_DEBUG2				0x81
@@ -358,6 +434,23 @@
 #define PE_CURRENT_7_0_mA 0xe
 #define PE_CURRENT_7_5_mA 0xf
 
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
 #define HDMI_NV_PDISP_KEY_CTRL					0x9a
 #define HDMI_NV_PDISP_KEY_DEBUG0				0x9b
 #define HDMI_NV_PDISP_KEY_DEBUG1				0x9c
@@ -383,4 +476,61 @@
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
 
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT		0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) <<  0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) <<  8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0		0xd2
+
 #endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/drm/tegra/output.c
similarity index 91%
rename from drivers/gpu/host1x/drm/output.c
rename to drivers/gpu/drm/tegra/output.c
index 137ae81..2cb0065 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,9 +7,7 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
 #include <linux/of_gpio.h>
-#include <linux/i2c.h>
 
 #include "drm.h"
 
@@ -81,10 +79,16 @@
 	return status;
 }
 
+static void drm_connector_clear(struct drm_connector *connector)
+{
+	memset(connector, 0, sizeof(*connector));
+}
+
 static void tegra_connector_destroy(struct drm_connector *connector)
 {
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
+	drm_connector_clear(connector);
 }
 
 static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +98,15 @@
 	.destroy = tegra_connector_destroy,
 };
 
+static void drm_encoder_clear(struct drm_encoder *encoder)
+{
+	memset(encoder, 0, sizeof(*encoder));
+}
+
 static void tegra_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
+	drm_encoder_clear(encoder);
 }
 
 static const struct drm_encoder_funcs encoder_funcs = {
@@ -151,7 +161,7 @@
 	return IRQ_HANDLED;
 }
 
-int tegra_output_parse_dt(struct tegra_output *output)
+int tegra_output_probe(struct tegra_output *output)
 {
 	enum of_gpio_flags flags;
 	struct device_node *ddc;
@@ -181,14 +191,6 @@
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
 						   &flags);
-
-	return 0;
-}
-
-int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
-{
-	int connector, encoder, err;
-
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
@@ -202,7 +204,8 @@
 		err = gpio_to_irq(output->hpd_gpio);
 		if (err < 0) {
 			dev_err(output->dev, "gpio_to_irq(): %d\n", err);
-			goto free_hpd;
+			gpio_free(output->hpd_gpio);
+			return err;
 		}
 
 		output->hpd_irq = err;
@@ -215,12 +218,33 @@
 		if (err < 0) {
 			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
 				output->hpd_irq, err);
-			goto free_hpd;
+			gpio_free(output->hpd_gpio);
+			return err;
 		}
 
 		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
 	}
 
+	return 0;
+}
+
+int tegra_output_remove(struct tegra_output *output)
+{
+	if (gpio_is_valid(output->hpd_gpio)) {
+		free_irq(output->hpd_irq, output);
+		gpio_free(output->hpd_gpio);
+	}
+
+	if (output->ddc)
+		put_device(&output->ddc->dev);
+
+	return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+	int connector, encoder;
+
 	switch (output->type) {
 	case TEGRA_OUTPUT_RGB:
 		connector = DRM_MODE_CONNECTOR_LVDS;
@@ -241,6 +265,7 @@
 	drm_connector_init(drm, &output->connector, &connector_funcs,
 			   connector);
 	drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+	output->connector.dpms = DRM_MODE_DPMS_OFF;
 
 	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
 	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +276,9 @@
 	output->encoder.possible_crtcs = 0x3;
 
 	return 0;
-
-free_hpd:
-	gpio_free(output->hpd_gpio);
-
-	return err;
 }
 
 int tegra_output_exit(struct tegra_output *output)
 {
-	if (gpio_is_valid(output->hpd_gpio)) {
-		free_irq(output->hpd_irq, output);
-		gpio_free(output->hpd_gpio);
-	}
-
-	if (output->ddc)
-		put_device(&output->ddc->dev);
-
 	return 0;
 }
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/drm/tegra/rgb.c
similarity index 96%
rename from drivers/gpu/host1x/drm/rgb.c
rename to drivers/gpu/drm/tegra/rgb.c
index 5aa66ef..ba47ca4 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,9 +8,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -150,7 +147,7 @@
 	rgb->output.dev = dc->dev;
 	rgb->output.of_node = np;
 
-	err = tegra_output_parse_dt(&rgb->output);
+	err = tegra_output_probe(&rgb->output);
 	if (err < 0)
 		return err;
 
@@ -177,6 +174,20 @@
 	return 0;
 }
 
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+	int err;
+
+	if (!dc->rgb)
+		return 0;
+
+	err = tegra_output_remove(dc->rgb);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
 int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
 {
 	struct tegra_rgb *rgb = to_rgb(dc->rgb);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d101..7c3ef79 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@
 	tristate "DRM Support for TI LCDC Display Controller"
 	depends on DRM && OF && ARM
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dd..b433b9f 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -5,10 +5,6 @@
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
-	ttm_bo_manager.o
-
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
+	ttm_bo_manager.o ttm_page_alloc_dma.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
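Building ttm_page_alloc_dma.o unconditionally only works because the source now guards its whole body with a preprocessor check (see the ttm_page_alloc_dma.c hunk below, which wraps the file in #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)); on configurations with neither option the file compiles to an empty object instead of being dropped by the Makefile.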
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f1a857e..8d5a646 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -429,8 +429,20 @@
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret)
+	if (!ret) {
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_add_to_lru(bo);
+		}
+
 		ww_mutex_unlock(&bo->resv->lock);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -986,24 +998,32 @@
 	return ret;
 }
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1012,6 +1032,7 @@
 			bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 					 no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
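With ttm_bo_mem_compat() returning the matching flags and also accepting the busy placements, ttm_bo_validate() can skip the move entirely when the buffer already sits somewhere the caller can tolerate. A hedged sketch of a caller, assuming a driver with the usual VRAM/TT memory types:

  static int example_validate(struct ttm_buffer_object *bo)
  {
  	uint32_t place[] = { TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING };
  	uint32_t busy[]  = { TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING };
  	struct ttm_placement placement = {
  		.placement = place,
  		.num_placement = ARRAY_SIZE(place),
  		.busy_placement = busy,
  		.num_busy_placement = ARRAY_SIZE(busy),
  	};

  	/* no move is triggered if bo already resides in VRAM or TT */
  	return ttm_bo_validate(bo, &placement, true, false);
  }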
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cc904d..4834c46 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -343,19 +343,25 @@
 	if (ret)
 		goto out;
 
+	/*
+	 * Single TTM move. NOP.
+	 */
 	if (old_iomap == NULL && new_iomap == NULL)
 		goto out2;
+
+	/*
+	 * Move nonexistent data. NOP.
+	 */
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
-	if (ttm->state == tt_unpopulated) {
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
 		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-		if (ret) {
-			/* if we fail here don't nuke the mm node
-			 * as the bo still owns it */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 
 	add = 0;
@@ -381,11 +387,8 @@
 						   prot);
 		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-		if (ret) {
-			/* failing here, means keep old copy as-is */
-			old_copy.mm_node = NULL;
+		if (ret)
 			goto out1;
-		}
 	}
 	mb();
 out2:
@@ -403,7 +406,12 @@
 	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
-	ttm_bo_mem_put(bo, &old_copy);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
+		ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15..c03514b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	ret = ttm_bo_wait(bo, false, false, true);
+	if (likely(ret == 0))
+		goto out_unlock;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+		ret = VM_FAULT_RETRY;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_unlock;
+
+		up_read(&vma->vm_mm->mmap_sem);
+		(void) ttm_bo_wait(bo, false, true, false);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	ret = ttm_bo_wait(bo, false, true, false);
+	if (unlikely(ret != 0))
+		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+			VM_FAULT_NOPAGE;
+
+out_unlock:
+	spin_unlock(&bdev->fence_lock);
+	return ret;
+}
+
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -91,18 +136,11 @@
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
-	spin_lock(&bdev->fence_lock);
-	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bdev->fence_lock);
-		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTARTSYS) ?
-			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-			goto out_unlock;
-		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	if (unlikely(ret != 0)) {
+		retval = ret;
+		goto out_unlock;
+	}
 
 	ret = ttm_mem_io_lock(man, true);
 	if (unlikely(ret != 0)) {
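
The new ttm_bo_vm_fault_idle() above follows the usual fault-retry convention: if the fault allows retries, drop mmap_sem before any long GPU wait and return VM_FAULT_RETRY so the fault is re-run, and with FAULT_FLAG_RETRY_NOWAIT skip the wait entirely. A hedged sketch of that convention in isolation (example_wait_for_hw() is a hypothetical blocking wait):

	static int example_fault_wait(struct vm_area_struct *vma,
				      struct vm_fault *vmf)
	{
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			/* Caller cannot wait at all: just ask for a retry. */
			if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
				return VM_FAULT_RETRY;

			/* Wait without holding mmap_sem, then retry the fault. */
			up_read(&vma->vm_mm->mmap_sem);
			example_wait_for_hw();
			return VM_FAULT_RETRY;
		}

		/* Ordinary blocking wait with mmap_sem held. */
		example_wait_for_hw();
		return 0;
	}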
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7957bee..fb8259f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,6 +33,7 @@
  *   when freed).
  */
 
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 #define pr_fmt(fmt) "[TTM] " fmt
 
 #include <linux/dma-mapping.h>
@@ -1142,3 +1143,5 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
+
+#endif
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af1..f025286 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER

 	help
 	  This is a KMS driver for the USB displaylink video adapters.
           Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 7650dc0..3ddd6cd 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -77,7 +77,6 @@
 	.unload = udl_driver_unload,
 
 	/* gem hooks */
-	.gem_init_object = udl_gem_init_object,
 	.gem_free_object = udl_gem_free_object,
 	.gem_vm_ops = &udl_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 56aec94..1fbf7b3 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -115,7 +115,6 @@
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
 		 uint32_t handle, uint64_t *offset);
 
-int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 					    size_t size);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8bf6461..24ffbe9 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,13 +107,6 @@
 	}
 }
 
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
-	BUG();
-
-	return 0;
-}
-
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
 	struct page **pages;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 7e3ad87..9278891 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -79,7 +79,7 @@
 
 	/* Linux specific until context tracking code gets ported to BSD */
 	/* Last context, perform cleanup */
-	if (dev->ctx_count == 1 && dev->dev_private) {
+	if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
 		DRM_DEBUG("Last Context\n");
 		drm_irq_uninstall(dev);
 		via_cleanup_futex(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 96dc84d..7776e6f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -141,37 +141,374 @@
 };
 
 struct vmw_ttm_tt {
-	struct ttm_tt ttm;
+	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	uint64_t sg_alloc_size;
+	bool mapped;
 };
 
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+	return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+	return __sg_page_iter_next(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+	return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+	return sg_page_iter_page(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+	return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+	return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
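+ * @p_offset: Offset (in pages) into @vsgt at which iteration starts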
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+		     unsigned long p_offset)
+{
+	viter->i = p_offset - 1;
+	viter->num_pages = vsgt->num_pages;
+	switch (vsgt->mode) {
+	case vmw_dma_phys:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_phys_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->pages = vsgt->pages;
+		break;
+	case vmw_dma_alloc_coherent:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_dma_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->addrs = vsgt->addrs;
+		break;
+	case vmw_dma_map_populate:
+	case vmw_dma_map_bind:
+		viter->next = &__vmw_piter_sg_next;
+		viter->dma_address = &__vmw_piter_sg_addr;
+		viter->page = &__vmw_piter_sg_page;
+		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+				     vsgt->sgt->orig_nents, p_offset);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Used to free DMA mappings previously set up by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+		DMA_BIDIRECTIONAL);
+	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	int ret;
+
+	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+			 DMA_BIDIRECTIONAL);
+	if (unlikely(ret == 0))
+		return -ENOMEM;
+
+	vmw_tt->sgt.nents = ret;
+
+	return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping function and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+	struct vmw_piter iter;
+	dma_addr_t old;
+	int ret = 0;
+	static size_t sgl_size;
+	static size_t sgt_size;
+
+	if (vmw_tt->mapped)
+		return 0;
+
+	vsgt->mode = dev_priv->map_mode;
+	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+	vsgt->sgt = &vmw_tt->sgt;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		if (unlikely(!sgl_size)) {
+			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+			sgt_size = ttm_round_pot(sizeof(struct sg_table));
+		}
+		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
+					   true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+						vsgt->num_pages, 0,
+						(unsigned long)
+						vsgt->num_pages << PAGE_SHIFT,
+						GFP_KERNEL);
+		if (unlikely(ret != 0))
+			goto out_sg_alloc_fail;
+
+		if (vsgt->num_pages > vmw_tt->sgt.nents) {
+			uint64_t over_alloc =
+				sgl_size * (vsgt->num_pages -
+					    vmw_tt->sgt.nents);
+
+			ttm_mem_global_free(glob, over_alloc);
+			vmw_tt->sg_alloc_size -= over_alloc;
+		}
+
+		ret = vmw_ttm_map_for_dma(vmw_tt);
+		if (unlikely(ret != 0))
+			goto out_map_fail;
+
+		break;
+	default:
+		break;
+	}
+
+	old = ~((dma_addr_t) 0);
+	vmw_tt->vsgt.num_regions = 0;
+	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+		dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+		if (cur != old + PAGE_SIZE)
+			vmw_tt->vsgt.num_regions++;
+		old = cur;
+	}
+
+	vmw_tt->mapped = true;
+	return 0;
+
+out_map_fail:
+	sg_free_table(vmw_tt->vsgt.sgt);
+	vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+	return ret;
+}
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+	if (!vmw_tt->vsgt.sgt)
+		return;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		vmw_ttm_unmap_from_dma(vmw_tt);
+		sg_free_table(vmw_tt->vsgt.sgt);
+		vmw_tt->vsgt.sgt = NULL;
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_tt->sg_alloc_size);
+		break;
+	default:
+		break;
+	}
+	vmw_tt->mapped = false;
+}
+
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	int ret;
+
+	ret = vmw_ttm_map_dma(vmw_be);
+	if (unlikely(ret != 0))
+		return ret;
 
 	vmw_be->gmr_id = bo_mem->start;
 
-	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
 			    ttm->num_pages, vmw_be->gmr_id);
 }
 
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
 	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+		vmw_ttm_unmap_dma(vmw_be);
+
 	return 0;
 }
 
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 
-	ttm_tt_fini(ttm);
+	vmw_ttm_unmap_dma(vmw_be);
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+	else
+		ttm_tt_fini(ttm);
 	kfree(vmw_be);
 }
 
+static int vmw_ttm_populate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+		ret = ttm_mem_global_alloc(glob, size, false, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		if (unlikely(ret != 0))
+			ttm_mem_global_free(glob, size);
+	} else
+		ret = ttm_pool_populate(ttm);
+
+	return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+						 dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ttm_mem_global_free(glob, size);
+	} else
+		ttm_pool_unpopulate(ttm);
+}
+
 static struct ttm_backend_func vmw_ttm_func = {
 	.bind = vmw_ttm_bind,
 	.unbind = vmw_ttm_unbind,
@@ -183,20 +520,28 @@
 				 struct page *dummy_read_page)
 {
 	struct vmw_ttm_tt *vmw_be;
+	int ret;
 
-	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
 	if (!vmw_be)
 		return NULL;
 
-	vmw_be->ttm.func = &vmw_ttm_func;
+	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
 
-	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
-		kfree(vmw_be);
-		return NULL;
-	}
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
+				      dummy_read_page);
+	else
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
+				  dummy_read_page);
+	if (unlikely(ret != 0))
+		goto out_no_init;
 
-	return &vmw_be->ttm;
+	return &vmw_be->dma_ttm.ttm;
+out_no_init:
+	kfree(vmw_be);
+	return NULL;
 }
 
 int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -332,8 +677,8 @@
 
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
-	.ttm_tt_populate = &ttm_pool_populate,
-	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.ttm_tt_populate = &vmw_ttm_populate,
+	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.evict_flags = vmw_evict_flags,
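
A brief worked example of the memory accounting in vmw_ttm_map_dma() above (byte sizes are illustrative, not exact): the function first charges sgt_size + sgl_size * num_pages against the TTM memory global, then lets sg_alloc_table_from_pages() coalesce physically contiguous pages and refunds sgl_size * (num_pages - nents). For a 4 MiB buffer (1024 pages) whose pages coalesce into, say, 16 scatterlist entries, with sgl_size rounding to 64 bytes, roughly 64 KiB is charged up front and about 63 KiB of that is refunded, leaving only sgt_size + 16 * sgl_size accounted for the lifetime of the mapping.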
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0508f93..814665b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
+#include <linux/dma_remapping.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -185,6 +186,9 @@
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
 
 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+static int vmw_force_iommu;
+static int vmw_restrict_iommu;
+static int vmw_force_coherent;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -193,6 +197,13 @@
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
+module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
+module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+
 
 static void vmw_print_capabilities(uint32_t capabilities)
 {
@@ -427,12 +438,80 @@
 	dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
+ * system.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * This function tries to determine the IOMMU setup and what actions
+ * need to be taken by the driver to make system pages visible to the
+ * device.
+ * If this function decides that DMA is not possible, it returns -EINVAL.
+ * The driver may then try to disable features of the device that require
+ * DMA.
+ */
+static int vmw_dma_select_mode(struct vmw_private *dev_priv)
+{
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
+	static const char *names[vmw_dma_map_max] = {
+		[vmw_dma_phys] = "Using physical TTM page addresses.",
+		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
+		[vmw_dma_map_populate] = "Keeping DMA mappings.",
+		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+
+#ifdef CONFIG_INTEL_IOMMU
+	if (intel_iommu_enabled) {
+		dev_priv->map_mode = vmw_dma_map_populate;
+		goto out_fixup;
+	}
+#endif
+
+	if (!(vmw_force_iommu || vmw_force_coherent)) {
+		dev_priv->map_mode = vmw_dma_phys;
+		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+		return 0;
+	}
+
+	dev_priv->map_mode = vmw_dma_map_populate;
+
+	if (dma_ops->sync_single_for_cpu)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl() == 0)
+		dev_priv->map_mode = vmw_dma_map_populate;
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU
+out_fixup:
+#endif
+	if (dev_priv->map_mode == vmw_dma_map_populate &&
+	    vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
+
+	if (vmw_force_coherent)
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+
+#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
+	/*
+	 * No coherent page pool
+	 */
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		return -EINVAL;
+#endif
+
+	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
+
+	return 0;
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
 	int ret;
 	uint32_t svga_id;
 	enum vmw_res_type i;
+	bool refuse_dma = false;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -481,6 +560,11 @@
 	}
 
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+	ret = vmw_dma_select_mode(dev_priv);
+	if (unlikely(ret != 0)) {
+		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+		refuse_dma = true;
+	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -558,8 +642,9 @@
 	}
 
 	dev_priv->has_gmr = true;
-	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-			   dev_priv->max_gmr_ids) != 0) {
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 dev_priv->max_gmr_ids) != 0) {
 		DRM_INFO("No GMR memory available. "
 			 "Graphics memory resources are very limited.\n");
 		dev_priv->has_gmr = false;
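
As a usage note for the new module parameters above (assuming vmwgfx is built as a module; the mode actually selected still depends on the IOMMU and SWIOTLB state detected by vmw_dma_select_mode()): force_dma_api=1 prefers the DMA API path, force_coherent=1 forces coherent TTM pages, and restrict_iommu=1 (e.g. vmwgfx.restrict_iommu=1 on the kernel command line) tries to limit IOMMU usage by giving up DMA mappings early.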
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 150ec64..e401d5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -177,6 +177,58 @@
 	struct vmw_resource_val_node *node;
 };
 
+/**
+ * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
+ */
+enum vmw_dma_map_mode {
+	vmw_dma_phys,           /* Use physical page addresses */
+	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
+	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
+	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
+	vmw_dma_map_max
+};
+
+/**
+ * struct vmw_sg_table - Scatter/gather table for binding, with additional
+ * device-specific information.
+ *
+ * @sgt: Pointer to a struct sg_table with binding information
+ * @num_regions: Number of regions with device-address contiguous pages
+ */
+struct vmw_sg_table {
+	enum vmw_dma_map_mode mode;
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_table *sgt;
+	unsigned long num_regions;
+	unsigned long num_pages;
+};
+
+/**
+ * struct vmw_piter - Page iterator that iterates over a list of pages
+ * and DMA addresses that could be either a scatter-gather list or
+ * arrays
+ *
+ * @pages: Array of page pointers to the pages.
+ * @addrs: DMA addresses to the pages if coherent pages are used.
+ * @iter: Scatter-gather page iterator. Current position in SG list.
+ * @i: Current position in arrays.
+ * @num_pages: Number of pages total.
+ * @next: Function to advance the iterator. Returns false if past the list
+ * of pages, true otherwise.
+ * @dma_address: Function to return the DMA address of the current page.
+ */
+struct vmw_piter {
+	struct page **pages;
+	const dma_addr_t *addrs;
+	struct sg_page_iter iter;
+	unsigned long i;
+	unsigned long num_pages;
+	bool (*next)(struct vmw_piter *);
+	dma_addr_t (*dma_address)(struct vmw_piter *);
+	struct page *(*page)(struct vmw_piter *);
+};
+
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
@@ -358,6 +410,11 @@
 
 	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
+
+	/*
+	 * DMA mapping stuff.
+	 */
+	enum vmw_dma_map_mode map_mode;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -405,7 +462,7 @@
  */
 
 extern int vmw_gmr_bind(struct vmw_private *dev_priv,
-			struct page *pages[],
+			const struct vmw_sg_table *vsgt,
 			unsigned long num_pages,
 			int gmr_id);
 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
@@ -568,6 +625,45 @@
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
+extern void vmw_piter_start(struct vmw_piter *viter,
+			    const struct vmw_sg_table *vsgt,
+			    unsigned long p_offs);
+
+/**
+ * vmw_piter_next - Advance the iterator one page.
+ *
+ * @viter: Pointer to the iterator to advance.
+ *
+ * Returns false if past the list of pages, true otherwise.
+ */
+static inline bool vmw_piter_next(struct vmw_piter *viter)
+{
+	return viter->next(viter);
+}
+
+/**
+ * vmw_piter_dma_addr - Return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns the DMA address of the page pointed to by @viter.
+ */
+static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->dma_address(viter);
+}
+
+/**
+ * vmw_piter_page - Return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * Returns a pointer to the page pointed to by @viter.
+ */
+static inline struct page *vmw_piter_page(struct vmw_piter *viter)
+{
+	return viter->page(viter);
+}
 
 /**
  * Command submission - vmwgfx_execbuf.c
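
The vmw_piter interface declared above hides whether the backing pages come from a plain page array, a coherent dma_addr_t array, or a scatter-gather table, so callers iterate the same way in every DMA mode. A minimal usage sketch, mirroring the loop in vmw_ttm_map_dma() (example_handle_page() is a hypothetical per-page callback):

	static void example_walk_vsgt(const struct vmw_sg_table *vsgt)
	{
		struct vmw_piter iter;

		/* The iterator starts one step before the first page. */
		for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
			example_handle_page(vmw_piter_page(&iter),
					    vmw_piter_dma_addr(&iter));
	}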
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 1a0bf07..6d09523 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -32,9 +32,11 @@
 #define VMW_PPN_SIZE (sizeof(unsigned long))
 /* A future safe maximum remap size. */
 #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
-			 struct page *pages[],
+			 struct vmw_piter *iter,
 			 unsigned long num_pages,
 			 int gmr_id)
 {
@@ -81,11 +83,13 @@
 
 		for (i = 0; i < nr; ++i) {
 			if (VMW_PPN_SIZE <= 4)
-				*cmd = page_to_pfn(*pages++);
+				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 			else
-				*((uint64_t *)cmd) = page_to_pfn(*pages++);
+				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+					PAGE_SHIFT;
 
 			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			vmw_piter_next(iter);
 		}
 
 		num_pages -= nr;
@@ -120,22 +124,54 @@
 	vmw_fifo_commit(dev_priv, define_size);
 }
 
+
+static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
+				     struct list_head *desc_pages)
+{
+	struct page *page, *next;
+	struct svga_guest_mem_descriptor *page_virtual;
+	unsigned int desc_per_page = PAGE_SIZE /
+		sizeof(struct svga_guest_mem_descriptor) - 1;
+
+	if (list_empty(desc_pages))
+		return;
+
+	list_for_each_entry_safe(page, next, desc_pages, lru) {
+		list_del_init(&page->lru);
+
+		if (likely(desc_dma != DMA_ADDR_INVALID)) {
+			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
+				       DMA_TO_DEVICE);
+		}
+
+		page_virtual = kmap_atomic(page);
+		desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+		kunmap_atomic(page_virtual);
+
+		__free_page(page);
+	}
+}
+
 /**
  * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  * the number of used descriptors.
+ *
  */
 
-static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
-				     struct page *pages[],
-				     unsigned long num_pages)
+static int vmw_gmr_build_descriptors(struct device *dev,
+				     struct list_head *desc_pages,
+				     struct vmw_piter *iter,
+				     unsigned long num_pages,
+				     dma_addr_t *first_dma)
 {
-	struct page *page, *next;
+	struct page *page;
 	struct svga_guest_mem_descriptor *page_virtual = NULL;
 	struct svga_guest_mem_descriptor *desc_virtual = NULL;
 	unsigned int desc_per_page;
 	unsigned long prev_pfn;
 	unsigned long pfn;
 	int ret;
+	dma_addr_t desc_dma;
 
 	desc_per_page = PAGE_SIZE /
 	    sizeof(struct svga_guest_mem_descriptor) - 1;
@@ -148,23 +184,12 @@
 		}
 
 		list_add_tail(&page->lru, desc_pages);
-
-		/*
-		 * Point previous page terminating descriptor to this
-		 * page before unmapping it.
-		 */
-
-		if (likely(page_virtual != NULL)) {
-			desc_virtual->ppn = page_to_pfn(page);
-			kunmap_atomic(page_virtual);
-		}
-
 		page_virtual = kmap_atomic(page);
 		desc_virtual = page_virtual - 1;
 		prev_pfn = ~(0UL);
 
 		while (likely(num_pages != 0)) {
-			pfn = page_to_pfn(*pages);
+			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 
 			if (pfn != prev_pfn + 1) {
 
@@ -181,104 +206,81 @@
 			}
 			prev_pfn = pfn;
 			--num_pages;
-			++pages;
+			vmw_piter_next(iter);
 		}
 
-		(++desc_virtual)->ppn = cpu_to_le32(0);
+		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
 		desc_virtual->num_pages = cpu_to_le32(0);
+		kunmap_atomic(page_virtual);
 	}
 
-	if (likely(page_virtual != NULL))
+	desc_dma = 0;
+	list_for_each_entry_reverse(page, desc_pages, lru) {
+		page_virtual = kmap_atomic(page);
+		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
 		kunmap_atomic(page_virtual);
+		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
+					DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, desc_dma)))
+			goto out_err;
+	}
+	*first_dma = desc_dma;
 
 	return 0;
 out_err:
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
+	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
 	return ret;
 }
 
-static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
-{
-	struct page *page, *next;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
-}
-
 static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, struct list_head *desc_pages)
+				     int gmr_id, dma_addr_t desc_dma)
 {
-	struct page *page;
-
-	if (unlikely(list_empty(desc_pages)))
-		return;
-
-	page = list_entry(desc_pages->next, struct page, lru);
-
 	mutex_lock(&dev_priv->hw_mutex);
 
 	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
 	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
 	mb();
 
 	mutex_unlock(&dev_priv->hw_mutex);
 
 }
 
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- */
-
-static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-					unsigned long num_pages)
-{
-	unsigned long prev_pfn = ~(0UL);
-	unsigned long pfn;
-	unsigned long descriptors = 0;
-
-	while (num_pages--) {
-		pfn = page_to_pfn(*pages++);
-		if (prev_pfn + 1 != pfn)
-			++descriptors;
-		prev_pfn = pfn;
-	}
-
-	return descriptors;
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct page *pages[],
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
 	struct list_head desc_pages;
+	dma_addr_t desc_dma = 0;
+	struct device *dev = dev_priv->dev->dev;
+	struct vmw_piter data_iter;
 	int ret;
 
+	vmw_piter_start(&data_iter, vsgt, 0);
+
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
 	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 
 	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
 		return -EINVAL;
 
-	if (vmw_gmr_count_descriptors(pages, num_pages) >
-	    dev_priv->max_gmr_descriptors)
+	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&desc_pages);
 
-	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
+					num_pages, &desc_dma);
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
-	vmw_gmr_free_descriptors(&desc_pages);
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
+	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index c509d40..a51f48e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -168,7 +168,7 @@
 	fb = drm_framebuffer_lookup(dev, arg->fb_id);
 	if (!fb) {
 		DRM_ERROR("Invalid framebuffer id.\n");
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_no_fb;
 	}
 	vfb = vmw_framebuffer_to_vfb(fb);
@@ -252,7 +252,7 @@
 	fb = drm_framebuffer_lookup(dev, arg->fb_id);
 	if (!fb) {
 		DRM_ERROR("Invalid framebuffer id.\n");
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out_no_fb;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index fc43c06..ecb3d86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1508,7 +1508,7 @@
 
 	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
 	if (!obj) {
-		ret = -EINVAL;
+		ret = -ENOENT;
 		goto out;
 	}
 
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index ccfd42b..7d6bed22 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -19,6 +19,4 @@
 
 	  If unsure, choose Y.
 
-source "drivers/gpu/host1x/drm/Kconfig"
-
 endif
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 3b037b6..afa1e9e 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,6 +1,5 @@
-ccflags-y = -Idrivers/gpu/host1x
-
 host1x-y = \
+	bus.o \
 	syncpt.o \
 	dev.o \
 	intr.o \
@@ -8,13 +7,7 @@
 	channel.o \
 	job.o \
 	debug.o \
-	hw/host1x01.o
+	hw/host1x01.o \
+	hw/host1x02.o
 
-ccflags-y += -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
-
-host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644
index 0000000..509383f
--- /dev/null
+++ b/drivers/gpu/host1x/bus.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/host1x.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+static DEFINE_MUTEX(clients_lock);
+static LIST_HEAD(clients);
+
+static DEFINE_MUTEX(drivers_lock);
+static LIST_HEAD(drivers);
+
+static DEFINE_MUTEX(devices_lock);
+static LIST_HEAD(devices);
+
+struct host1x_subdev {
+	struct host1x_client *client;
+	struct device_node *np;
+	struct list_head list;
+};
+
+/**
+ * host1x_subdev_add() - add a new subdevice with an associated device node
+ */
+static int host1x_subdev_add(struct host1x_device *device,
+			     struct device_node *np)
+{
+	struct host1x_subdev *subdev;
+
+	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
+	if (!subdev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&subdev->list);
+	subdev->np = of_node_get(np);
+
+	mutex_lock(&device->subdevs_lock);
+	list_add_tail(&subdev->list, &device->subdevs);
+	mutex_unlock(&device->subdevs_lock);
+
+	return 0;
+}
+
+/**
+ * host1x_subdev_del() - remove subdevice
+ */
+static void host1x_subdev_del(struct host1x_subdev *subdev)
+{
+	list_del(&subdev->list);
+	of_node_put(subdev->np);
+	kfree(subdev);
+}
+
+/**
+ * host1x_device_parse_dt() - scan device tree and add matching subdevices
+ */
+static int host1x_device_parse_dt(struct host1x_device *device)
+{
+	struct device_node *np;
+	int err;
+
+	for_each_child_of_node(device->dev.parent->of_node, np) {
+		if (of_match_node(device->driver->subdevs, np) &&
+		    of_device_is_available(np)) {
+			err = host1x_subdev_add(device, np);
+			if (err < 0)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
+static void host1x_subdev_register(struct host1x_device *device,
+				   struct host1x_subdev *subdev,
+				   struct host1x_client *client)
+{
+	int err;
+
+	/*
+	 * Move the subdevice to the list of active (registered) subdevices
+	 * and associate it with a client. At the same time, associate the
+	 * client with its parent device.
+	 */
+	mutex_lock(&device->subdevs_lock);
+	mutex_lock(&device->clients_lock);
+	list_move_tail(&client->list, &device->clients);
+	list_move_tail(&subdev->list, &device->active);
+	client->parent = &device->dev;
+	subdev->client = client;
+	mutex_unlock(&device->clients_lock);
+	mutex_unlock(&device->subdevs_lock);
+
+	/*
+	 * When all subdevices have been registered, the composite device is
+	 * ready to be probed.
+	 */
+	if (list_empty(&device->subdevs)) {
+		err = device->driver->probe(device);
+		if (err < 0)
+			dev_err(&device->dev, "probe failed: %d\n", err);
+	}
+}
+
+static void __host1x_subdev_unregister(struct host1x_device *device,
+				       struct host1x_subdev *subdev)
+{
+	struct host1x_client *client = subdev->client;
+	int err;
+
+	/*
+	 * If all subdevices have been activated, we're about to remove the
+	 * first active subdevice, so unload the driver first.
+	 */
+	if (list_empty(&device->subdevs)) {
+		err = device->driver->remove(device);
+		if (err < 0)
+			dev_err(&device->dev, "remove failed: %d\n", err);
+	}
+
+	/*
+	 * Move the subdevice back to the list of idle subdevices and remove
+	 * it from list of clients.
+	 */
+	mutex_lock(&device->clients_lock);
+	subdev->client = NULL;
+	client->parent = NULL;
+	list_move_tail(&subdev->list, &device->subdevs);
+	/*
+	 * XXX: Perhaps don't do this here, but rather explicitly remove it
+	 * when the device is about to be deleted.
+	 *
+	 * This is somewhat complicated by the fact that this function is
+	 * used to remove the subdevice when a client is unregistered but
+	 * also when the composite device is about to be removed.
+	 */
+	list_del_init(&client->list);
+	mutex_unlock(&device->clients_lock);
+}
+
+static void host1x_subdev_unregister(struct host1x_device *device,
+				     struct host1x_subdev *subdev)
+{
+	mutex_lock(&device->subdevs_lock);
+	__host1x_subdev_unregister(device, subdev);
+	mutex_unlock(&device->subdevs_lock);
+}
+
+int host1x_device_init(struct host1x_device *device)
+{
+	struct host1x_client *client;
+	int err;
+
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry(client, &device->clients, list) {
+		if (client->ops && client->ops->init) {
+			err = client->ops->init(client);
+			if (err < 0) {
+				dev_err(&device->dev,
+					"failed to initialize %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&device->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&device->clients_lock);
+
+	return 0;
+}
+
+int host1x_device_exit(struct host1x_device *device)
+{
+	struct host1x_client *client;
+	int err;
+
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry_reverse(client, &device->clients, list) {
+		if (client->ops && client->ops->exit) {
+			err = client->ops->exit(client);
+			if (err < 0) {
+				dev_err(&device->dev,
+					"failed to cleanup %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&device->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&device->clients_lock);
+
+	return 0;
+}
+
+static int host1x_register_client(struct host1x *host1x,
+				  struct host1x_client *client)
+{
+	struct host1x_device *device;
+	struct host1x_subdev *subdev;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry(device, &host1x->devices, list) {
+		list_for_each_entry(subdev, &device->subdevs, list) {
+			if (subdev->np == client->dev->of_node) {
+				host1x_subdev_register(device, subdev, client);
+				mutex_unlock(&host1x->devices_lock);
+				return 0;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+	return -ENODEV;
+}
+
+static int host1x_unregister_client(struct host1x *host1x,
+				    struct host1x_client *client)
+{
+	struct host1x_device *device, *dt;
+	struct host1x_subdev *subdev;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry_safe(device, dt, &host1x->devices, list) {
+		list_for_each_entry(subdev, &device->active, list) {
+			if (subdev->client == client) {
+				host1x_subdev_unregister(device, subdev);
+				mutex_unlock(&host1x->devices_lock);
+				return 0;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+	return -ENODEV;
+}
+
+struct bus_type host1x_bus_type = {
+	.name = "host1x",
+};
+
+int host1x_bus_init(void)
+{
+	return bus_register(&host1x_bus_type);
+}
+
+void host1x_bus_exit(void)
+{
+	bus_unregister(&host1x_bus_type);
+}
+
+static void host1x_device_release(struct device *dev)
+{
+	struct host1x_device *device = to_host1x_device(dev);
+
+	kfree(device);
+}
+
+static int host1x_device_add(struct host1x *host1x,
+			     struct host1x_driver *driver)
+{
+	struct host1x_client *client, *tmp;
+	struct host1x_subdev *subdev;
+	struct host1x_device *device;
+	int err;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	mutex_init(&device->subdevs_lock);
+	INIT_LIST_HEAD(&device->subdevs);
+	INIT_LIST_HEAD(&device->active);
+	mutex_init(&device->clients_lock);
+	INIT_LIST_HEAD(&device->clients);
+	INIT_LIST_HEAD(&device->list);
+	device->driver = driver;
+
+	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
+	device->dev.dma_mask = &device->dev.coherent_dma_mask;
+	device->dev.release = host1x_device_release;
+	dev_set_name(&device->dev, "%s", driver->name);
+	device->dev.bus = &host1x_bus_type;
+	device->dev.parent = host1x->dev;
+
+	err = device_register(&device->dev);
+	if (err < 0)
+		return err;
+
+	err = host1x_device_parse_dt(device);
+	if (err < 0) {
+		device_unregister(&device->dev);
+		return err;
+	}
+
+	mutex_lock(&host1x->devices_lock);
+	list_add_tail(&device->list, &host1x->devices);
+	mutex_unlock(&host1x->devices_lock);
+
+	mutex_lock(&clients_lock);
+
+	list_for_each_entry_safe(client, tmp, &clients, list) {
+		list_for_each_entry(subdev, &device->subdevs, list) {
+			if (subdev->np == client->dev->of_node) {
+				host1x_subdev_register(device, subdev, client);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+
+/*
+ * Removes a device by first unregistering any subdevices and then removing
+ * itself from the list of devices.
+ *
+ * This function must be called with the host1x->devices_lock held.
+ */
+static void host1x_device_del(struct host1x *host1x,
+			      struct host1x_device *device)
+{
+	struct host1x_subdev *subdev, *sd;
+	struct host1x_client *client, *cl;
+
+	mutex_lock(&device->subdevs_lock);
+
+	/* unregister subdevices */
+	list_for_each_entry_safe(subdev, sd, &device->active, list) {
+		/*
+		 * host1x_subdev_unregister() will remove the client from
+		 * any lists, so we'll need to manually add it back to the
+		 * list of idle clients.
+		 *
+		 * XXX: Alternatively, perhaps don't remove the client from
+		 * any lists in host1x_subdev_unregister() and instead do
+		 * that explicitly from host1x_unregister_client()?
+		 */
+		client = subdev->client;
+
+		__host1x_subdev_unregister(device, subdev);
+
+		/* add the client to the list of idle clients */
+		mutex_lock(&clients_lock);
+		list_add_tail(&client->list, &clients);
+		mutex_unlock(&clients_lock);
+	}
+
+	/* remove subdevices */
+	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+		host1x_subdev_del(subdev);
+
+	mutex_unlock(&device->subdevs_lock);
+
+	/* move clients to idle list */
+	mutex_lock(&clients_lock);
+	mutex_lock(&device->clients_lock);
+
+	list_for_each_entry_safe(client, cl, &device->clients, list)
+		list_move_tail(&client->list, &clients);
+
+	mutex_unlock(&device->clients_lock);
+	mutex_unlock(&clients_lock);
+
+	/* finally remove the device */
+	list_del_init(&device->list);
+	device_unregister(&device->dev);
+}
+
+static void host1x_attach_driver(struct host1x *host1x,
+				 struct host1x_driver *driver)
+{
+	struct host1x_device *device;
+	int err;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry(device, &host1x->devices, list) {
+		if (device->driver == driver) {
+			mutex_unlock(&host1x->devices_lock);
+			return;
+		}
+	}
+
+	mutex_unlock(&host1x->devices_lock);
+
+	err = host1x_device_add(host1x, driver);
+	if (err < 0)
+		dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+}
+
+static void host1x_detach_driver(struct host1x *host1x,
+				 struct host1x_driver *driver)
+{
+	struct host1x_device *device, *tmp;
+
+	mutex_lock(&host1x->devices_lock);
+
+	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
+		if (device->driver == driver)
+			host1x_device_del(host1x, device);
+
+	mutex_unlock(&host1x->devices_lock);
+}
+
+int host1x_register(struct host1x *host1x)
+{
+	struct host1x_driver *driver;
+
+	mutex_lock(&devices_lock);
+	list_add_tail(&host1x->list, &devices);
+	mutex_unlock(&devices_lock);
+
+	mutex_lock(&drivers_lock);
+
+	list_for_each_entry(driver, &drivers, list)
+		host1x_attach_driver(host1x, driver);
+
+	mutex_unlock(&drivers_lock);
+
+	return 0;
+}
+
+int host1x_unregister(struct host1x *host1x)
+{
+	struct host1x_driver *driver;
+
+	mutex_lock(&drivers_lock);
+
+	list_for_each_entry(driver, &drivers, list)
+		host1x_detach_driver(host1x, driver);
+
+	mutex_unlock(&drivers_lock);
+
+	mutex_lock(&devices_lock);
+	list_del_init(&host1x->list);
+	mutex_unlock(&devices_lock);
+
+	return 0;
+}
+
+int host1x_driver_register(struct host1x_driver *driver)
+{
+	struct host1x *host1x;
+
+	INIT_LIST_HEAD(&driver->list);
+
+	mutex_lock(&drivers_lock);
+	list_add_tail(&driver->list, &drivers);
+	mutex_unlock(&drivers_lock);
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list)
+		host1x_attach_driver(host1x, driver);
+
+	mutex_unlock(&devices_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_driver_register);
+
+void host1x_driver_unregister(struct host1x_driver *driver)
+{
+	mutex_lock(&drivers_lock);
+	list_del_init(&driver->list);
+	mutex_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(host1x_driver_unregister);
+
+int host1x_client_register(struct host1x_client *client)
+{
+	struct host1x *host1x;
+	int err;
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list) {
+		err = host1x_register_client(host1x, client);
+		if (!err) {
+			mutex_unlock(&devices_lock);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&devices_lock);
+
+	mutex_lock(&clients_lock);
+	list_add_tail(&client->list, &clients);
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_client_register);
+
+int host1x_client_unregister(struct host1x_client *client)
+{
+	struct host1x_client *c;
+	struct host1x *host1x;
+	int err;
+
+	mutex_lock(&devices_lock);
+
+	list_for_each_entry(host1x, &devices, list) {
+		err = host1x_unregister_client(host1x, client);
+		if (!err) {
+			mutex_unlock(&devices_lock);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&devices_lock);
+	mutex_lock(&clients_lock);
+
+	list_for_each_entry(c, &clients, list) {
+		if (c == client) {
+			list_del_init(&c->list);
+			break;
+		}
+	}
+
+	mutex_unlock(&clients_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(host1x_client_unregister);
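
Taken together, the new bus code pairs a struct host1x_driver (which lists the device-tree compatibles of the subdevices it needs and supplies probe/remove for the composite device) with struct host1x_client instances registered by the individual subdevice drivers; host1x_subdev_register() only calls the driver's probe once every matching subdevice found in DT has a client. A hedged sketch of both sides; the field names are taken from their use above, but the exact structure layout lives in the new <linux/host1x.h>, which is not part of this hunk, so treat this as illustrative only:

	/* Composite-driver side. */
	static const struct of_device_id example_subdevs[] = {
		{ .compatible = "nvidia,tegra20-dc" },
		{ /* sentinel */ }
	};

	static int example_probe(struct host1x_device *device)
	{
		/* All subdevices now have clients; run each client's ->init(). */
		return host1x_device_init(device);
	}

	static int example_remove(struct host1x_device *device)
	{
		return host1x_device_exit(device);
	}

	static struct host1x_driver example_driver = {
		.name = "example",
		.subdevs = example_subdevs,
		.probe = example_probe,
		.remove = example_remove,
	};

	/* Registered once at module init: */
	/*	host1x_driver_register(&example_driver); */

	/* Subdevice side, e.g. from a display controller's platform probe: */
	/*	client->dev = &pdev->dev;                        */
	/*	client->ops = &example_client_ops;  (init/exit)  */
	/*	host1x_client_register(client);                  */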
diff --git a/drivers/gpu/host1x/host1x_client.h b/drivers/gpu/host1x/bus.h
similarity index 60%
rename from drivers/gpu/host1x/host1x_client.h
rename to drivers/gpu/host1x/bus.h
index 9b85f10..4099e99 100644
--- a/drivers/gpu/host1x/host1x_client.h
+++ b/drivers/gpu/host1x/bus.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -14,22 +15,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef HOST1X_CLIENT_H
-#define HOST1X_CLIENT_H
+#ifndef HOST1X_BUS_H
+#define HOST1X_BUS_H
 
-struct device;
-struct platform_device;
+struct host1x;
 
-#ifdef CONFIG_DRM_TEGRA
-int host1x_drm_alloc(struct platform_device *pdev);
-#else
-static inline int host1x_drm_alloc(struct platform_device *pdev)
-{
-	return 0;
-}
-#endif
+int host1x_bus_init(void);
+void host1x_bus_exit(void);
 
-void host1x_set_drm_data(struct device *dev, void *data);
-void *host1x_get_drm_data(struct device *dev);
+int host1x_register(struct host1x *host1x);
+int host1x_unregister(struct host1x *host1x);
 
 #endif
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index de72172..3995255 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/host1x.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/kfifo.h>
@@ -30,7 +31,6 @@
 #include "channel.h"
 #include "dev.h"
 #include "debug.h"
-#include "host1x_bo.h"
 #include "job.h"
 
 /*
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 48723b8..df767cf 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -40,12 +40,6 @@
 /* channel list operations */
 int host1x_channel_list_init(struct host1x *host);
 
-struct host1x_channel *host1x_channel_request(struct device *dev);
-void host1x_channel_free(struct host1x_channel *channel);
-struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
-void host1x_channel_put(struct host1x_channel *channel);
-int host1x_job_submit(struct host1x_job *job);
-
 #define host1x_for_each_channel(host, channel)				\
 	list_for_each_entry(channel, &host->chlist.list, list)
 
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 4716302..80da003 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -27,24 +27,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
 
+#include "bus.h"
 #include "dev.h"
 #include "intr.h"
 #include "channel.h"
 #include "debug.h"
 #include "hw/host1x01.h"
-#include "host1x_client.h"
-
-void host1x_set_drm_data(struct device *dev, void *data)
-{
-	struct host1x *host1x = dev_get_drvdata(dev);
-	host1x->drm_data = data;
-}
-
-void *host1x_get_drm_data(struct device *dev)
-{
-	struct host1x *host1x = dev_get_drvdata(dev);
-	return host1x ? host1x->drm_data : NULL;
-}
+#include "hw/host1x02.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -79,7 +68,17 @@
 	.sync_offset	= 0x3000,
 };
 
+static const struct host1x_info host1x02_info = {
+	.nb_channels = 9,
+	.nb_pts = 32,
+	.nb_mlocks = 16,
+	.nb_bases = 12,
+	.init = host1x02_init,
+	.sync_offset = 0x3000,
+};
+
 static struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
 	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
 	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
 	{ },
@@ -114,6 +113,9 @@
 	if (!host)
 		return -ENOMEM;
 
+	mutex_init(&host->devices_lock);
+	INIT_LIST_HEAD(&host->devices);
+	INIT_LIST_HEAD(&host->list);
 	host->dev = &pdev->dev;
 	host->info = id->data;
 
@@ -152,7 +154,7 @@
 	err = host1x_syncpt_init(host);
 	if (err) {
 		dev_err(&pdev->dev, "failed to initialize syncpts\n");
-		return err;
+		goto fail_unprepare_disable;
 	}
 
 	err = host1x_intr_init(host, syncpt_irq);
@@ -163,19 +165,26 @@
 
 	host1x_debug_init(host);
 
-	host1x_drm_alloc(pdev);
+	err = host1x_register(host);
+	if (err < 0)
+		goto fail_deinit_intr;
 
 	return 0;
 
+fail_deinit_intr:
+	host1x_intr_deinit(host);
 fail_deinit_syncpt:
 	host1x_syncpt_deinit(host);
+fail_unprepare_disable:
+	clk_disable_unprepare(host->clk);
 	return err;
 }
 
-static int __exit host1x_remove(struct platform_device *pdev)
+static int host1x_remove(struct platform_device *pdev)
 {
 	struct host1x *host = platform_get_drvdata(pdev);
 
+	host1x_unregister(host);
 	host1x_intr_deinit(host);
 	host1x_syncpt_deinit(host);
 	clk_disable_unprepare(host->clk);
@@ -184,59 +193,36 @@
 }
 
 static struct platform_driver tegra_host1x_driver = {
-	.probe = host1x_probe,
-	.remove = __exit_p(host1x_remove),
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "tegra-host1x",
 		.of_match_table = host1x_of_match,
 	},
+	.probe = host1x_probe,
+	.remove = host1x_remove,
 };
 
 static int __init tegra_host1x_init(void)
 {
 	int err;
 
-	err = platform_driver_register(&tegra_host1x_driver);
+	err = host1x_bus_init();
 	if (err < 0)
 		return err;
 
-#ifdef CONFIG_DRM_TEGRA
-	err = platform_driver_register(&tegra_dc_driver);
-	if (err < 0)
-		goto unregister_host1x;
-
-	err = platform_driver_register(&tegra_hdmi_driver);
-	if (err < 0)
-		goto unregister_dc;
-
-	err = platform_driver_register(&tegra_gr2d_driver);
-	if (err < 0)
-		goto unregister_hdmi;
-#endif
+	err = platform_driver_register(&tegra_host1x_driver);
+	if (err < 0) {
+		host1x_bus_exit();
+		return err;
+	}
 
 	return 0;
-
-#ifdef CONFIG_DRM_TEGRA
-unregister_hdmi:
-	platform_driver_unregister(&tegra_hdmi_driver);
-unregister_dc:
-	platform_driver_unregister(&tegra_dc_driver);
-unregister_host1x:
-	platform_driver_unregister(&tegra_host1x_driver);
-	return err;
-#endif
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
-#ifdef CONFIG_DRM_TEGRA
-	platform_driver_unregister(&tegra_gr2d_driver);
-	platform_driver_unregister(&tegra_hdmi_driver);
-	platform_driver_unregister(&tegra_dc_driver);
-#endif
 	platform_driver_unregister(&tegra_host1x_driver);
+	host1x_bus_exit();
 }
 module_exit(tegra_host1x_exit);
 
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index bed90a8..a61a976 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -27,6 +27,7 @@
 #include "job.h"
 
 struct host1x_syncpt;
+struct host1x_syncpt_base;
 struct host1x_channel;
 struct host1x_cdma;
 struct host1x_job;
@@ -102,6 +103,7 @@
 
 	void __iomem *regs;
 	struct host1x_syncpt *syncpt;
+	struct host1x_syncpt_base *bases;
 	struct device *dev;
 	struct clk *clk;
 
@@ -125,7 +127,10 @@
 
 	struct dentry *debugfs;
 
-	void *drm_data;
+	struct mutex devices_lock;
+	struct list_head devices;
+
+	struct list_head list;
 };
 
 void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +306,4 @@
 	host->debug_op->show_mlocks(host, o);
 }
 
-extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_gr2d_driver;
-
 #endif
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
deleted file mode 100644
index 8c61cee..0000000
--- a/drivers/gpu/host1x/drm/drm.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include <drm/drm.h>
-#include <drm/drmP.h>
-
-#include "host1x_client.h"
-#include "dev.h"
-#include "drm.h"
-#include "gem.h"
-#include "syncpt.h"
-
-#define DRIVER_NAME "tegra"
-#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-struct host1x_drm_client {
-	struct host1x_client *client;
-	struct device_node *np;
-	struct list_head list;
-};
-
-static int host1x_add_drm_client(struct host1x_drm *host1x,
-				 struct device_node *np)
-{
-	struct host1x_drm_client *client;
-
-	client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (!client)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&client->list);
-	client->np = of_node_get(np);
-
-	list_add_tail(&client->list, &host1x->drm_clients);
-
-	return 0;
-}
-
-static int host1x_activate_drm_client(struct host1x_drm *host1x,
-				      struct host1x_drm_client *drm,
-				      struct host1x_client *client)
-{
-	mutex_lock(&host1x->drm_clients_lock);
-	list_del_init(&drm->list);
-	list_add_tail(&drm->list, &host1x->drm_active);
-	drm->client = client;
-	mutex_unlock(&host1x->drm_clients_lock);
-
-	return 0;
-}
-
-static int host1x_remove_drm_client(struct host1x_drm *host1x,
-				    struct host1x_drm_client *client)
-{
-	mutex_lock(&host1x->drm_clients_lock);
-	list_del_init(&client->list);
-	mutex_unlock(&host1x->drm_clients_lock);
-
-	of_node_put(client->np);
-	kfree(client);
-
-	return 0;
-}
-
-static int host1x_parse_dt(struct host1x_drm *host1x)
-{
-	static const char * const compat[] = {
-		"nvidia,tegra20-dc",
-		"nvidia,tegra20-hdmi",
-		"nvidia,tegra20-gr2d",
-		"nvidia,tegra30-dc",
-		"nvidia,tegra30-hdmi",
-		"nvidia,tegra30-gr2d",
-	};
-	unsigned int i;
-	int err;
-
-	for (i = 0; i < ARRAY_SIZE(compat); i++) {
-		struct device_node *np;
-
-		for_each_child_of_node(host1x->dev->of_node, np) {
-			if (of_device_is_compatible(np, compat[i]) &&
-			    of_device_is_available(np)) {
-				err = host1x_add_drm_client(host1x, np);
-				if (err < 0)
-					return err;
-			}
-		}
-	}
-
-	return 0;
-}
-
-int host1x_drm_alloc(struct platform_device *pdev)
-{
-	struct host1x_drm *host1x;
-	int err;
-
-	host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
-	if (!host1x)
-		return -ENOMEM;
-
-	mutex_init(&host1x->drm_clients_lock);
-	INIT_LIST_HEAD(&host1x->drm_clients);
-	INIT_LIST_HEAD(&host1x->drm_active);
-	mutex_init(&host1x->clients_lock);
-	INIT_LIST_HEAD(&host1x->clients);
-	host1x->dev = &pdev->dev;
-
-	err = host1x_parse_dt(host1x);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
-		return err;
-	}
-
-	host1x_set_drm_data(&pdev->dev, host1x);
-
-	return 0;
-}
-
-int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
-{
-	struct host1x_client *client;
-
-	mutex_lock(&host1x->clients_lock);
-
-	list_for_each_entry(client, &host1x->clients, list) {
-		if (client->ops && client->ops->drm_init) {
-			int err = client->ops->drm_init(client, drm);
-			if (err < 0) {
-				dev_err(host1x->dev,
-					"DRM setup failed for %s: %d\n",
-					dev_name(client->dev), err);
-				mutex_unlock(&host1x->clients_lock);
-				return err;
-			}
-		}
-	}
-
-	mutex_unlock(&host1x->clients_lock);
-
-	return 0;
-}
-
-int host1x_drm_exit(struct host1x_drm *host1x)
-{
-	struct platform_device *pdev = to_platform_device(host1x->dev);
-	struct host1x_client *client;
-
-	if (!host1x->drm)
-		return 0;
-
-	mutex_lock(&host1x->clients_lock);
-
-	list_for_each_entry_reverse(client, &host1x->clients, list) {
-		if (client->ops && client->ops->drm_exit) {
-			int err = client->ops->drm_exit(client);
-			if (err < 0) {
-				dev_err(host1x->dev,
-					"DRM cleanup failed for %s: %d\n",
-					dev_name(client->dev), err);
-				mutex_unlock(&host1x->clients_lock);
-				return err;
-			}
-		}
-	}
-
-	mutex_unlock(&host1x->clients_lock);
-
-	drm_platform_exit(&tegra_drm_driver, pdev);
-	host1x->drm = NULL;
-
-	return 0;
-}
-
-int host1x_register_client(struct host1x_drm *host1x,
-			   struct host1x_client *client)
-{
-	struct host1x_drm_client *drm, *tmp;
-	int err;
-
-	mutex_lock(&host1x->clients_lock);
-	list_add_tail(&client->list, &host1x->clients);
-	mutex_unlock(&host1x->clients_lock);
-
-	list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
-		if (drm->np == client->dev->of_node)
-			host1x_activate_drm_client(host1x, drm, client);
-
-	if (list_empty(&host1x->drm_clients)) {
-		struct platform_device *pdev = to_platform_device(host1x->dev);
-
-		err = drm_platform_init(&tegra_drm_driver, pdev);
-		if (err < 0) {
-			dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-int host1x_unregister_client(struct host1x_drm *host1x,
-			     struct host1x_client *client)
-{
-	struct host1x_drm_client *drm, *tmp;
-	int err;
-
-	list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
-		if (drm->client == client) {
-			err = host1x_drm_exit(host1x);
-			if (err < 0) {
-				dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
-					err);
-				return err;
-			}
-
-			host1x_remove_drm_client(host1x, drm);
-			break;
-		}
-	}
-
-	mutex_lock(&host1x->clients_lock);
-	list_del_init(&client->list);
-	mutex_unlock(&host1x->clients_lock);
-
-	return 0;
-}
-
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
-	struct host1x_drm *host1x;
-	int err;
-
-	host1x = host1x_get_drm_data(drm->dev);
-	drm->dev_private = host1x;
-	host1x->drm = drm;
-
-	drm_mode_config_init(drm);
-
-	err = host1x_drm_init(host1x, drm);
-	if (err < 0)
-		return err;
-
-	/*
-	 * We don't use the drm_irq_install() helpers provided by the DRM
-	 * core, so we need to set this manually in order to allow the
-	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
-	 */
-	drm->irq_enabled = 1;
-
-	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
-	if (err < 0)
-		return err;
-
-	err = tegra_drm_fb_init(drm);
-	if (err < 0)
-		return err;
-
-	drm_kms_helper_poll_init(drm);
-
-	return 0;
-}
-
-static int tegra_drm_unload(struct drm_device *drm)
-{
-	drm_kms_helper_poll_fini(drm);
-	tegra_drm_fb_exit(drm);
-
-	drm_mode_config_cleanup(drm);
-
-	return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
-	struct host1x_drm_file *fpriv;
-
-	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-	if (!fpriv)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&fpriv->contexts);
-	filp->driver_priv = fpriv;
-
-	return 0;
-}
-
-static void host1x_drm_context_free(struct host1x_drm_context *context)
-{
-	context->client->ops->close_channel(context);
-	kfree(context);
-}
-
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
-	struct host1x_drm *host1x = drm->dev_private;
-
-	tegra_fbdev_restore_mode(host1x->fbdev);
-}
-
-#ifdef CONFIG_DRM_TEGRA_STAGING
-static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
-					 struct host1x_drm_context *context)
-{
-	struct host1x_drm_context *ctx;
-
-	list_for_each_entry(ctx, &file->contexts, list)
-		if (ctx == context)
-			return true;
-
-	return false;
-}
-
-static int tegra_gem_create(struct drm_device *drm, void *data,
-			    struct drm_file *file)
-{
-	struct drm_tegra_gem_create *args = data;
-	struct tegra_bo *bo;
-
-	bo = tegra_bo_create_with_handle(file, drm, args->size,
-					 &args->handle);
-	if (IS_ERR(bo))
-		return PTR_ERR(bo);
-
-	return 0;
-}
-
-static int tegra_gem_mmap(struct drm_device *drm, void *data,
-			  struct drm_file *file)
-{
-	struct drm_tegra_gem_mmap *args = data;
-	struct drm_gem_object *gem;
-	struct tegra_bo *bo;
-
-	gem = drm_gem_object_lookup(drm, file, args->handle);
-	if (!gem)
-		return -EINVAL;
-
-	bo = to_tegra_bo(gem);
-
-	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
-	drm_gem_object_unreference(gem);
-
-	return 0;
-}
-
-static int tegra_syncpt_read(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_read *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	args->value = host1x_syncpt_read_min(sp);
-	return 0;
-}
-
-static int tegra_syncpt_incr(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_incr *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	return host1x_syncpt_incr(sp);
-}
-
-static int tegra_syncpt_wait(struct drm_device *drm, void *data,
-			     struct drm_file *file)
-{
-	struct drm_tegra_syncpt_wait *args = data;
-	struct host1x *host = dev_get_drvdata(drm->dev);
-	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
-
-	if (!sp)
-		return -EINVAL;
-
-	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
-				  &args->value);
-}
-
-static int tegra_open_channel(struct drm_device *drm, void *data,
-			      struct drm_file *file)
-{
-	struct drm_tegra_open_channel *args = data;
-	struct host1x_client *client;
-	struct host1x_drm_context *context;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm *host1x = drm->dev_private;
-	int err = -ENODEV;
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return -ENOMEM;
-
-	list_for_each_entry(client, &host1x->clients, list)
-		if (client->class == args->client) {
-			err = client->ops->open_channel(client, context);
-			if (err)
-				break;
-
-			context->client = client;
-			list_add(&context->list, &fpriv->contexts);
-			args->context = (uintptr_t)context;
-			return 0;
-		}
-
-	kfree(context);
-	return err;
-}
-
-static int tegra_close_channel(struct drm_device *drm, void *data,
-			       struct drm_file *file)
-{
-	struct drm_tegra_close_channel *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -EINVAL;
-
-	list_del(&context->list);
-	host1x_drm_context_free(context);
-
-	return 0;
-}
-
-static int tegra_get_syncpt(struct drm_device *drm, void *data,
-			    struct drm_file *file)
-{
-	struct drm_tegra_get_syncpt *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-	struct host1x_syncpt *syncpt;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -ENODEV;
-
-	if (args->index >= context->client->num_syncpts)
-		return -EINVAL;
-
-	syncpt = context->client->syncpts[args->index];
-	args->id = host1x_syncpt_id(syncpt);
-
-	return 0;
-}
-
-static int tegra_submit(struct drm_device *drm, void *data,
-			struct drm_file *file)
-{
-	struct drm_tegra_submit *args = data;
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context =
-		(struct host1x_drm_context *)(uintptr_t)args->context;
-
-	if (!host1x_drm_file_owns_context(fpriv, context))
-		return -ENODEV;
-
-	return context->client->ops->submit(context, args, drm, file);
-}
-#endif
-
-static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
-#ifdef CONFIG_DRM_TEGRA_STAGING
-	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
-#endif
-};
-
-static const struct file_operations tegra_drm_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-	.mmap = tegra_drm_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-#endif
-	.llseek = noop_llseek,
-};
-
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-		struct tegra_dc *dc = to_tegra_dc(crtc);
-
-		if (dc->pipe == pipe)
-			return crtc;
-	}
-
-	return NULL;
-}
-
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
-{
-	/* TODO: implement real hardware counter using syncpoints */
-	return drm_vblank_count(dev, crtc);
-}
-
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-
-	if (!crtc)
-		return -ENODEV;
-
-	tegra_dc_enable_vblank(dc);
-
-	return 0;
-}
-
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
-{
-	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-
-	if (crtc)
-		tegra_dc_disable_vblank(dc);
-}
-
-static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
-{
-	struct host1x_drm_file *fpriv = file->driver_priv;
-	struct host1x_drm_context *context, *tmp;
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-		tegra_dc_cancel_page_flip(crtc, file);
-
-	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
-		host1x_drm_context_free(context);
-
-	kfree(fpriv);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *)s->private;
-	struct drm_device *drm = node->minor->dev;
-	struct drm_framebuffer *fb;
-
-	mutex_lock(&drm->mode_config.fb_lock);
-
-	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
-		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
-			   fb->base.id, fb->width, fb->height, fb->depth,
-			   fb->bits_per_pixel,
-			   atomic_read(&fb->refcount.refcount));
-	}
-
-	mutex_unlock(&drm->mode_config.fb_lock);
-
-	return 0;
-}
-
-static struct drm_info_list tegra_debugfs_list[] = {
-	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
-};
-
-static int tegra_debugfs_init(struct drm_minor *minor)
-{
-	return drm_debugfs_create_files(tegra_debugfs_list,
-					ARRAY_SIZE(tegra_debugfs_list),
-					minor->debugfs_root, minor);
-}
-
-static void tegra_debugfs_cleanup(struct drm_minor *minor)
-{
-	drm_debugfs_remove_files(tegra_debugfs_list,
-				 ARRAY_SIZE(tegra_debugfs_list), minor);
-}
-#endif
-
-struct drm_driver tegra_drm_driver = {
-	.driver_features = DRIVER_MODESET | DRIVER_GEM,
-	.load = tegra_drm_load,
-	.unload = tegra_drm_unload,
-	.open = tegra_drm_open,
-	.preclose = tegra_drm_preclose,
-	.lastclose = tegra_drm_lastclose,
-
-	.get_vblank_counter = tegra_drm_get_vblank_counter,
-	.enable_vblank = tegra_drm_enable_vblank,
-	.disable_vblank = tegra_drm_disable_vblank,
-
-#if defined(CONFIG_DEBUG_FS)
-	.debugfs_init = tegra_debugfs_init,
-	.debugfs_cleanup = tegra_debugfs_cleanup,
-#endif
-
-	.gem_free_object = tegra_bo_free_object,
-	.gem_vm_ops = &tegra_bo_vm_ops,
-	.dumb_create = tegra_bo_dumb_create,
-	.dumb_map_offset = tegra_bo_dumb_map_offset,
-	.dumb_destroy = drm_gem_dumb_destroy,
-
-	.ioctls = tegra_drm_ioctls,
-	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
-	.fops = &tegra_drm_fops,
-
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-	.patchlevel = DRIVER_PATCHLEVEL,
-};
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644
index 27ffcf1..0000000
--- a/drivers/gpu/host1x/drm/gr2d.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * drivers/video/tegra/host/gr2d/gr2d.c
- *
- * Tegra Graphics 2D
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-
-#include "channel.h"
-#include "drm.h"
-#include "gem.h"
-#include "job.h"
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "host1x_client.h"
-#include "syncpt.h"
-
-struct gr2d {
-	struct host1x_client client;
-	struct clk *clk;
-	struct host1x_channel *channel;
-	unsigned long *addr_regs;
-};
-
-static inline struct gr2d *to_gr2d(struct host1x_client *client)
-{
-	return container_of(client, struct gr2d, client);
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
-
-static int gr2d_client_init(struct host1x_client *client,
-			    struct drm_device *drm)
-{
-	return 0;
-}
-
-static int gr2d_client_exit(struct host1x_client *client)
-{
-	return 0;
-}
-
-static int gr2d_open_channel(struct host1x_client *client,
-			     struct host1x_drm_context *context)
-{
-	struct gr2d *gr2d = to_gr2d(client);
-
-	context->channel = host1x_channel_get(gr2d->channel);
-
-	if (!context->channel)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void gr2d_close_channel(struct host1x_drm_context *context)
-{
-	host1x_channel_put(context->channel);
-}
-
-static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
-					  struct drm_file *file,
-					  u32 handle)
-{
-	struct drm_gem_object *gem;
-	struct tegra_bo *bo;
-
-	gem = drm_gem_object_lookup(drm, file, handle);
-	if (!gem)
-		return NULL;
-
-	mutex_lock(&drm->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&drm->struct_mutex);
-
-	bo = to_tegra_bo(gem);
-	return &bo->base;
-}
-
-static int gr2d_submit(struct host1x_drm_context *context,
-		       struct drm_tegra_submit *args, struct drm_device *drm,
-		       struct drm_file *file)
-{
-	struct host1x_job *job;
-	unsigned int num_cmdbufs = args->num_cmdbufs;
-	unsigned int num_relocs = args->num_relocs;
-	unsigned int num_waitchks = args->num_waitchks;
-	struct drm_tegra_cmdbuf __user *cmdbufs =
-		(void * __user)(uintptr_t)args->cmdbufs;
-	struct drm_tegra_reloc __user *relocs =
-		(void * __user)(uintptr_t)args->relocs;
-	struct drm_tegra_waitchk __user *waitchks =
-		(void * __user)(uintptr_t)args->waitchks;
-	struct drm_tegra_syncpt syncpt;
-	int err;
-
-	/* We don't yet support other than one syncpt_incr struct per submit */
-	if (args->num_syncpts != 1)
-		return -EINVAL;
-
-	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
-			       args->num_relocs, args->num_waitchks);
-	if (!job)
-		return -ENOMEM;
-
-	job->num_relocs = args->num_relocs;
-	job->num_waitchk = args->num_waitchks;
-	job->client = (u32)args->context;
-	job->class = context->client->class;
-	job->serialize = true;
-
-	while (num_cmdbufs) {
-		struct drm_tegra_cmdbuf cmdbuf;
-		struct host1x_bo *bo;
-
-		err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
-		if (err)
-			goto fail;
-
-		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
-		if (!bo) {
-			err = -ENOENT;
-			goto fail;
-		}
-
-		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
-		num_cmdbufs--;
-		cmdbufs++;
-	}
-
-	err = copy_from_user(job->relocarray, relocs,
-			     sizeof(*relocs) * num_relocs);
-	if (err)
-		goto fail;
-
-	while (num_relocs--) {
-		struct host1x_reloc *reloc = &job->relocarray[num_relocs];
-		struct host1x_bo *cmdbuf, *target;
-
-		cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
-		target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
-		reloc->cmdbuf = cmdbuf;
-		reloc->target = target;
-
-		if (!reloc->target || !reloc->cmdbuf) {
-			err = -ENOENT;
-			goto fail;
-		}
-	}
-
-	err = copy_from_user(job->waitchk, waitchks,
-			     sizeof(*waitchks) * num_waitchks);
-	if (err)
-		goto fail;
-
-	err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
-			     sizeof(syncpt));
-	if (err)
-		goto fail;
-
-	job->syncpt_id = syncpt.id;
-	job->syncpt_incrs = syncpt.incrs;
-	job->timeout = 10000;
-	job->is_addr_reg = gr2d_is_addr_reg;
-
-	if (args->timeout && args->timeout < 10000)
-		job->timeout = args->timeout;
-
-	err = host1x_job_pin(job, context->client->dev);
-	if (err)
-		goto fail;
-
-	err = host1x_job_submit(job);
-	if (err)
-		goto fail_submit;
-
-	args->fence = job->syncpt_end;
-
-	host1x_job_put(job);
-	return 0;
-
-fail_submit:
-	host1x_job_unpin(job);
-fail:
-	host1x_job_put(job);
-	return err;
-}
-
-static struct host1x_client_ops gr2d_client_ops = {
-	.drm_init = gr2d_client_init,
-	.drm_exit = gr2d_client_exit,
-	.open_channel = gr2d_open_channel,
-	.close_channel = gr2d_close_channel,
-	.submit = gr2d_submit,
-};
-
-static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
-{
-	const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
-				      0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
-	unsigned long *bitmap;
-	int i;
-
-	bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
-			      GFP_KERNEL);
-
-	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
-		u32 reg = gr2d_addr_regs[i];
-		bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
-	}
-
-	gr2d->addr_regs = bitmap;
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
-{
-	struct gr2d *gr2d = dev_get_drvdata(dev);
-
-	switch (class) {
-	case HOST1X_CLASS_HOST1X:
-		return reg == 0x2b;
-	case HOST1X_CLASS_GR2D:
-	case HOST1X_CLASS_GR2D_SB:
-		reg &= 0xff;
-		if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
-			return 1;
-	default:
-		return 0;
-	}
-}
-
-static const struct of_device_id gr2d_match[] = {
-	{ .compatible = "nvidia,tegra30-gr2d" },
-	{ .compatible = "nvidia,tegra20-gr2d" },
-	{ },
-};
-
-static int gr2d_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
-	int err;
-	struct gr2d *gr2d = NULL;
-	struct host1x_syncpt **syncpts;
-
-	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
-	if (!gr2d)
-		return -ENOMEM;
-
-	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
-	if (!syncpts)
-		return -ENOMEM;
-
-	gr2d->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(gr2d->clk)) {
-		dev_err(dev, "cannot get clock\n");
-		return PTR_ERR(gr2d->clk);
-	}
-
-	err = clk_prepare_enable(gr2d->clk);
-	if (err) {
-		dev_err(dev, "cannot turn on clock\n");
-		return err;
-	}
-
-	gr2d->channel = host1x_channel_request(dev);
-	if (!gr2d->channel)
-		return -ENOMEM;
-
-	*syncpts = host1x_syncpt_request(dev, false);
-	if (!(*syncpts)) {
-		host1x_channel_free(gr2d->channel);
-		return -ENOMEM;
-	}
-
-	gr2d->client.ops = &gr2d_client_ops;
-	gr2d->client.dev = dev;
-	gr2d->client.class = HOST1X_CLASS_GR2D;
-	gr2d->client.syncpts = syncpts;
-	gr2d->client.num_syncpts = 1;
-
-	err = host1x_register_client(host1x, &gr2d->client);
-	if (err < 0) {
-		dev_err(dev, "failed to register host1x client: %d\n", err);
-		return err;
-	}
-
-	gr2d_init_addr_reg_map(dev, gr2d);
-
-	platform_set_drvdata(pdev, gr2d);
-
-	return 0;
-}
-
-static int __exit gr2d_remove(struct platform_device *pdev)
-{
-	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
-	struct gr2d *gr2d = platform_get_drvdata(pdev);
-	unsigned int i;
-	int err;
-
-	err = host1x_unregister_client(host1x, &gr2d->client);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
-		return err;
-	}
-
-	for (i = 0; i < gr2d->client.num_syncpts; i++)
-		host1x_syncpt_free(gr2d->client.syncpts[i]);
-
-	host1x_channel_free(gr2d->channel);
-	clk_disable_unprepare(gr2d->clk);
-
-	return 0;
-}
-
-struct platform_driver tegra_gr2d_driver = {
-	.probe = gr2d_probe,
-	.remove = __exit_p(gr2d_remove),
-	.driver = {
-		.owner = THIS_MODULE,
-		.name = "gr2d",
-		.of_match_table = gr2d_match,
-	}
-};
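
The deleted gr2d code above marks which 2D registers carry buffer addresses: gr2d_init_addr_reg_map() sets one bit per register offset in a small bitmap, and gr2d_is_addr_reg() tests that bit so the job firewall knows which command words need a relocation. A minimal standalone sketch of the same bitmap technique, with hypothetical names and run in user space rather than the kernel:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_REGS	256
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(n)	((n) / BITS_PER_LONG)
#define BIT_MASK(n)	(1UL << ((n) % BITS_PER_LONG))

static unsigned long addr_regs[NUM_REGS / BITS_PER_LONG];

/* Mark the registers that take buffer addresses, one bit per offset. */
static void init_addr_reg_map(const unsigned int *regs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		addr_regs[BIT_WORD(regs[i])] |= BIT_MASK(regs[i]);
}

/* The firewall asks this question for every register a job writes. */
static bool is_addr_reg(unsigned int reg)
{
	reg &= 0xff;
	return addr_regs[BIT_WORD(reg)] & BIT_MASK(reg);
}

int main(void)
{
	static const unsigned int regs[] = { 0x1a, 0x1b, 0x2b, 0x31 };

	init_addr_reg_map(regs, sizeof(regs) / sizeof(regs[0]));
	printf("0x2b: %d, 0x2c: %d\n", is_addr_reg(0x2b), is_addr_reg(0x2c));
	return 0;
}
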
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644
index a2bc1e6..0000000
--- a/drivers/gpu/host1x/host1x.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Tegra host1x driver
- *
- * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef __LINUX_HOST1X_H
-#define __LINUX_HOST1X_H
-
-enum host1x_class {
-	HOST1X_CLASS_HOST1X	= 0x1,
-	HOST1X_CLASS_GR2D	= 0x51,
-	HOST1X_CLASS_GR2D_SB    = 0x52
-};
-
-#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644
index 4c1f10b..0000000
--- a/drivers/gpu/host1x/host1x_bo.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Tegra host1x Memory Management Abstraction header
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HOST1X_BO_H
-#define _HOST1X_BO_H
-
-struct host1x_bo;
-
-struct host1x_bo_ops {
-	struct host1x_bo *(*get)(struct host1x_bo *bo);
-	void (*put)(struct host1x_bo *bo);
-	dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
-	void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
-	void *(*mmap)(struct host1x_bo *bo);
-	void (*munmap)(struct host1x_bo *bo, void *addr);
-	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
-	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
-};
-
-struct host1x_bo {
-	const struct host1x_bo_ops *ops;
-};
-
-static inline void host1x_bo_init(struct host1x_bo *bo,
-				  const struct host1x_bo_ops *ops)
-{
-	bo->ops = ops;
-}
-
-static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
-{
-	return bo->ops->get(bo);
-}
-
-static inline void host1x_bo_put(struct host1x_bo *bo)
-{
-	bo->ops->put(bo);
-}
-
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
-				       struct sg_table **sgt)
-{
-	return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
-	bo->ops->unpin(bo, sgt);
-}
-
-static inline void *host1x_bo_mmap(struct host1x_bo *bo)
-{
-	return bo->ops->mmap(bo);
-}
-
-static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
-{
-	bo->ops->munmap(bo, addr);
-}
-
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
-	return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
-				    unsigned int pagenum, void *addr)
-{
-	bo->ops->kunmap(bo, pagenum, addr);
-}
-
-#endif
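
The host1x_bo abstraction deleted here (its declarations apparently move into the new <linux/host1x.h> that channel_hw.c and job.c include below) hides the concrete GEM object behind an ops table: struct host1x_bo carries only a pointer to host1x_bo_ops, and the inline wrappers dispatch through it. A simplified, self-contained sketch of that pattern, reduced to reference counting and using hypothetical names:

#include <stdlib.h>

/* Ops table and wrappers, shaped like the deleted header. */
struct my_bo;

struct my_bo_ops {
	struct my_bo *(*get)(struct my_bo *bo);
	void (*put)(struct my_bo *bo);
};

struct my_bo {
	const struct my_bo_ops *ops;
};

static inline struct my_bo *my_bo_get(struct my_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void my_bo_put(struct my_bo *bo)
{
	bo->ops->put(bo);
}

/* A concrete buffer object embeds struct my_bo and adds its own state. */
struct fake_gem_bo {
	struct my_bo base;
	int refcount;
};

static struct my_bo *fake_get(struct my_bo *bo)
{
	struct fake_gem_bo *obj = (struct fake_gem_bo *)bo;

	obj->refcount++;
	return bo;
}

static void fake_put(struct my_bo *bo)
{
	struct fake_gem_bo *obj = (struct fake_gem_bo *)bo;

	if (--obj->refcount == 0)
		free(obj);
}

static const struct my_bo_ops fake_ops = {
	.get = fake_get,
	.put = fake_put,
};

int main(void)
{
	struct fake_gem_bo *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;

	obj->base.ops = &fake_ops;
	obj->refcount = 1;

	my_bo_get(&obj->base);	/* refcount -> 2 */
	my_bo_put(&obj->base);	/* refcount -> 1 */
	my_bo_put(&obj->base);	/* refcount -> 0, freed */
	return 0;
}
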
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644
index 9b50863..0000000
--- a/drivers/gpu/host1x/hw/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-y = -Idrivers/gpu/host1x
-
-host1x-hw-objs  = \
-	host1x01.o
-
-obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 2ee4ad5..37e2a63 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -20,10 +20,10 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
 
 /*
  * Put the restart at the end of pushbuffer memory
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index ee19962..4608257 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -16,15 +16,15 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/host1x.h>
 #include <linux/slab.h>
+
 #include <trace/events/host1x.h>
 
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
 
 #define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
@@ -67,6 +67,22 @@
 	}
 }
 
+static inline void synchronize_syncpt_base(struct host1x_job *job)
+{
+	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+	struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+	u32 id, value;
+
+	value = host1x_syncpt_read_max(sp);
+	id = sp->base->id;
+
+	host1x_cdma_push(&job->channel->cdma,
+			 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+				HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
+			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
+}
+
 static int channel_submit(struct host1x_job *job)
 {
 	struct host1x_channel *ch = job->channel;
@@ -118,6 +134,10 @@
 					host1x_syncpt_read_max(sp)));
 	}
 
+	/* Synchronize base register to allow using it for relative waiting */
+	if (sp->base)
+		synchronize_syncpt_base(job);
+
 	syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
 
 	job->syncpt_end = syncval;
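
The new synchronize_syncpt_base() above pushes a LOAD_SYNCPT_BASE write that copies the syncpoint's current max value into its base register before the job runs. Using the field helpers defined in the hw_host1x0*_uclass.h headers in this diff (base index in bits 31:24, value in bits 23:0), the data word it pushes is assembled like the following standalone sketch, which is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Restated from the hw_host1x0*_uclass.h headers in this diff. */
static inline uint32_t load_syncpt_base_base_indx_f(uint32_t v)
{
	return (v & 0xff) << 24;
}

static inline uint32_t load_syncpt_base_value_f(uint32_t v)
{
	return (v & 0xffffff) << 0;
}

int main(void)
{
	uint32_t base_id = 3;		/* sp->base->id */
	uint32_t max = 0x00012345;	/* host1x_syncpt_read_max(sp) */

	uint32_t word = load_syncpt_base_base_indx_f(base_id) |
			load_syncpt_base_value_f(max);

	/* Base 3 loaded with 0x012345. */
	printf("LOAD_SYNCPT_BASE data word: 0x%08x\n", word); /* 0x03012345 */
	return 0;
}
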
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 334c038..640c75c 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -15,18 +15,10 @@
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
 
 #define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
 
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index a14e91c..859b73b 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -17,17 +17,17 @@
  */
 
 /* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
 
 /* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
 
-#include "dev.h"
+#include "../dev.h"
 
 int host1x01_init(struct host1x *host)
 {
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644
index 0000000..e98caca
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x01.h"
+#include "host1x01_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x02_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644
index 0000000..f748660
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x02.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X02_H
+#define HOST1X_HOST1X02_H
+
+struct host1x;
+
+int host1x02_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
index 42f3ce1..f755359 100644
--- a/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -111,6 +111,12 @@
 }
 #define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
 	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+	return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+	host1x_uclass_load_syncpt_base_r()
 static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
 {
 	return (v & 0xff) << 24;
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644
index 0000000..e490bcd
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
+#define HOST1X_HW_HOST1X02_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+	host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+	host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+	host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+	host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+	host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+	host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+	host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+	return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+	host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+	host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+	return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+	host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+	return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+	host1x_channel_dmactrl_dmainitget()
+
+#endif
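
The naming convention documented at the top of this header pairs _f() helpers, which build register values, with _v() helpers, which decode them. A short standalone sketch of the decode direction using the DMACTRL accessors defined above, with the definitions restated so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

/* Restated from hw_host1x02_channel.h above. */
static inline uint32_t dmactrl_dmastop(void)
{
	return 1 << 0;
}

static inline uint32_t dmactrl_dmagetrst(void)
{
	return 1 << 1;
}

static inline uint32_t dmactrl_dmastop_v(uint32_t r)
{
	return (r >> 0) & 0x1;
}

int main(void)
{
	/* Build a DMACTRL value with both DMASTOP and DMAGETRST set. */
	uint32_t dmactrl = dmactrl_dmastop() | dmactrl_dmagetrst();

	/* The _v() helper recovers a single field from the full value. */
	printf("DMACTRL = 0x%02x, DMASTOP = %u\n",
	       dmactrl, dmactrl_dmastop_v(dmactrl));	/* 0x03, 1 */
	return 0;
}
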
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644
index 0000000..4495401
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_SYNC_H
+#define HOST1X_HW_HOST1X02_SYNC_H
+
+#define REGISTER_STRIDE	4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+	return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+	return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+	host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+	host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+	host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+	host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+	host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+	return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+	host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+	return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+	host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+	host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+	host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+	return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+	host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+	return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+	host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+	return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+	host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+	host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+	return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+	host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+	return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+	host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+	host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+	host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+	host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+	return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+	host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+	host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+	host1x_sync_cbstat_cbclass_v(r)
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644
index 0000000..a3b3c98
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_UCLASS_H
+#define HOST1X_HW_HOST1X02_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+
+#endif
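
The wait-syncpt-base fields in this header are what make the base synchronization in channel_hw.c useful: a WAIT_SYNCPT_BASE word names a syncpoint, a base register and a 16-bit offset, so a command stream can wait for the syncpoint to reach base + offset instead of an absolute value. A standalone sketch of composing such a word, restating the field helpers above:

#include <stdint.h>
#include <stdio.h>

/* Restated from the uclass header above. */
static inline uint32_t wait_syncpt_base_indx_f(uint32_t v)
{
	return (v & 0xff) << 24;	/* syncpoint index */
}

static inline uint32_t wait_syncpt_base_base_indx_f(uint32_t v)
{
	return (v & 0xff) << 16;	/* base register index */
}

static inline uint32_t wait_syncpt_base_offset_f(uint32_t v)
{
	return (v & 0xffff) << 0;	/* offset relative to the base */
}

int main(void)
{
	uint32_t syncpt_id = 26, base_id = 3, offset = 4;

	/* "Wait until syncpoint 26 reaches the value of base 3 plus 4." */
	uint32_t word = wait_syncpt_base_indx_f(syncpt_id) |
			wait_syncpt_base_base_indx_f(base_id) |
			wait_syncpt_base_offset_f(offset);

	printf("WAIT_SYNCPT_BASE data word: 0x%08x\n", word); /* 0x1a030004 */
	return 0;
}
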
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index b592eef..b26dcc8 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -22,8 +22,8 @@
 #include <linux/io.h>
 #include <asm/mach/irq.h>
 
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
 
 /*
  * Sync point threshold interrupt service function
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 0cf6095..56e8539 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -18,8 +18,8 @@
 
 #include <linux/io.h>
 
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
 
 /*
  * Write the current syncpoint value back to hw.
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index c4e1050..de5ec33 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -18,6 +18,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/host1x.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
 
 #include "channel.h"
 #include "dev.h"
-#include "host1x_bo.h"
 #include "job.h"
 #include "syncpt.h"
 
@@ -264,7 +264,7 @@
 }
 
 static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
-		       unsigned int offset)
+			unsigned int offset)
 {
 	offset *= sizeof(u32);
 
@@ -281,7 +281,7 @@
 	unsigned int num_relocs;
 	struct host1x_reloc *reloc;
 
-	struct host1x_bo *cmdbuf_id;
+	struct host1x_bo *cmdbuf;
 	unsigned int offset;
 
 	u32 words;
@@ -291,25 +291,37 @@
 	u32 count;
 };
 
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+		if (!fw->num_relocs)
+			return -EINVAL;
+
+		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+			return -EINVAL;
+
+		fw->num_relocs--;
+		fw->reloc++;
+	}
+
+	return 0;
+}
+
 static int check_mask(struct host1x_firewall *fw)
 {
 	u32 mask = fw->mask;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (mask) {
 		if (fw->words == 0)
 			return -EINVAL;
 
 		if (mask & 1) {
-			if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-				if (!fw->num_relocs)
-					return -EINVAL;
-				if (!check_reloc(fw->reloc, fw->cmdbuf_id,
-						 fw->offset))
-					return -EINVAL;
-				fw->reloc++;
-				fw->num_relocs--;
-			}
+			ret = check_register(fw, reg);
+			if (ret < 0)
+				return ret;
+
 			fw->words--;
 			fw->offset++;
 		}
@@ -324,19 +336,16 @@
 {
 	u32 count = fw->count;
 	u32 reg = fw->reg;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, reg);
+		if (ret < 0)
+			return ret;
+
 		reg++;
 		fw->words--;
 		fw->offset++;
@@ -348,21 +357,17 @@
 
 static int check_nonincr(struct host1x_firewall *fw)
 {
-	int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
 	u32 count = fw->count;
+	int ret;
 
 	while (count) {
 		if (fw->words == 0)
 			return -EINVAL;
 
-		if (is_addr_reg) {
-			if (!fw->num_relocs)
-				return -EINVAL;
-			if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-				return -EINVAL;
-			fw->reloc++;
-			fw->num_relocs--;
-		}
+		ret = check_register(fw, fw->reg);
+		if (ret < 0)
+			return ret;
+
 		fw->words--;
 		fw->offset++;
 		count--;
@@ -381,7 +386,7 @@
 		return 0;
 
 	fw->words = g->words;
-	fw->cmdbuf_id = g->bo;
+	fw->cmdbuf = g->bo;
 	fw->offset = 0;
 
 	while (fw->words && !err) {
@@ -436,10 +441,6 @@
 		}
 	}
 
-	/* No relocs should remain at this point */
-	if (fw->num_relocs)
-		err = -EINVAL;
-
 out:
 	return err;
 }
@@ -493,6 +494,10 @@
 		offset += g->words * sizeof(u32);
 	}
 
+	/* No relocs should remain at this point */
+	if (fw.num_relocs)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index fba45f2..33a697d 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -34,15 +34,6 @@
 	u32 pad;
 };
 
-struct host1x_reloc {
-	struct host1x_bo *cmdbuf;
-	u32 cmdbuf_offset;
-	struct host1x_bo *target;
-	u32 target_offset;
-	u32 shift;
-	u32 pad;
-};
-
 struct host1x_waitchk {
 	struct host1x_bo *bo;
 	u32 offset;
@@ -56,105 +47,6 @@
 };
 
 /*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
-	/* When refcount goes to zero, job can be freed */
-	struct kref ref;
-
-	/* List entry */
-	struct list_head list;
-
-	/* Channel where job is submitted to */
-	struct host1x_channel *channel;
-
-	u32 client;
-
-	/* Gathers and their memory */
-	struct host1x_job_gather *gathers;
-	unsigned int num_gathers;
-
-	/* Wait checks to be processed at submit time */
-	struct host1x_waitchk *waitchk;
-	unsigned int num_waitchk;
-	u32 waitchk_mask;
-
-	/* Array of handles to be pinned & unpinned */
-	struct host1x_reloc *relocarray;
-	unsigned int num_relocs;
-	struct host1x_job_unpin_data *unpins;
-	unsigned int num_unpins;
-
-	dma_addr_t *addr_phys;
-	dma_addr_t *gather_addr_phys;
-	dma_addr_t *reloc_addr_phys;
-
-	/* Sync point id, number of increments and end related to the submit */
-	u32 syncpt_id;
-	u32 syncpt_incrs;
-	u32 syncpt_end;
-
-	/* Maximum time to wait for this job */
-	unsigned int timeout;
-
-	/* Index and number of slots used in the push buffer */
-	unsigned int first_get;
-	unsigned int num_slots;
-
-	/* Copy of gathers */
-	size_t gather_copy_size;
-	dma_addr_t gather_copy;
-	u8 *gather_copy_mapped;
-
-	/* Check if register is marked as an address reg */
-	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
-	/* Request a SETCLASS to this class */
-	u32 class;
-
-	/* Add a channel wait for previous ops to complete */
-	bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accomodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-				    u32 num_cmdbufs, u32 num_relocs,
-				    u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
-			   u32 words, u32 offset);
-
-/*
- * Increment reference going to host1x_job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement reference job, free if goes to zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Handles also patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
-/*
  * Dump contents of job to debug output.
  */
 void host1x_job_dump(struct device *dev, struct host1x_job *job);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 409745b..159c479 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -30,9 +30,32 @@
 #define SYNCPT_CHECK_PERIOD (2 * HZ)
 #define MAX_STUCK_CHECK_COUNT 15
 
-static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
-						  struct device *dev,
-						  bool client_managed)
+static struct host1x_syncpt_base *
+host1x_syncpt_base_request(struct host1x *host)
+{
+	struct host1x_syncpt_base *bases = host->bases;
+	unsigned int i;
+
+	for (i = 0; i < host->info->nb_bases; i++)
+		if (!bases[i].requested)
+			break;
+
+	if (i >= host->info->nb_bases)
+		return NULL;
+
+	bases[i].requested = true;
+	return &bases[i];
+}
+
+static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
+{
+	if (base)
+		base->requested = false;
+}
+
+static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+						 struct device *dev,
+						 unsigned long flags)
 {
 	int i;
 	struct host1x_syncpt *sp = host->syncpt;
@@ -44,6 +67,12 @@
 	if (i >= host->info->nb_pts)
 		return NULL;
 
+	if (flags & HOST1X_SYNCPT_HAS_BASE) {
+		sp->base = host1x_syncpt_base_request(host);
+		if (!sp->base)
+			return NULL;
+	}
+
 	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
 			dev ? dev_name(dev) : NULL);
 	if (!name)
@@ -51,7 +80,11 @@
 
 	sp->dev = dev;
 	sp->name = name;
-	sp->client_managed = client_managed;
+
+	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
+		sp->client_managed = true;
+	else
+		sp->client_managed = false;
 
 	return sp;
 }
@@ -303,25 +336,35 @@
 
 int host1x_syncpt_init(struct host1x *host)
 {
+	struct host1x_syncpt_base *bases;
 	struct host1x_syncpt *syncpt;
 	int i;
 
 	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
-		GFP_KERNEL);
+			      GFP_KERNEL);
 	if (!syncpt)
 		return -ENOMEM;
 
-	for (i = 0; i < host->info->nb_pts; ++i) {
+	bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+			     GFP_KERNEL);
+	if (!bases)
+		return -ENOMEM;
+
+	for (i = 0; i < host->info->nb_pts; i++) {
 		syncpt[i].id = i;
 		syncpt[i].host = host;
 	}
 
+	for (i = 0; i < host->info->nb_bases; i++)
+		bases[i].id = i;
+
 	host->syncpt = syncpt;
+	host->bases = bases;
 
 	host1x_syncpt_restore(host);
 
 	/* Allocate sync point to use for clearing waits for expired fences */
-	host->nop_sp = _host1x_syncpt_alloc(host, NULL, false);
+	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
 	if (!host->nop_sp)
 		return -ENOMEM;
 
@@ -329,10 +372,10 @@
 }
 
 struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed)
+					    unsigned long flags)
 {
 	struct host1x *host = dev_get_drvdata(dev->parent);
-	return _host1x_syncpt_alloc(host, dev, client_managed);
+	return host1x_syncpt_alloc(host, dev, flags);
 }
 
 void host1x_syncpt_free(struct host1x_syncpt *sp)
@@ -340,7 +383,9 @@
 	if (!sp)
 		return;
 
+	host1x_syncpt_base_free(sp->base);
 	kfree(sp->name);
+	sp->base = NULL;
 	sp->dev = NULL;
 	sp->name = NULL;
 	sp->client_managed = false;
@@ -354,6 +399,25 @@
 		kfree(sp->name);
 }
 
+/*
+ * Read max. It indicates how many operations there are in queue, either in
+ * channel or in a software thread.
+ */
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->min_val);
+}
+
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
 	return host->info->nb_pts;
@@ -375,3 +439,13 @@
 		return NULL;
 	return host->syncpt + id;
 }
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
+{
+	return sp ? sp->base : NULL;
+}
+
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
+{
+	return base->id;
+}
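
host1x_syncpt_base_request()/host1x_syncpt_base_free() above are a plain first-fit allocator over the fixed wait-base table; the HOST1X_SYNCPT_HAS_BASE flag only decides whether host1x_syncpt_alloc() asks for one. A self-contained sketch of the same bookkeeping (pool size and names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define NB_BASES 8	/* the real count comes from host->info->nb_bases */

struct syncpt_base {
	unsigned int id;
	bool requested;
};

static struct syncpt_base bases[NB_BASES];

/* First-fit: hand out the lowest-numbered base nobody has claimed. */
static struct syncpt_base *base_request(void)
{
	unsigned int i;

	for (i = 0; i < NB_BASES; i++)
		if (!bases[i].requested)
			break;

	if (i >= NB_BASES)
		return NULL;		/* pool exhausted */

	bases[i].requested = true;
	return &bases[i];
}

static void base_free(struct syncpt_base *base)
{
	if (base)
		base->requested = false;
}

int main(void)
{
	unsigned int i;
	struct syncpt_base *base;

	for (i = 0; i < NB_BASES; i++)
		bases[i].id = i;

	base = base_request();
	printf("got base %u\n", base->id);	/* got base 0 */
	base_free(base);
	return 0;
}

Freeing just clears the flag, mirroring host1x_syncpt_base_free().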
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 267c0b9..9056465 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -20,6 +20,7 @@
 #define __HOST1X_SYNCPT_H
 
 #include <linux/atomic.h>
+#include <linux/host1x.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
@@ -30,6 +31,11 @@
 /* Reserved for replacing an expired wait with a NOP */
 #define HOST1X_SYNCPT_RESERVED			0
 
+struct host1x_syncpt_base {
+	unsigned int id;
+	bool requested;
+};
+
 struct host1x_syncpt {
 	int id;
 	atomic_t min_val;
@@ -39,6 +45,7 @@
 	bool client_managed;
 	struct host1x *host;
 	struct device *dev;
+	struct host1x_syncpt_base *base;
 
 	/* interrupt data */
 	struct host1x_syncpt_intr intr;
@@ -50,25 +57,6 @@
 /*  Free sync point array */
 void host1x_syncpt_deinit(struct host1x *host);
 
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- * */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
-	smp_rmb();
-	return (u32)atomic_read(&sp->min_val);
-}
-
 /* Return number of sync point supported. */
 int host1x_syncpt_nb_pts(struct host1x *host);
 
@@ -112,9 +100,6 @@
 	return (min == max);
 }
 
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
@@ -130,16 +115,9 @@
 /* Read current wait base value into shadow register and return it. */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
 
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
 
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
-			long timeout, u32 *value);
-
 /* Check if sync point id is valid. */
 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 {
@@ -149,14 +127,4 @@
 /* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
 int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
 
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-					    bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
 #endif
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c044699..e75d015 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1734,6 +1734,7 @@
  */
 struct input_dev *input_allocate_device(void)
 {
+	static atomic_t input_no = ATOMIC_INIT(0);
 	struct input_dev *dev;
 
 	dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@
 		device_initialize(&dev->dev);
 		mutex_init(&dev->mutex);
 		spin_lock_init(&dev->event_lock);
+		init_timer(&dev->timer);
 		INIT_LIST_HEAD(&dev->h_list);
 		INIT_LIST_HEAD(&dev->node);
 
+		dev_set_name(&dev->dev, "input%ld",
+			     (unsigned long) atomic_inc_return(&input_no) - 1);
+
 		__module_get(THIS_MODULE);
 	}
 
@@ -2019,7 +2024,6 @@
  */
 int input_register_device(struct input_dev *dev)
 {
-	static atomic_t input_no = ATOMIC_INIT(0);
 	struct input_devres *devres = NULL;
 	struct input_handler *handler;
 	unsigned int packet_size;
@@ -2059,7 +2063,6 @@
 	 * If delay and period are pre-set by the driver, then autorepeating
 	 * is handled by the driver itself and we don't do it in input.c.
 	 */
-	init_timer(&dev->timer);
 	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
 		dev->timer.data = (long) dev;
 		dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@
 	if (!dev->setkeycode)
 		dev->setkeycode = input_default_setkeycode;
 
-	dev_set_name(&dev->dev, "input%ld",
-		     (unsigned long) atomic_inc_return(&input_no) - 1);
-
 	error = device_add(&dev->dev);
 	if (error)
 		goto err_free_vals;
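
The input.c hunks move name assignment (and timer init) from input_register_device() into input_allocate_device(), so a device carries its "input%ld" name as soon as it is allocated. A user-space sketch of the counter-based naming, with C11 atomics standing in for the kernel's atomic_t (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long input_no;	/* stands in for the static counter in input.c */

/* Hand out "input0", "input1", ...; the atomic increment keeps names unique
 * even if several devices are allocated concurrently. */
static void allocate_name(char *buf, size_t len)
{
	long n = atomic_fetch_add(&input_no, 1);	/* returns the old value */

	snprintf(buf, len, "input%ld", n);
}

int main(void)
{
	char a[16], b[16];

	allocate_name(a, sizeof(a));
	allocate_name(b, sizeof(b));
	printf("%s %s\n", a, b);	/* input0 input1 */
	return 0;
}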
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 134c3b4..a2e758d 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -786,10 +786,17 @@
 	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
 	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
 
-	if (pdata)
+	if (pdata) {
 		error = pxa27x_keypad_build_keycode(keypad);
-	else
+	} else {
 		error = pxa27x_keypad_build_keycode_from_dt(keypad);
+		/*
+		 * Data that we get from DT resides in dynamically
+		 * allocated memory so we need to update our pdata
+		 * pointer.
+		 */
+		pdata = keypad->pdata;
+	}
 	if (error) {
 		dev_err(&pdev->dev, "failed to build keycode\n");
 		goto failed_put_clk;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 082684e..9365535 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -351,7 +351,9 @@
 	if (status) {
 		if (status == -ESHUTDOWN)
 			return;
-		dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+		dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+				    __func__, status);
+		goto out;
 	}
 
 	/* Special keys */
@@ -418,8 +420,12 @@
 	     dev->ctl_data->byte[2],
 	     dev->ctl_data->byte[3]);
 
-	if (status)
-		dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+	if (status) {
+		if (status == -ESHUTDOWN)
+			return;
+		dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+				    __func__, status);
+	}
 
 	spin_lock(&dev->ctl_submit_lock);
 
@@ -427,7 +433,7 @@
 
 	if (likely(!dev->shutdown)) {
 
-		if (dev->buzzer_pending) {
+		if (dev->buzzer_pending || status) {
 			dev->buzzer_pending = 0;
 			dev->ctl_urb_pending = 1;
 			cm109_submit_buzz_toggle(dev);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7c5d72a..8365847 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@
 	/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
 	{ { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
 		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+	{ { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_DUALPOINT },		/* Dell XT2 */
 	{ { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },		/* Dell Vostro 1400 */
 	{ { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
 		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },				/* Toshiba Tecra A11-11L */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 78e4de4..52c9ebf9 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -223,21 +223,26 @@
 {
 	unsigned long flags;
 	unsigned char data, str;
-	int i = 0;
+	int count = 0;
+	int retval = 0;
 
 	spin_lock_irqsave(&i8042_lock, flags);
 
-	while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
-		udelay(50);
-		data = i8042_read_data();
-		i++;
-		dbg("%02x <- i8042 (flush, %s)\n",
-		    data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+	while ((str = i8042_read_status()) & I8042_STR_OBF) {
+		if (count++ < I8042_BUFFER_SIZE) {
+			udelay(50);
+			data = i8042_read_data();
+			dbg("%02x <- i8042 (flush, %s)\n",
+			    data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+		} else {
+			retval = -EIO;
+			break;
+		}
 	}
 
 	spin_unlock_irqrestore(&i8042_lock, flags);
 
-	return i;
+	return retval;
 }
 
 /*
@@ -849,7 +854,7 @@
 
 static int i8042_controller_check(void)
 {
-	if (i8042_flush() == I8042_BUFFER_SIZE) {
+	if (i8042_flush()) {
 		pr_err("No controller found\n");
 		return -ENODEV;
 	}
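
The reworked i8042_flush() no longer returns a byte count: it drains up to I8042_BUFFER_SIZE bytes and reports -EIO if the output buffer still will not empty, which is what the simplified "if (i8042_flush())" controller check relies on. A standalone model of that bounded drain (the simulated status/data readers and the limit value are assumptions standing in for the port I/O):

#include <errno.h>
#include <stdio.h>

#define BUFFER_SIZE 16	/* illustrative limit */
#define STR_OBF 0x01

/* Simulated controller that never drains: status always says "output buffer
 * full". Real code polls the i8042 status and data ports. */
static unsigned char read_status(void) { return STR_OBF; }
static unsigned char read_data(void)   { return 0x00; }

static int flush(void)
{
	int count = 0;

	while (read_status() & STR_OBF) {
		if (count++ < BUFFER_SIZE)
			read_data();	/* discard the stale byte */
		else
			return -EIO;	/* still full: treat as "no controller" */
	}

	return 0;
}

int main(void)
{
	printf("flush() = %d\n", flush());	/* -5 (-EIO) for the stuck case */
	return 0;
}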
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79b69ea..e53416a 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1031,6 +1031,7 @@
 }
 
 static enum power_supply_property wacom_battery_props[] = {
+	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_CAPACITY
 };
 
@@ -1042,6 +1043,9 @@
 	int ret = 0;
 
 	switch (psp) {
+		case POWER_SUPPLY_PROP_SCOPE:
+			val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+			break;
 		case POWER_SUPPLY_PROP_CAPACITY:
 			val->intval =
 				wacom->wacom_wac.battery_capacity * 100 / 31;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b2aa503..c59b797 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2054,6 +2054,12 @@
 static const struct wacom_features wacom_features_0x10D =
 	{ "Wacom ISDv4 10D",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
 	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+	{ "Wacom ISDv4 10E",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+	{ "Wacom ISDv4 10F",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x4001 =
 	{ "Wacom ISDv4 4001",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
 	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2248,6 +2254,8 @@
 	{ USB_DEVICE_WACOM(0x100) },
 	{ USB_DEVICE_WACOM(0x101) },
 	{ USB_DEVICE_WACOM(0x10D) },
+	{ USB_DEVICE_WACOM(0x10E) },
+	{ USB_DEVICE_WACOM(0x10F) },
 	{ USB_DEVICE_WACOM(0x300) },
 	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index be12fbf..1ea7523 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -552,9 +552,8 @@
 	struct acpiphp_func *func;
 	int max, pass;
 	LIST_HEAD(add_list);
-	int nr_found;
 
-	nr_found = acpiphp_rescan_slot(slot);
+	acpiphp_rescan_slot(slot);
 	max = acpiphp_max_busnr(bus);
 	for (pass = 0; pass < 2; pass++) {
 		list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -574,9 +573,6 @@
 		}
 	}
 	__pci_bus_assign_resources(bus, &add_list, NULL);
-	/* Nothing more to do here if there are no new devices on this bus. */
-	if (!nr_found && (slot->flags & SLOT_ENABLED))
-		return;
 
 	acpiphp_sanitize_bus(bus);
 	acpiphp_set_hpp_values(bus);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 408a42e..f0d432c1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -771,6 +771,8 @@
 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
 	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
 }
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5cbc4bb..df5e961 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,8 +105,11 @@
 static int sg_add(struct device *, struct class_interface *);
 static void sg_remove(struct device *, struct class_interface *);
 
+static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+
 static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock);
+static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
+							   file descriptor list for device */
 
 static struct class_interface sg_interface = {
 	.add_dev	= sg_add,
@@ -143,7 +146,8 @@
 } Sg_request;
 
 typedef struct sg_fd {		/* holds the state of a file descriptor */
-	struct list_head sfd_siblings; /* protected by sfd_lock of device */
+	/* sfd_siblings is protected by sg_index_lock */
+	struct list_head sfd_siblings;
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
@@ -166,12 +170,13 @@
 
 typedef struct sg_device { /* holds the state of each scsi generic device */
 	struct scsi_device *device;
+	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
 	int sg_tablesize;	/* adapter's max scatter-gather table size */
 	u32 index;		/* device index number */
-	spinlock_t sfd_lock;	/* protect file descriptor list for device */
+	/* sfds is protected by sg_index_lock */
 	struct list_head sfds;
-	struct rw_semaphore o_sem;	/* exclude open should hold this rwsem */
 	volatile char detached;	/* 0->attached, 1->detached pending removal */
+	/* exclude protected by sg_open_exclusive_lock */
 	char exclude;		/* opened for exclusive access */
 	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
 	struct gendisk *disk;
@@ -220,14 +225,35 @@
 	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
 }
 
+static int get_exclude(Sg_device *sdp)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+	ret = sdp->exclude;
+	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+	return ret;
+}
+
+static int set_exclude(Sg_device *sdp, char val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sg_open_exclusive_lock, flags);
+	sdp->exclude = val;
+	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
+	return val;
+}
+
 static int sfds_list_empty(Sg_device *sdp)
 {
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&sdp->sfd_lock, flags);
+	read_lock_irqsave(&sg_index_lock, flags);
 	ret = list_empty(&sdp->sfds);
-	spin_unlock_irqrestore(&sdp->sfd_lock, flags);
+	read_unlock_irqrestore(&sg_index_lock, flags);
 	return ret;
 }
 
@@ -239,6 +265,7 @@
 	struct request_queue *q;
 	Sg_device *sdp;
 	Sg_fd *sfp;
+	int res;
 	int retval;
 
 	nonseekable_open(inode, filp);
@@ -267,52 +294,54 @@
 		goto error_out;
 	}
 
-	if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
-		retval = -EPERM; /* Can't lock it with read only access */
+	if (flags & O_EXCL) {
+		if (O_RDONLY == (flags & O_ACCMODE)) {
+			retval = -EPERM; /* Can't lock it with read only access */
+			goto error_out;
+		}
+		if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
+			retval = -EBUSY;
+			goto error_out;
+		}
+		res = wait_event_interruptible(sdp->o_excl_wait,
+					   ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
+		if (res) {
+			retval = res;	/* -ERESTARTSYS because signal hit process */
+			goto error_out;
+		}
+	} else if (get_exclude(sdp)) {	/* some other fd has an exclusive lock on dev */
+		if (flags & O_NONBLOCK) {
+			retval = -EBUSY;
+			goto error_out;
+		}
+		res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
+		if (res) {
+			retval = res;	/* -ERESTARTSYS because signal hit process */
+			goto error_out;
+		}
+	}
+	if (sdp->detached) {
+		retval = -ENODEV;
 		goto error_out;
 	}
-	if (flags & O_NONBLOCK) {
-		if (flags & O_EXCL) {
-			if (!down_write_trylock(&sdp->o_sem)) {
-				retval = -EBUSY;
-				goto error_out;
-			}
-		} else {
-			if (!down_read_trylock(&sdp->o_sem)) {
-				retval = -EBUSY;
-				goto error_out;
-			}
-		}
-	} else {
-		if (flags & O_EXCL)
-			down_write(&sdp->o_sem);
-		else
-			down_read(&sdp->o_sem);
-	}
-	/* Since write lock is held, no need to check sfd_list */
-	if (flags & O_EXCL)
-		sdp->exclude = 1;	/* used by release lock */
-
 	if (sfds_list_empty(sdp)) {	/* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
 		sdp->sg_tablesize = queue_max_segments(q);
 	}
-	sfp = sg_add_sfp(sdp, dev);
-	if (!IS_ERR(sfp))
+	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;
-		/* retval is already provably zero at this point because of the
-		 * check after retval = scsi_autopm_get_device(sdp->device))
-		 */
 	else {
-		retval = PTR_ERR(sfp);
-
 		if (flags & O_EXCL) {
-			sdp->exclude = 0;	/* undo if error */
-			up_write(&sdp->o_sem);
-		} else
-			up_read(&sdp->o_sem);
+			set_exclude(sdp, 0);	/* undo if error */
+			wake_up_interruptible(&sdp->o_excl_wait);
+		}
+		retval = -ENOMEM;
+		goto error_out;
+	}
+	retval = 0;
 error_out:
+	if (retval) {
 		scsi_autopm_put_device(sdp->device);
 sdp_put:
 		scsi_device_put(sdp->device);
@@ -329,18 +358,13 @@
 {
 	Sg_device *sdp;
 	Sg_fd *sfp;
-	int excl;
 
 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
 		return -ENXIO;
 	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
 
-	excl = sdp->exclude;
-	sdp->exclude = 0;
-	if (excl)
-		up_write(&sdp->o_sem);
-	else
-		up_read(&sdp->o_sem);
+	set_exclude(sdp, 0);
+	wake_up_interruptible(&sdp->o_excl_wait);
 
 	scsi_autopm_put_device(sdp->device);
 	kref_put(&sfp->f_ref, sg_remove_sfp);
@@ -1391,9 +1415,8 @@
 	disk->first_minor = k;
 	sdp->disk = disk;
 	sdp->device = scsidp;
-	spin_lock_init(&sdp->sfd_lock);
 	INIT_LIST_HEAD(&sdp->sfds);
-	init_rwsem(&sdp->o_sem);
+	init_waitqueue_head(&sdp->o_excl_wait);
 	sdp->sg_tablesize = queue_max_segments(q);
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
@@ -1526,13 +1549,11 @@
 
 	/* Need a write lock to set sdp->detached. */
 	write_lock_irqsave(&sg_index_lock, iflags);
-	spin_lock(&sdp->sfd_lock);
 	sdp->detached = 1;
 	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
 		wake_up_interruptible(&sfp->read_wait);
 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
 	}
-	spin_unlock(&sdp->sfd_lock);
 	write_unlock_irqrestore(&sg_index_lock, iflags);
 
 	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2043,7 +2064,7 @@
 
 	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
 	if (!sfp)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	init_waitqueue_head(&sfp->read_wait);
 	rwlock_init(&sfp->rq_list_lock);
@@ -2057,13 +2078,9 @@
 	sfp->cmd_q = SG_DEF_COMMAND_Q;
 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
 	sfp->parentdp = sdp;
-	spin_lock_irqsave(&sdp->sfd_lock, iflags);
-	if (sdp->detached) {
-		spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
-		return ERR_PTR(-ENODEV);
-	}
+	write_lock_irqsave(&sg_index_lock, iflags);
 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
-	spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+	write_unlock_irqrestore(&sg_index_lock, iflags);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
 	if (unlikely(sg_big_buff != def_reserved_size))
 		sg_big_buff = def_reserved_size;
@@ -2113,9 +2130,10 @@
 	struct sg_device *sdp = sfp->parentdp;
 	unsigned long iflags;
 
-	spin_lock_irqsave(&sdp->sfd_lock, iflags);
+	write_lock_irqsave(&sg_index_lock, iflags);
 	list_del(&sfp->sfd_siblings);
-	spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+	write_unlock_irqrestore(&sg_index_lock, iflags);
+	wake_up_interruptible(&sdp->o_excl_wait);
 
 	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
 	schedule_work(&sfp->ew.work);
@@ -2502,7 +2520,7 @@
 	return 0;
 }
 
-/* must be called while holding sg_index_lock and sfd_lock */
+/* must be called while holding sg_index_lock */
 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
 {
 	int k, m, new_interface, blen, usg;
@@ -2587,26 +2605,22 @@
 
 	read_lock_irqsave(&sg_index_lock, iflags);
 	sdp = it ? sg_lookup_dev(it->index) : NULL;
-	if (sdp) {
-		spin_lock(&sdp->sfd_lock);
-		if (!list_empty(&sdp->sfds)) {
-			struct scsi_device *scsidp = sdp->device;
+	if (sdp && !list_empty(&sdp->sfds)) {
+		struct scsi_device *scsidp = sdp->device;
 
-			seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
-			if (sdp->detached)
-				seq_printf(s, "detached pending close ");
-			else
-				seq_printf
-				    (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
-				     scsidp->host->host_no,
-				     scsidp->channel, scsidp->id,
-				     scsidp->lun,
-				     scsidp->host->hostt->emulated);
-			seq_printf(s, " sg_tablesize=%d excl=%d\n",
-				   sdp->sg_tablesize, sdp->exclude);
-			sg_proc_debug_helper(s, sdp);
-		}
-		spin_unlock(&sdp->sfd_lock);
+		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+		if (sdp->detached)
+			seq_printf(s, "detached pending close ");
+		else
+			seq_printf
+			    (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
+			     scsidp->host->host_no,
+			     scsidp->channel, scsidp->id,
+			     scsidp->lun,
+			     scsidp->host->hostt->emulated);
+		seq_printf(s, " sg_tablesize=%d excl=%d\n",
+			   sdp->sg_tablesize, get_exclude(sdp));
+		sg_proc_debug_helper(s, sdp);
 	}
 	read_unlock_irqrestore(&sg_index_lock, iflags);
 	return 0;
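
The sg changes replace the rw-semaphore open logic with a plain flag (sdp->exclude) behind sg_open_exclusive_lock plus the o_excl_wait queue: openers sleep until the flag clears, and set_exclude(sdp, 0) followed by wake_up_interruptible() on release lets them retry. A user-space model of that handshake using a pthread mutex and condition variable (struct and function names are illustrative; only the non-exclusive open path is shown):

#include <pthread.h>
#include <stdbool.h>

struct excl_state {
	pthread_mutex_t lock;	/* plays sg_open_exclusive_lock */
	pthread_cond_t wait;	/* plays sdp->o_excl_wait */
	bool exclude;		/* plays sdp->exclude */
};

static bool get_exclude(struct excl_state *s)
{
	pthread_mutex_lock(&s->lock);
	bool ret = s->exclude;
	pthread_mutex_unlock(&s->lock);
	return ret;
}

static void set_exclude(struct excl_state *s, bool val)
{
	pthread_mutex_lock(&s->lock);
	s->exclude = val;
	pthread_mutex_unlock(&s->lock);
	if (!val)
		pthread_cond_broadcast(&s->wait);	/* wake_up_interruptible() */
}

/* Non-exclusive open path: block while some other fd holds the device
 * exclusively, the role wait_event_interruptible() plays above. */
static void wait_until_not_exclusive(struct excl_state *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->exclude)
		pthread_cond_wait(&s->wait, &s->lock);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct excl_state s = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, false };

	set_exclude(&s, true);		/* O_EXCL open */
	set_exclude(&s, false);		/* release: wake sleepers */
	wait_until_not_exclusive(&s);	/* returns immediately now */
	return get_exclude(&s);		/* 0 */
}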
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f91bc1f..639ba96 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1960,6 +1960,7 @@
 
 		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
 
+		memset(&DevInfo, 0, sizeof(DevInfo));
 		DevInfo.MaxRDMBufferSize = BUFFER_4K;
 		DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
 		DevInfo.u32RxAlignmentCorrection = 0;
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f..5032ff7 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
 config DRM_IMX
 	tristate "DRM Support for Freescale i.MX"
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select VIDEOMODE_HELPERS
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index a2e52a0..c1014eb 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -396,14 +396,14 @@
 
 	/*
 	 * enable drm irq mode.
-	 * - with irq_enabled = 1, we can use the vblank feature.
+	 * - with irq_enabled = true, we can use the vblank feature.
 	 *
 	 * P.S. note that we wouldn't use drm irq handler but
 	 *      just specific driver own one instead because
 	 *      drm framework supports only one irq handler and
 	 *      drivers can well take care of their interrupts
 	 */
-	drm->irq_enabled = 1;
+	drm->irq_enabled = true;
 
 	drm_mode_config_init(drm);
 	imx_drm_mode_config_init(drm);
@@ -423,11 +423,11 @@
 		goto err_init;
 
 	/*
-	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
 	 * by drm timer once a current process gives up ownership of
 	 * vblank event.(after drm_vblank_put function is called)
 	 */
-	imxdrm->drm->vblank_disable_allowed = 1;
+	imxdrm->drm->vblank_disable_allowed = true;
 
 	if (!imx_drm_device_get())
 		ret = -EINVAL;
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 6ccb64f..6ce0af9 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -155,6 +155,9 @@
 	struct oz_app_hdr *app_hdr;
 	struct oz_serial_ctx *ctx;
 
+	if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+		return -EINVAL;
+
 	spin_lock_bh(&g_cdev.lock);
 	pd = g_cdev.active_pd;
 	if (pd)
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index 23db32f..a10cdb1 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -1063,7 +1063,7 @@
 
 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
 {
-	struct serial_icounter_struct icount;
+	struct serial_icounter_struct icount = {};
 	struct sb_uart_icount cnow;
 	struct sb_uart_port *port = state->port;
 
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index c97e0e1..7e10dcd 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -570,6 +570,7 @@
 	ltv_t                   *pLtv;
 	bool_t                  ltvAllocated = FALSE;
 	ENCSTRCT                sEncryption;
+	size_t			len;
 
 #ifdef USE_WDS
 	hcf_16                  hcfPort  = HCF_PORT_0;
@@ -686,7 +687,8 @@
 					break;
 				case CFG_CNF_OWN_NAME:
 					memset(lp->StationName, 0, sizeof(lp->StationName));
-					memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
+					len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
+					strlcpy(lp->StationName, &pLtv->u.u8[2], len);
 					pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
 					break;
 				case CFG_CNF_LOAD_BALANCING:
@@ -1783,6 +1785,7 @@
 {
 	struct wl_private *lp = wl_priv(dev);
 	unsigned long flags;
+	size_t len;
 	int         ret = 0;
 	/*------------------------------------------------------------------------*/
 
@@ -1793,8 +1796,8 @@
 	wl_lock(lp, &flags);
 
 	memset(lp->StationName, 0, sizeof(lp->StationName));
-
-	memcpy(lp->StationName, extra, wrqu->data.length);
+	len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
+	strlcpy(lp->StationName, extra, len);
 
 	/* Commit the adapter parameters */
 	wl_apply(lp);
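
Both wl_priv.c hunks apply the same fix: clamp the caller-supplied length with min_t() before copying into the fixed-size StationName field, and use strlcpy() so the result stays NUL-terminated. A standalone sketch of that pattern (buffer size and names are illustrative; strnlen() additionally guards the source):

#include <stdio.h>
#include <string.h>

#define STATION_NAME_LEN 32	/* illustrative; the driver uses sizeof(lp->StationName) */

/* Copy an untrusted name: never trust the claimed length, never overflow the
 * destination, always leave it NUL-terminated. */
static void set_station_name(char dst[STATION_NAME_LEN],
			     const char *src, size_t claimed_len)
{
	size_t len = strnlen(src, claimed_len);

	if (len > STATION_NAME_LEN - 1)
		len = STATION_NAME_LEN - 1;

	memset(dst, 0, STATION_NAME_LEN);
	memcpy(dst, src, len);
}

int main(void)
{
	char name[STATION_NAME_LEN];

	set_station_name(name, "very-long-station-name-from-userspace", 200);
	printf("%s\n", name);	/* truncated to 31 characters */
	return 0;
}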
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d067285..6b0f75e 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1499,7 +1499,7 @@
 /*
  * Get ip name usart or uart
  */
-static int atmel_get_ip_name(struct uart_port *port)
+static void atmel_get_ip_name(struct uart_port *port)
 {
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@
 		atmel_port->is_usart = false;
 	} else {
 		dev_err(port->dev, "Not supported ip name, set to uart\n");
-		return -EINVAL;
 	}
-
-	return 0;
 }
 
 /*
@@ -2405,9 +2402,7 @@
 	/*
 	 * Get port name of usart or uart
 	 */
-	ret = atmel_get_ip_name(&port->uart);
-	if (ret < 0)
-		goto err_add_port;
+	atmel_get_ip_name(&port->uart);
 
 	return 0;
 
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ba47563..0e808cf 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -642,16 +642,29 @@
 {
 	struct uio_device *idev = vma->vm_private_data;
 	int mi = uio_find_mem_index(vma);
+	struct uio_mem *mem;
 	if (mi < 0)
 		return -EINVAL;
+	mem = idev->info->mem + mi;
+
+	if (vma->vm_end - vma->vm_start > mem->size)
+		return -EINVAL;
 
 	vma->vm_ops = &uio_physical_vm_ops;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
+	/*
+	 * We cannot use the vm_iomap_memory() helper here,
+	 * because vma->vm_pgoff is the map index we looked
+	 * up above in uio_find_mem_index(), rather than an
+	 * actual page offset into the mmap.
+	 *
+	 * So we just do the physical mmap without a page
+	 * offset.
+	 */
 	return remap_pfn_range(vma,
 			       vma->vm_start,
-			       idev->info->mem[mi].addr >> PAGE_SHIFT,
+			       mem->addr >> PAGE_SHIFT,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);
 }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c45f9c0..b21d553 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -904,6 +904,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
 	/* Crucible Devices */
 	{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1b8af46..a7019d1 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1307,3 +1307,9 @@
  * Manufacturer: Crucible Technologies
  */
 #define FTDI_CT_COMET_PID	0x8e08
+
+/*
+ * Product: Z3X Box
+ * Manufacturer: Smart GSM Team
+ */
+#define FTDI_Z3X_PID		0x0011
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index bedf8e4..1e6de4c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,11 +4,6 @@
  * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
  * Copyright (C) 2003 IBM Corp.
  *
- * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
- *  - fixes, improvements and documentation for the baud rate encoding methods
- * Copyright (C) 2013 Reinhard Max <max@suse.de>
- *  - fixes and improvements for the divisor based baud rate encoding method
- *
  * Original driver for 2.2.x by anonymous
  *
  *	This program is free software; you can redistribute it and/or
@@ -134,18 +129,10 @@
 
 
 enum pl2303_type {
-	type_0,		/* H version ? */
-	type_1,		/* H version ? */
-	HX_TA,		/* HX(A) / X(A) / TA version  */ /* TODO: improve */
-	HXD_EA_RA_SA,	/* HXD / EA / RA / SA version */ /* TODO: improve */
-	TB,		/* TB version */
-	HX_CLONE,	/* Cheap and less functional clone of the HX chip */
+	type_0,		/* don't know the difference between type 0 and */
+	type_1,		/* type 1, until someone from prolific tells us... */
+	HX,		/* HX version of the pl2303 chip */
 };
-/*
- * NOTE: don't know the difference between type 0 and type 1,
- * until someone from Prolific tells us...
- * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
- */
 
 struct pl2303_serial_private {
 	enum pl2303_type type;
@@ -185,7 +172,6 @@
 {
 	struct pl2303_serial_private *spriv;
 	enum pl2303_type type = type_0;
-	char *type_str = "unknown (treating as type_0)";
 	unsigned char *buf;
 
 	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -198,53 +184,15 @@
 		return -ENOMEM;
 	}
 
-	if (serial->dev->descriptor.bDeviceClass == 0x02) {
+	if (serial->dev->descriptor.bDeviceClass == 0x02)
 		type = type_0;
-		type_str = "type_0";
-	} else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) {
-		/*
-		 * NOTE: The bcdDevice version is the only difference between
-		 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
-		 */
-		if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
-			/* Check if the device is a clone */
-			pl2303_vendor_read(0x9494, 0, serial, buf);
-			/*
-			 * NOTE: Not sure if this read is really needed.
-			 * The HX returns 0x00, the clone 0x02, but the Windows
-			 * driver seems to ignore the value and continues.
-			 */
-			pl2303_vendor_write(0x0606, 0xaa, serial);
-			pl2303_vendor_read(0x8686, 0, serial, buf);
-			if (buf[0] != 0xaa) {
-				type = HX_CLONE;
-				type_str = "X/HX clone (limited functionality)";
-			} else {
-				type = HX_TA;
-				type_str = "X/HX/TA";
-			}
-			pl2303_vendor_write(0x0606, 0x00, serial);
-		} else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
-								     == 0x400) {
-			type = HXD_EA_RA_SA;
-			type_str = "HXD/EA/RA/SA";
-		} else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
-								     == 0x500) {
-			type = TB;
-			type_str = "TB";
-		} else {
-			dev_info(&serial->interface->dev,
-					   "unknown/unsupported device type\n");
-			kfree(spriv);
-			kfree(buf);
-			return -ENODEV;
-		}
-	} else if (serial->dev->descriptor.bDeviceClass == 0x00
-		   || serial->dev->descriptor.bDeviceClass == 0xFF) {
+	else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
+		type = HX;
+	else if (serial->dev->descriptor.bDeviceClass == 0x00)
 		type = type_1;
-		type_str = "type_1";
-	}
-	dev_dbg(&serial->interface->dev, "device type: %s\n", type_str);
+	else if (serial->dev->descriptor.bDeviceClass == 0xFF)
+		type = type_1;
+	dev_dbg(&serial->interface->dev, "device type: %d\n", type);
 
 	spriv->type = type;
 	usb_set_serial_data(serial, spriv);
@@ -259,10 +207,10 @@
 	pl2303_vendor_read(0x8383, 0, serial, buf);
 	pl2303_vendor_write(0, 1, serial);
 	pl2303_vendor_write(1, 0, serial);
-	if (type == type_0 || type == type_1)
-		pl2303_vendor_write(2, 0x24, serial);
-	else
+	if (type == HX)
 		pl2303_vendor_write(2, 0x44, serial);
+	else
+		pl2303_vendor_write(2, 0x24, serial);
 
 	kfree(buf);
 	return 0;
@@ -316,174 +264,65 @@
 	return retval;
 }
 
-static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
-								      u8 buf[4])
+static void pl2303_encode_baudrate(struct tty_struct *tty,
+					struct usb_serial_port *port,
+					u8 buf[4])
 {
-	/*
-	 * NOTE: Only the values defined in baud_sup are supported !
-	 * => if unsupported values are set, the PL2303 uses 9600 baud instead
-	 * => HX clones just don't work at unsupported baud rates < 115200 baud,
-	 *    for baud rates > 115200 they run at 115200 baud
-	 */
 	const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
-				 4800, 7200, 9600, 14400, 19200, 28800, 38400,
-				 57600, 115200, 230400, 460800, 614400, 921600,
-				 1228800, 2457600, 3000000, 6000000, 12000000 };
-	/*
-	 * NOTE: With the exception of type_0/1 devices, the following
-	 * additional baud rates are supported (tested with HX rev. 3A only):
-	 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
-	 * 403200, 806400.	(*: not HX and HX clones)
-	 *
-	 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
-	 *                 type_0+1: 1228800; RA: 921600; HX clones, SA: 115200
-	 *
-	 * As long as we are not using this encoding method for anything else
-	 * than the type_0+1, HX and HX clone chips, there is no point in
-	 * complicating the code to support them.
-	 */
+	                         4800, 7200, 9600, 14400, 19200, 28800, 38400,
+	                         57600, 115200, 230400, 460800, 500000, 614400,
+	                         921600, 1228800, 2457600, 3000000, 6000000 };
+
+	struct usb_serial *serial = port->serial;
+	struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
+	int baud;
 	int i;
 
+	/*
+	 * NOTE: Only the values defined in baud_sup are supported!
+	 *       => if unsupported values are set, the PL2303 seems to use
+	 *          9600 baud (at least my PL2303X always does)
+	 */
+	baud = tty_get_baud_rate(tty);
+	dev_dbg(&port->dev, "baud requested = %d\n", baud);
+	if (!baud)
+		return;
+
 	/* Set baudrate to nearest supported value */
 	for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
 		if (baud_sup[i] > baud)
 			break;
 	}
+
 	if (i == ARRAY_SIZE(baud_sup))
 		baud = baud_sup[i - 1];
 	else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
 		baud = baud_sup[i - 1];
 	else
 		baud = baud_sup[i];
-	/* Respect the chip type specific baud rate limits */
-	/*
-	 * FIXME: as long as we don't know how to distinguish between the
-	 * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
-	 */
-	if (type == HX_TA)
-		baud = min_t(int, baud, 6000000);
-	else if (type == type_0 || type == type_1)
+
+	/* type_0, type_1 only support up to 1228800 baud */
+	if (spriv->type != HX)
 		baud = min_t(int, baud, 1228800);
-	else if (type == HX_CLONE)
-		baud = min_t(int, baud, 115200);
-	/* Direct (standard) baud rate encoding method */
-	put_unaligned_le32(baud, buf);
 
-	return baud;
-}
-
-static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
-								      u8 buf[4])
-{
-	/*
-	 * Divisor based baud rate encoding method
-	 *
-	 * NOTE: HX clones do NOT support this method.
-	 * It's not clear if the type_0/1 chips support it.
-	 *
-	 * divisor = 12MHz * 32 / baudrate = 2^A * B
-	 *
-	 * with
-	 *
-	 * A = buf[1] & 0x0e
-	 * B = buf[0]  +  (buf[1] & 0x01) << 8
-	 *
-	 * Special cases:
-	 * => 8 < B < 16: device seems to work not properly
-	 * => B <= 8: device uses the max. value B = 512 instead
-	 */
-	unsigned int A, B;
-
-	/*
-	 * NOTE: The Windows driver allows maximum baud rates of 110% of the
-	 * specified maximium value.
-	 * Quick tests with early (2004) HX (rev. A) chips suggest, that even
-	 * higher baud rates (up to the maximum of 24M baud !) are working fine,
-	 * but that should really be tested carefully in "real life" scenarios
-	 * before removing the upper limit completely.
-	 * Baud rates smaller than the specified 75 baud are definitely working
-	 * fine.
-	 */
-	if (type == type_0 || type == type_1)
-		baud = min_t(int, baud, 1228800 * 1.1);
-	else if (type == HX_TA)
-		baud = min_t(int, baud, 6000000 * 1.1);
-	else if (type == HXD_EA_RA_SA)
-		/* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
-		/*
-		 * FIXME: as long as we don't know how to distinguish between
-		 * these chip variants, allow the max. of these values
-		 */
-		baud = min_t(int, baud, 12000000 * 1.1);
-	else if (type == TB)
-		baud = min_t(int, baud, 12000000 * 1.1);
-	/* Determine factors A and B */
-	A = 0;
-	B = 12000000 * 32 / baud;  /* 12MHz */
-	B <<= 1; /* Add one bit for rounding */
-	while (B > (512 << 1) && A <= 14) {
-		A += 2;
-		B >>= 2;
-	}
-	if (A > 14) { /* max. divisor = min. baudrate reached */
-		A = 14;
-		B = 512;
-		/* => ~45.78 baud */
+	if (baud <= 115200) {
+		put_unaligned_le32(baud, buf);
 	} else {
-		B = (B + 1) >> 1; /* Round the last bit */
-	}
-	/* Handle special cases */
-	if (B == 512)
-		B = 0; /* also: 1 to 8 */
-	else if (B < 16)
 		/*
-		 * NOTE: With the current algorithm this happens
-		 * only for A=0 and means that the min. divisor
-		 * (respectively: the max. baudrate) is reached.
+		 * Apparently the formula for higher speeds is:
+		 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
 		 */
-		B = 16;		/* => 24 MBaud */
-	/* Encode the baud rate */
-	buf[3] = 0x80;     /* Select divisor encoding method */
-	buf[2] = 0;
-	buf[1] = (A & 0x0e);		/* A */
-	buf[1] |= ((B & 0x100) >> 8);	/* MSB of B */
-	buf[0] = B & 0xff;		/* 8 LSBs of B */
-	/* Calculate the actual/resulting baud rate */
-	if (B <= 8)
-		B = 512;
-	baud = 12000000 * 32 / ((1 << A) * B);
+		unsigned tmp = 12000000 * 32 / baud;
+		buf[3] = 0x80;
+		buf[2] = 0;
+		buf[1] = (tmp >= 256);
+		while (tmp >= 256) {
+			tmp >>= 2;
+			buf[1] <<= 1;
+		}
+		buf[0] = tmp;
+	}
 
-	return baud;
-}
-
-static void pl2303_encode_baudrate(struct tty_struct *tty,
-					struct usb_serial_port *port,
-					enum pl2303_type type,
-					u8 buf[4])
-{
-	int baud;
-
-	baud = tty_get_baud_rate(tty);
-	dev_dbg(&port->dev, "baud requested = %d\n", baud);
-	if (!baud)
-		return;
-	/*
-	 * There are two methods for setting/encoding the baud rate
-	 * 1) Direct method: encodes the baud rate value directly
-	 *    => supported by all chip types
-	 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
-	 *    => not supported by HX clones (and likely type_0/1 chips)
-	 *
-	 * NOTE: Although the divisor based baud rate encoding method is much
-	 * more flexible, some of the standard baud rate values can not be
-	 * realized exactly. But the difference is very small (max. 0.2%) and
-	 * the device likely uses the same baud rate generator for both methods
-	 * so that there is likley no difference.
-	 */
-	if (type == type_0 || type == type_1 || type == HX_CLONE)
-		baud = pl2303_baudrate_encode_direct(baud, type, buf);
-	else
-		baud = pl2303_baudrate_encode_divisor(baud, type, buf);
 	/* Save resulting baud rate */
 	tty_encode_baud_rate(tty, baud, baud);
 	dev_dbg(&port->dev, "baud set = %d\n", baud);
@@ -540,8 +379,8 @@
 		dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
 	}
 
-	/* For reference:   buf[0]:buf[3] baud rate value */
-	pl2303_encode_baudrate(tty, port, spriv->type, buf);
+	/* For reference buf[0]:buf[3] baud rate value */
+	pl2303_encode_baudrate(tty, port, &buf[0]);
 
 	/* For reference buf[4]=0 is 1 stop bits */
 	/* For reference buf[4]=1 is 1.5 stop bits */
@@ -618,10 +457,10 @@
 	dev_dbg(&port->dev, "0xa1:0x21:0:0  %d - %7ph\n", i, buf);
 
 	if (C_CRTSCTS(tty)) {
-		if (spriv->type == type_0 || spriv->type == type_1)
-			pl2303_vendor_write(0x0, 0x41, serial);
-		else
+		if (spriv->type == HX)
 			pl2303_vendor_write(0x0, 0x61, serial);
+		else
+			pl2303_vendor_write(0x0, 0x41, serial);
 	} else {
 		pl2303_vendor_write(0x0, 0x0, serial);
 	}
@@ -658,7 +497,7 @@
 	struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
 	int result;
 
-	if (spriv->type == type_0 || spriv->type == type_1) {
+	if (spriv->type != HX) {
 		usb_clear_halt(serial->dev, port->write_urb->pipe);
 		usb_clear_halt(serial->dev, port->read_urb->pipe);
 	} else {
@@ -833,7 +672,6 @@
 	result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				 BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
 				 0, NULL, 0, 100);
-	/* NOTE: HX clones don't support sending breaks, -EPIPE is returned */
 	if (result)
 		dev_err(&port->dev, "error sending break = %d\n", result);
 }
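
The restored pl2303_encode_baudrate() first snaps the requested rate to the nearest entry in its supported-rate table, then uses the direct encoding up to 115200 baud and the divisor formula above that. The selection step extracted into a standalone function (table copied from the hunk above; the main() driver is illustrative):

#include <stdio.h>

/* Supported rates, as listed in the restored driver. */
static const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
				4800, 7200, 9600, 14400, 19200, 28800, 38400,
				57600, 115200, 230400, 460800, 500000, 614400,
				921600, 1228800, 2457600, 3000000, 6000000 };
#define NUM_RATES (sizeof(baud_sup) / sizeof(baud_sup[0]))

/* Snap a requested rate to the nearest supported one, mirroring the loop in
 * pl2303_encode_baudrate(). */
static int snap_baud(int baud)
{
	size_t i;

	for (i = 0; i < NUM_RATES; i++)
		if (baud_sup[i] > baud)
			break;

	if (i == NUM_RATES)
		return baud_sup[i - 1];
	if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
		return baud_sup[i - 1];
	return baud_sup[i];
}

int main(void)
{
	/* 110 -> 75, 250000 -> 230400, 10000000 -> 6000000 */
	printf("%d %d %d\n", snap_baud(110), snap_baud(250000), snap_baud(10000000));
	return 0;
}

As the hunk shows, non-HX chip types are additionally capped at 1228800 baud before the value is encoded.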
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 84b685f..a312f04 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -19,10 +19,10 @@
 
 source "drivers/gpu/vga/Kconfig"
 
-source "drivers/gpu/drm/Kconfig"
-
 source "drivers/gpu/host1x/Kconfig"
 
+source "drivers/gpu/drm/Kconfig"
+
 config VGASTATE
        tristate
        default n
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a54ccdc..22ad8524 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -361,37 +361,13 @@
 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
 	struct au1100fb_device *fbdev;
-	unsigned int len;
-	unsigned long start=0, off;
 
 	fbdev = to_au1100fb_device(fbi);
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				vma->vm_end - vma->vm_start,
-				vma->vm_page_prot)) {
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static struct fb_ops au1100fb_ops =
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 301224e..1d02897 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1233,34 +1233,13 @@
  * method mainly to allow the use of the TLB streaming flag (CCA=6)
  */
 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
-
 {
-	unsigned int len;
-	unsigned long start=0, off;
 	struct au1200fb_device *fbdev = info->par;
 
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
-		return -EINVAL;
-	}
-
-	start = fbdev->fb_phys & PAGE_MASK;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
-
-	off = vma->vm_pgoff << PAGE_SHIFT;
-
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		return -EINVAL;
-	}
-
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
 
-	return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-				  vma->vm_end - vma->vm_start,
-				  vma->vm_page_prot);
+	return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
 }
 
 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
diff --git a/fs/dcache.c b/fs/dcache.c
index 20532cb..ae6ebb8 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -542,7 +542,7 @@
  * If ref is non-zero, then decrement the refcount too.
  * Returns dentry requiring refcount drop, or NULL if we're done.
  */
-static inline struct dentry *
+static struct dentry *
 dentry_kill(struct dentry *dentry, int unlock_on_failure)
 	__releases(dentry->d_lock)
 {
@@ -630,7 +630,8 @@
 			goto kill_it;
 	}
 
-	dentry->d_flags |= DCACHE_REFERENCED;
+	if (!(dentry->d_flags & DCACHE_REFERENCED))
+		dentry->d_flags |= DCACHE_REFERENCED;
 	dentry_lru_add(dentry);
 
 	dentry->d_lockref.count--;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 473e09d..810c28f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,7 +34,6 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1605,8 +1604,7 @@
 			}
 
 			spin_unlock_irqrestore(&ep->lock, flags);
-			if (!freezable_schedule_hrtimeout_range(to, slack,
-								HRTIMER_MODE_ABS))
+			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
 				timed_out = 1;
 
 			spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/select.c b/fs/select.c
index 35d4adc7..dfd5cb1 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -238,8 +238,7 @@
 
 	set_current_state(state);
 	if (!pwq->triggered)
-		rc = freezable_schedule_hrtimeout_range(expires, slack,
-							HRTIMER_MODE_ABS);
+		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
 	__set_current_state(TASK_RUNNING);
 
 	/*
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index b46fb45..1d4a920 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -150,6 +150,7 @@
 #define DRIVER_BUS_PCI 0x1
 #define DRIVER_BUS_PLATFORM 0x2
 #define DRIVER_BUS_USB 0x3
+#define DRIVER_BUS_HOST1X 0x4
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -412,7 +413,12 @@
 
 /** File private data */
 struct drm_file {
-	int authenticated;
+	unsigned always_authenticated :1;
+	unsigned authenticated :1;
+	unsigned is_master :1; /* this file private is a master for a minor */
+	/* true when the client has asked us to expose stereo 3D mode flags */
+	unsigned stereo_allowed :1;
+
 	struct pid *pid;
 	kuid_t uid;
 	drm_magic_t magic;
@@ -429,10 +435,8 @@
 	struct file *filp;
 	void *driver_priv;
 
-	int is_master; /* this file private is a master for a minor */
 	struct drm_master *master; /* master this node is currently associated with
 				      N.B. not always minor->master */
-
 	/**
 	 * fbs - List of framebuffers associated with this file.
 	 *
@@ -667,8 +671,6 @@
 	uint32_t pending_read_domains;
 	uint32_t pending_write_domain;
 
-	void *driver_private;
-
 	/**
 	 * dma_buf - dma buf associated with this GEM object
 	 *
@@ -834,12 +836,17 @@
 	/**
 	 * Called by vblank timestamping code.
 	 *
-	 * Return the current display scanout position from a crtc.
+	 * Return the current display scanout position from a crtc, and an
+	 * optional accurate ktime_get timestamp of when position was measured.
 	 *
 	 * \param dev  DRM device.
 	 * \param crtc Id of the crtc to query.
 	 * \param *vpos Target location for current vertical scanout position.
 	 * \param *hpos Target location for current horizontal scanout position.
+	 * \param *stime Target location for timestamp taken immediately before
+	 *               scanout position query. Can be NULL to skip timestamp.
+	 * \param *etime Target location for timestamp taken immediately after
+	 *               scanout position query. Can be NULL to skip timestamp.
 	 *
 	 * Returns vpos as a positive number while in active scanout area.
 	 * Returns vpos as a negative number inside vblank, counting the number
@@ -856,7 +863,8 @@
 	 *
 	 */
 	int (*get_scanout_position) (struct drm_device *dev, int crtc,
-				     int *vpos, int *hpos);
+				     int *vpos, int *hpos, ktime_t *stime,
+				     ktime_t *etime);
 
 	/**
 	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
@@ -922,7 +930,6 @@
 	 *
 	 * Returns 0 on success.
 	 */
-	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -997,27 +1004,6 @@
 #define DRM_MINOR_CONTROL 2
 #define DRM_MINOR_RENDER 3
 
-
-/**
- * debugfs node list. This structure represents a debugfs file to
- * be created by the drm core
- */
-struct drm_debugfs_list {
-	const char *name; /** file name */
-	int (*show)(struct seq_file*, void*); /** show callback */
-	u32 driver_features; /**< Required driver features for this entry */
-};
-
-/**
- * debugfs node structure. This structure represents a debugfs file.
- */
-struct drm_debugfs_node {
-	struct list_head list;
-	struct drm_minor *minor;
-	struct drm_debugfs_list *debugfs_ent;
-	struct dentry *dent;
-};
-
 /**
  * Info file list entry. This structure represents a debugfs or proc file to
  * be created by the drm core
@@ -1046,7 +1032,7 @@
 	int index;			/**< Minor device number */
 	int type;                       /**< Control or render */
 	dev_t device;			/**< Device number for mknod */
-	struct device kdev;		/**< Linux device */
+	struct device *kdev;		/**< Linux device */
 	struct drm_device *dev;
 
 	struct dentry *debugfs_root;
@@ -1081,6 +1067,19 @@
 	struct drm_event_vblank event;
 };
 
+struct drm_vblank_crtc {
+	wait_queue_head_t queue;	/**< VBLANK wait queue */
+	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
+	atomic_t count;			/**< number of VBLANK interrupts */
+	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
+	u32 last;			/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	u32 last_wait;			/* Last vblank seqno waited per CRTC */
+	unsigned int inmodeset;		/* Display driver is setting mode */
+	bool enabled;			/* so we don't call enable more than
+					   once per disable */
+};
+
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -1105,25 +1104,16 @@
 	atomic_t buf_alloc;		/**< Buffer allocation in progress */
 	/*@} */
 
-	/** \name Performance counters */
-	/*@{ */
-	unsigned long counters;
-	enum drm_stat_type types[15];
-	atomic_t counts[15];
-	/*@} */
-
 	struct list_head filelist;
 
 	/** \name Memory management */
 	/*@{ */
 	struct list_head maplist;	/**< Linked list of regions */
-	int map_count;			/**< Number of mappable regions */
 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
 
 	/** \name Context handle management */
 	/*@{ */
 	struct list_head ctxlist;	/**< Linked list of context handles */
-	int ctx_count;			/**< Number of context handles */
 	struct mutex ctxlist_mutex;	/**< For ctxlist */
 
 	struct idr ctx_idr;
@@ -1139,12 +1129,11 @@
 
 	/** \name Context support */
 	/*@{ */
-	int irq_enabled;		/**< True if irq handler is enabled */
+	bool irq_enabled;		/**< True if irq handler is enabled */
 	__volatile__ long context_flag;	/**< Context swapping flag */
 	int last_context;		/**< Last current context */
 	/*@} */
 
-	struct work_struct work;
 	/** \name VBLANK IRQ support */
 	/*@{ */
 
@@ -1154,20 +1143,13 @@
 	 * Once the modeset ioctl *has* been called though, we can safely
 	 * disable them when unused.
 	 */
-	int vblank_disable_allowed;
+	bool vblank_disable_allowed;
 
-	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
-	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
-	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+	/* array of size num_crtcs */
+	struct drm_vblank_crtc *vblank;
+
 	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
-	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
-	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
-					/* for wraparound handling */
-	int *vblank_enabled;            /* so we don't call enable more than
-					   once per disable */
-	int *vblank_inmodeset;          /* Display driver is setting mode */
-	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
 	struct timer_list vblank_disable_timer;
 
 	u32 max_vblank_count;           /**< size of vblank counter register */
@@ -1184,8 +1166,6 @@
 
 	struct device *dev;             /**< Device structure */
 	struct pci_dev *pdev;		/**< PCI device structure */
-	int pci_vendor;			/**< PCI vendor id */
-	int pci_device;			/**< PCI device id */
 #ifdef __alpha__
 	struct pci_controller *hose;
 #endif
@@ -1303,6 +1283,8 @@
 			struct drm_file *file_priv);
 extern int drm_getcap(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
+extern int drm_setclientcap(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
 extern int drm_setversion(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 extern int drm_noop(struct drm_device *dev, void *data,
@@ -1454,7 +1436,6 @@
 extern void drm_master_put(struct drm_master **master);
 
 extern void drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
 extern void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
 extern unsigned int drm_rnodes;
@@ -1474,10 +1455,11 @@
 #if defined(CONFIG_DEBUG_FS)
 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 			    struct dentry *root);
-extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
-				    struct dentry *root, struct drm_minor *minor);
-extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
-                                    struct drm_minor *minor);
+extern int drm_debugfs_create_files(const struct drm_info_list *files,
+				    int count, struct dentry *root,
+				    struct drm_minor *minor);
+extern int drm_debugfs_remove_files(const struct drm_info_list *files,
+				    int count, struct drm_minor *minor);
 extern int drm_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
@@ -1556,8 +1538,6 @@
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-					    size_t size);
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
 void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,10 +1625,11 @@
 
 #include <drm/drm_mem_util.h>
 
-extern int drm_fill_in_dev(struct drm_device *dev,
-			   const struct pci_device_id *ent,
-			   struct drm_driver *driver);
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+				 struct device *parent);
+void drm_dev_free(struct drm_device *dev);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
 /*@}*/
 
 /* PCI section */
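
The hunk above swaps the old drm_fill_in_dev()/drm_get_minor() pair for the
drm_dev_alloc()/drm_dev_free()/drm_dev_register()/drm_dev_unregister()
interface. A minimal sketch of how a platform driver's probe/remove path might
use it; the foo_* names are placeholders, not code from this series:

/* Hypothetical probe/remove path using the drm_dev_* interface declared above. */
static int foo_drm_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (!drm)
		return -ENOMEM;

	platform_set_drvdata(pdev, drm);

	ret = drm_dev_register(drm, 0);
	if (ret) {
		drm_dev_free(drm);
		return ret;
	}
	return 0;
}

static int foo_drm_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_dev_free(drm);
	return 0;
}
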
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 24f4995..f32c5cd 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@
     MODE_ONE_HEIGHT,    /* only one height is supported */
     MODE_ONE_SIZE,      /* only one resolution is supported */
     MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_NO_STEREO,	/* stereo modes not supported */
     MODE_UNVERIFIED = -3, /* mode needs to be reverified */
     MODE_BAD = -2,	/* unspecified reason */
     MODE_ERROR	= -1	/* error condition */
@@ -124,7 +125,10 @@
 	.vscan = (vs), .flags = (f), \
 	.base.type = DRM_MODE_OBJECT_MODE
 
-#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define CRTC_INTERLACE_HALVE_V	(1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE	(1 << 1) /* adjust timings for stereo modes */
+
+#define DRM_MODE_FLAG_3D_MAX	DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
 struct drm_display_mode {
 	/* Header */
@@ -155,8 +159,7 @@
 	int height_mm;
 
 	/* Actual mode we give to hw */
-	int clock_index;
-	int synth_clock;
+	int crtc_clock;		/* in KHz */
 	int crtc_hdisplay;
 	int crtc_hblank_start;
 	int crtc_hblank_end;
@@ -180,6 +183,11 @@
 	int hsync;		/* in kHz */
 };
 
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+	return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
 enum drm_connector_status {
 	connector_status_connected = 1,
 	connector_status_disconnected = 2,
@@ -587,7 +595,7 @@
  */
 struct drm_connector {
 	struct drm_device *dev;
-	struct device kdev;
+	struct device *kdev;
 	struct device_attribute *attr;
 	struct list_head head;
 
@@ -597,6 +605,7 @@
 	int connector_type_id;
 	bool interlace_allowed;
 	bool doublescan_allowed;
+	bool stereo_allowed;
 	struct list_head modes; /* list of modes on this connector */
 
 	enum drm_connector_status status;
@@ -964,6 +973,7 @@
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
+extern struct edid *drm_edid_duplicate(const struct edid *edid);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
-extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
 extern int drm_mode_width(const struct drm_display_mode *mode);
 extern int drm_mode_height(const struct drm_display_mode *mode);
 
@@ -1108,6 +1118,8 @@
 				int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern void drm_set_preferred_mode(struct drm_connector *connector,
+				   int hpref, int vpref);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
@@ -1135,4 +1147,21 @@
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
 extern const char *drm_get_format_name(uint32_t format);
 
+/* Helpers */
+static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+	uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+	return mo ? obj_to_crtc(mo) : NULL;
+}
+
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+	uint32_t id)
+{
+	struct drm_mode_object *mo;
+	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+	return mo ? obj_to_encoder(mo) : NULL;
+}
+
 #endif /* __DRM_CRTC_H__ */
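
drm_mode_is_stereo() plus the new drm_crtc_find()/drm_encoder_find() helpers
replace patterns that drivers and the core ioctl handlers used to open-code.
A rough sketch of typical call sites, with hypothetical foo_* functions:

/* Illustrative only; foo_* is not part of this patch. */
static int foo_mode_valid(struct drm_connector *connector,
			  struct drm_display_mode *mode)
{
	/* Reject stereo modes unless the connector opted in. */
	if (drm_mode_is_stereo(mode) && !connector->stereo_allowed)
		return MODE_NO_STEREO;
	return MODE_OK;
}

static int foo_handle_flip(struct drm_device *dev, uint32_t crtc_id)
{
	/* Replaces a hand-rolled drm_mode_object_find() + obj_to_crtc() pair. */
	struct drm_crtc *crtc = drm_crtc_find(dev, crtc_id);

	if (!crtc)
		return -ENOENT;
	/* ... queue the page flip on crtc ... */
	return 0;
}
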
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index f43d556..ef6ad3a 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -163,7 +163,7 @@
 extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
-extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
 extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index ae8dbfb..a92c375 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -77,10 +77,10 @@
 #define DP_DOWNSTREAMPORT_PRESENT           0x005
 # define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
 # define DP_DWN_STRM_PORT_TYPE_MASK         0x06
-/* 00b = DisplayPort */
-/* 01b = Analog */
-/* 10b = TMDS or HDMI */
-/* 11b = Other */
+# define DP_DWN_STRM_PORT_TYPE_DP           (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG       (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS         (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER        (3 << 1)
 # define DP_FORMAT_CONVERSION               (1 << 3)
 # define DP_DETAILED_CAP_INFO_AVAILABLE	    (1 << 4) /* DPI */
 
@@ -333,20 +333,20 @@
 
 
 #define DP_LINK_STATUS_SIZE	   6
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			      int lane_count);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 				     int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
 #define DP_RECEIVER_CAP_SIZE		0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@
 #define EDP_VSC_PSR_CRC_VALUES_VALID	(1<<2)
 
 static inline int
-drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
 	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
 }
 
 static inline u8
-drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
 	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
 }
 
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_DPCD_REV] >= 0x11 &&
+		(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
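
drm_dp_enhanced_frame_cap() folds the DPCD revision check and the
DP_ENHANCED_FRAME_CAP bit test into one helper, next to the now
const-qualified capability accessors. A hedged sketch of a capability pass
over an already-read dpcd[] buffer (foo_* is illustrative):

/* Hypothetical sink-capability evaluation (not part of this patch). */
static void foo_parse_sink_caps(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				int *max_lanes, int *max_rate,
				bool *enhanced_framing)
{
	*max_lanes = drm_dp_max_lane_count(dpcd);
	*max_rate = drm_dp_max_link_rate(dpcd);		/* in kHz */
	*enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
}
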
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 706b962..d1f61bf 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -62,7 +62,7 @@
 extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
 
 
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
 /**
  * Initialize pool allocator.
  */
@@ -94,6 +94,15 @@
 {
 	return 0;
 }
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+				   struct device *dev)
+{
+	return -ENOMEM;
+}
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+				      struct device *dev)
+{
+}
 #endif
 
 #endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42..5ad9a4e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -93,8 +93,16 @@
 #define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
 #define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU*/
 
+#ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+	return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
 
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
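
With the !CONFIG_CPU_FREQ stubs above, callers can keep the usual get/use/put
pattern without #ifdefs: cpufreq_cpu_get() simply returns NULL when cpufreq is
compiled out and the block is skipped. A minimal sketch (foo_* is illustrative):

/* Illustrative caller (not part of this patch); builds with or without
 * CONFIG_CPU_FREQ because the stubs above return NULL / do nothing. */
static unsigned int foo_current_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (policy) {
		freq = policy->cur;
		cpufreq_cpu_put(policy);
	}
	return freq;
}
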
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 0000000..f5b9b87
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+	HOST1X_CLASS_HOST1X = 0x1,
+	HOST1X_CLASS_GR2D = 0x51,
+	HOST1X_CLASS_GR2D_SB = 0x52,
+	HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+	int (*init)(struct host1x_client *client);
+	int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+	struct list_head list;
+	struct device *parent;
+	struct device *dev;
+
+	const struct host1x_client_ops *ops;
+
+	enum host1x_class class;
+	struct host1x_channel *channel;
+
+	struct host1x_syncpt **syncpts;
+	unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+	struct host1x_bo *(*get)(struct host1x_bo *bo);
+	void (*put)(struct host1x_bo *bo);
+	dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+	void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+	void *(*mmap)(struct host1x_bo *bo);
+	void (*munmap)(struct host1x_bo *bo, void *addr);
+	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+	const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+				  const struct host1x_bo_ops *ops)
+{
+	bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+	return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+	bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+				       struct sg_table **sgt)
+{
+	return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+	bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+	return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+	bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+	return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+				    unsigned int pagenum, void *addr)
+{
+	bo->ops->kunmap(bo, pagenum, addr);
+}
+
+/*
+ * host1x syncpoints
+ */
+
+#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
+#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)
+
+struct host1x_syncpt_base;
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+		       u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+					    unsigned long flags);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+	struct host1x_bo *cmdbuf;
+	u32 cmdbuf_offset;
+	struct host1x_bo *target;
+	u32 target_offset;
+	u32 shift;
+	u32 pad;
+};
+
+struct host1x_job {
+	/* When refcount goes to zero, job can be freed */
+	struct kref ref;
+
+	/* List entry */
+	struct list_head list;
+
+	/* Channel where job is submitted to */
+	struct host1x_channel *channel;
+
+	u32 client;
+
+	/* Gathers and their memory */
+	struct host1x_job_gather *gathers;
+	unsigned int num_gathers;
+
+	/* Wait checks to be processed at submit time */
+	struct host1x_waitchk *waitchk;
+	unsigned int num_waitchk;
+	u32 waitchk_mask;
+
+	/* Array of handles to be pinned & unpinned */
+	struct host1x_reloc *relocarray;
+	unsigned int num_relocs;
+	struct host1x_job_unpin_data *unpins;
+	unsigned int num_unpins;
+
+	dma_addr_t *addr_phys;
+	dma_addr_t *gather_addr_phys;
+	dma_addr_t *reloc_addr_phys;
+
+	/* Sync point id, number of increments and end related to the submit */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	u32 syncpt_end;
+
+	/* Maximum time to wait for this job */
+	unsigned int timeout;
+
+	/* Index and number of slots used in the push buffer */
+	unsigned int first_get;
+	unsigned int num_slots;
+
+	/* Copy of gathers */
+	size_t gather_copy_size;
+	dma_addr_t gather_copy;
+	u8 *gather_copy_mapped;
+
+	/* Check if register is marked as an address reg */
+	int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+	/* Request a SETCLASS to this class */
+	u32 class;
+
+	/* Add a channel wait for previous ops to complete */
+	bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+				    u32 num_cmdbufs, u32 num_relocs,
+				    u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+			   u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+	const struct of_device_id *subdevs;
+	struct list_head list;
+	const char *name;
+
+	int (*probe)(struct host1x_device *device);
+	int (*remove)(struct host1x_device *device);
+};
+
+int host1x_driver_register(struct host1x_driver *driver);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+struct host1x_device {
+	struct host1x_driver *driver;
+	struct list_head list;
+	struct device dev;
+
+	struct mutex subdevs_lock;
+	struct list_head subdevs;
+	struct list_head active;
+
+	struct mutex clients_lock;
+	struct list_head clients;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+	return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
+
+#endif
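
The host1x_bo_ops vtable lets a DRM driver hand its own buffer objects to
host1x without host1x knowing their layout; the inline wrappers above only
dispatch through bo->ops. A rough sketch of driver-side glue, where the foo_*
types and the omitted callbacks are assumptions rather than anything defined
in this header:

/* Hypothetical driver glue (not part of this patch). */
struct foo_gem_object {
	struct host1x_bo base;
	/* driver-private fields: pages, sgt, kernel mapping, ... */
};

static struct host1x_bo *foo_bo_get(struct host1x_bo *bo)
{
	/* take a reference on the underlying GEM object, then return bo */
	return bo;
}

static void foo_bo_put(struct host1x_bo *bo)
{
	/* drop the reference taken in foo_bo_get() */
}

static const struct host1x_bo_ops foo_bo_ops = {
	.get = foo_bo_get,
	.put = foo_bo_put,
	/* .pin/.unpin/.mmap/.munmap/.kmap/.kunmap omitted in this sketch */
};

static void foo_gem_init(struct foo_gem_object *obj)
{
	host1x_bo_init(&obj->base, &foo_bo_ops);
}
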
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 19c19a5..f6c82de 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -34,9 +34,9 @@
 	int		sem_ctls[4];
 	int		used_sems;
 
-	int		msg_ctlmax;
-	int		msg_ctlmnb;
-	int		msg_ctlmni;
+	unsigned int	msg_ctlmax;
+	unsigned int	msg_ctlmnb;
+	unsigned int	msg_ctlmni;
 	atomic_t	msg_bytes;
 	atomic_t	msg_hdrs;
 	int		auto_msgmni;
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cc88172..c74088a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -332,7 +332,7 @@
 #endif
 
 #ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
+# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(typeof(pcp))(val))
 #endif
 
 #ifndef this_cpu_inc
@@ -418,7 +418,7 @@
 # define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
 #endif
 
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
 #define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
@@ -586,7 +586,7 @@
 #endif
 
 #ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
+# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(typeof(pcp))(val))
 #endif
 
 #ifndef __this_cpu_inc
@@ -668,7 +668,7 @@
 	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
 #define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
 #define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
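
The added (typeof(pcp)) cast matters when val has a narrower unsigned type
than the per-cpu variable: negating an unsigned int yields a huge positive
value that is zero-extended, not sign-extended, when added to a 64-bit
counter. A stand-alone C demonstration of the promotion the cast avoids
(assumes an LP64 target):

/* Plain user-space C, not kernel code. */
#include <stdio.h>

int main(void)
{
	long counter = 100;
	unsigned int nr = 1;

	counter += -nr;			/* -nr is 0xffffffff, zero-extended: wrong */
	printf("without cast: %ld\n", counter);	/* 4294967395, not 99 */

	counter = 100;
	counter += -(long)nr;		/* what -(typeof(pcp))(val) expands to */
	printf("with cast:    %ld\n", counter);	/* 99 */

	return 0;
}
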
 
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
new file mode 100644
index 0000000..8dec3fd
--- /dev/null
+++ b/include/uapi/drm/armada_drm.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  With inspiration from the i915 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DRM_ARMADA_IOCTL_H
+#define DRM_ARMADA_IOCTL_H
+
+#define DRM_ARMADA_GEM_CREATE		0x00
+#define DRM_ARMADA_GEM_MMAP		0x02
+#define DRM_ARMADA_GEM_PWRITE		0x03
+
+#define ARMADA_IOCTL(dir, name, str) \
+	DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
+
+struct drm_armada_gem_create {
+	uint32_t handle;
+	uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_CREATE \
+	ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
+
+struct drm_armada_gem_mmap {
+	uint32_t handle;
+	uint32_t pad;
+	uint64_t offset;
+	uint64_t size;
+	uint64_t addr;
+};
+#define DRM_IOCTL_ARMADA_GEM_MMAP \
+	ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
+
+struct drm_armada_gem_pwrite {
+	uint64_t ptr;
+	uint32_t handle;
+	uint32_t offset;
+	uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_PWRITE \
+	ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
+
+#endif
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece8678..9b24d65 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@
 	__u64 size;
 };
 
+#define DRM_CAP_DUMB_BUFFER		0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
+#define DRM_CAP_PRIME			0x5
+#define  DRM_PRIME_CAP_IMPORT		0x1
+#define  DRM_PRIME_CAP_EXPORT		0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
+
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
 	__u64 capability;
 	__u64 value;
 };
 
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D	1
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+	__u64 capability;
+	__u64 value;
+};
+
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
 	__u32 handle;
@@ -649,6 +674,7 @@
 #define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
 #define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@
 	__u32 reserved;
 };
 
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-
-#define DRM_PRIME_CAP_IMPORT 0x1
-#define DRM_PRIME_CAP_EXPORT 0x2
-
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
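
DRM_IOCTL_SET_CLIENT_CAP is per file descriptor: a client that understands the
stereo flags opts in once after opening the device, and only then do mode
listings carry DRM_MODE_FLAG_3D_* bits. A minimal user-space sketch, assuming
headers that already carry the new define and with error handling trimmed:

/* Hypothetical user-space opt-in for stereo 3D modes (not part of this patch). */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xf86drm.h>		/* libdrm; pulls in drm.h for the define/struct */

static int open_with_stereo(const char *path)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_STEREO_3D,
		.value = 1,
	};
	int fd = open(path, O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) < 0) {
		/* kernel too old or capability rejected; modes stay 2D-only */
	}
	return fd;	/* later GETCONNECTOR calls may report 3D flags */
}
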
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 28acbaf..f104c26 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
 
 /* Video mode flags */
 /* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC	(1<<0)
-#define DRM_MODE_FLAG_NHSYNC	(1<<1)
-#define DRM_MODE_FLAG_PVSYNC	(1<<2)
-#define DRM_MODE_FLAG_NVSYNC	(1<<3)
-#define DRM_MODE_FLAG_INTERLACE	(1<<4)
-#define DRM_MODE_FLAG_DBLSCAN	(1<<5)
-#define DRM_MODE_FLAG_CSYNC	(1<<6)
-#define DRM_MODE_FLAG_PCSYNC	(1<<7)
-#define DRM_MODE_FLAG_NCSYNC	(1<<8)
-#define DRM_MODE_FLAG_HSKEW	(1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST	(1<<10)
-#define DRM_MODE_FLAG_PIXMUX	(1<<11)
-#define DRM_MODE_FLAG_DBLCLK	(1<<12)
-#define DRM_MODE_FLAG_CLKDIV2	(1<<13)
+#define DRM_MODE_FLAG_PHSYNC			(1<<0)
+#define DRM_MODE_FLAG_NHSYNC			(1<<1)
+#define DRM_MODE_FLAG_PVSYNC			(1<<2)
+#define DRM_MODE_FLAG_NVSYNC			(1<<3)
+#define DRM_MODE_FLAG_INTERLACE			(1<<4)
+#define DRM_MODE_FLAG_DBLSCAN			(1<<5)
+#define DRM_MODE_FLAG_CSYNC			(1<<6)
+#define DRM_MODE_FLAG_PCSYNC			(1<<7)
+#define DRM_MODE_FLAG_NCSYNC			(1<<8)
+#define DRM_MODE_FLAG_HSKEW			(1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST			(1<<10)
+#define DRM_MODE_FLAG_PIXMUX			(1<<11)
+#define DRM_MODE_FLAG_DBLCLK			(1<<12)
+#define DRM_MODE_FLAG_CLKDIV2			(1<<13)
+ /*
+  * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAG_3D_MAX
+  * (define not exposed to user space).
+  */
+#define DRM_MODE_FLAG_3D_MASK			(0x1f<<14)
+#define  DRM_MODE_FLAG_3D_NONE			(0<<14)
+#define  DRM_MODE_FLAG_3D_FRAME_PACKING		(1<<14)
+#define  DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE	(2<<14)
+#define  DRM_MODE_FLAG_3D_LINE_ALTERNATIVE	(3<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL	(4<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH		(5<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH	(6<<14)
+#define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
+
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
@@ -165,6 +180,7 @@
 #define DRM_MODE_ENCODER_LVDS	3
 #define DRM_MODE_ENCODER_TVDAC	4
 #define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI	6
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -203,6 +219,7 @@
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
+#define DRM_MODE_CONNECTOR_DSI		16
 
 struct drm_mode_get_connector {
 
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 55bb572..3a4e97b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,10 +38,10 @@
  *
  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
  *	event from the gpu l3 cache. Additional information supplied is ROW,
- *	BANK, SUBBANK of the affected cacheline. Userspace should keep track of
- *	these events and if a specific cache-line seems to have a persistent
- *	error remap it with the l3 remapping tool supplied in intel-gpu-tools.
- *	The value supplied with the event is always 1.
+ *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ *	track of these events and if a specific cache-line seems to have a
+ *	persistent error remap it with the l3 remapping tool supplied in
+ *	intel-gpu-tools.  The value supplied with the event is always 1.
  *
  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
  *	hangcheck. The error detection event is a good indicator of when things
diff --git a/include/uapi/drm/tegra_drm.h b/include/uapi/drm/tegra_drm.h
index 73bde4e..5e1ab55 100644
--- a/include/uapi/drm/tegra_drm.h
+++ b/include/uapi/drm/tegra_drm.h
@@ -19,6 +19,9 @@
 
 #include <drm/drm.h>
 
+#define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
 struct drm_tegra_gem_create {
 	__u64 size;
 	__u32 flags;
@@ -65,6 +68,12 @@
 	__u32 id;
 };
 
+struct drm_tegra_get_syncpt_base {
+	__u64 context;
+	__u32 syncpt;
+	__u32 id;
+};
+
 struct drm_tegra_syncpt {
 	__u32 id;
 	__u32 incrs;
@@ -115,15 +124,16 @@
 	__u32 reserved[5];	/* future expansion */
 };
 
-#define DRM_TEGRA_GEM_CREATE	0x00
-#define DRM_TEGRA_GEM_MMAP	0x01
-#define DRM_TEGRA_SYNCPT_READ	0x02
-#define DRM_TEGRA_SYNCPT_INCR	0x03
-#define DRM_TEGRA_SYNCPT_WAIT	0x04
-#define DRM_TEGRA_OPEN_CHANNEL	0x05
-#define DRM_TEGRA_CLOSE_CHANNEL	0x06
-#define DRM_TEGRA_GET_SYNCPT	0x07
-#define DRM_TEGRA_SUBMIT	0x08
+#define DRM_TEGRA_GEM_CREATE		0x00
+#define DRM_TEGRA_GEM_MMAP		0x01
+#define DRM_TEGRA_SYNCPT_READ		0x02
+#define DRM_TEGRA_SYNCPT_INCR		0x03
+#define DRM_TEGRA_SYNCPT_WAIT		0x04
+#define DRM_TEGRA_OPEN_CHANNEL		0x05
+#define DRM_TEGRA_CLOSE_CHANNEL		0x06
+#define DRM_TEGRA_GET_SYNCPT		0x07
+#define DRM_TEGRA_SUBMIT		0x08
+#define DRM_TEGRA_GET_SYNCPT_BASE	0x09
 
 #define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
 #define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
@@ -134,5 +144,6 @@
 #define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
 #define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
+#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
 
 #endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 009a655..2fc1602 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -456,13 +456,15 @@
 	/*
 	 * Control data for the mmap() data buffer.
 	 *
-	 * User-space reading the @data_head value should issue an rmb(), on
-	 * SMP capable platforms, after reading this value -- see
-	 * perf_event_wakeup().
+	 * User-space reading the @data_head value should issue an smp_rmb(),
+	 * after reading this value.
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
-	 * written by userspace to reflect the last read data. In this case
-	 * the kernel will not over-write unread data.
+	 * written by userspace to reflect the last read data, after issuing
+	 * an smp_mb() to separate the data read from the ->data_tail store.
+	 * In this case the kernel will not over-write unread data.
+	 *
+	 * See perf_output_put_handle() for the data ordering.
 	 */
 	__u64   data_head;		/* head in the data section */
 	__u64	data_tail;		/* user-space written tail */
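
The comment above gives the user-space side of the contract: read data_head,
issue a read barrier before touching the data, and issue a full barrier before
publishing data_tail. A hedged sketch of a reader that follows it, with
compiler fences standing in for smp_rmb()/smp_mb() and an assumed view of the
mmap'ed area:

/* Illustrative user-space reader (not part of this patch). */
#include <stdint.h>
#include <stddef.h>

struct ring {				/* assumed view of the mmap'ed area */
	volatile uint64_t *data_head;	/* &mmap_page->data_head */
	volatile uint64_t *data_tail;	/* &mmap_page->data_tail */
	unsigned char *data;		/* start of the data pages */
	size_t size;			/* data size, power of two */
};

static size_t ring_read(struct ring *r, unsigned char *buf, size_t len)
{
	uint64_t head = *r->data_head;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* barrier C: head read before data reads */

	uint64_t tail = *r->data_tail;
	size_t avail = head - tail;
	size_t i;

	if (len > avail)
		len = avail;
	for (i = 0; i < len; i++)			/* copy, handling wrap-around */
		buf[i] = r->data[(tail + i) & (r->size - 1)];

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* barrier D: data reads before tail store */
	*r->data_tail = tail + len;
	return len;
}
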
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 130dfec..b0e99de 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -62,7 +62,7 @@
 	return err;
 }
 
-static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
@@ -72,7 +72,7 @@
 	memcpy(&ipc_table, table, sizeof(ipc_table));
 	ipc_table.data = get_ipc(table);
 
-	rc = proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
+	rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
 
 	if (write && !rc && lenp_bef == *lenp)
 		/*
@@ -152,15 +152,13 @@
 #define proc_ipc_dointvec	   NULL
 #define proc_ipc_dointvec_minmax   NULL
 #define proc_ipc_dointvec_minmax_orphans   NULL
-#define proc_ipc_callback_dointvec NULL
+#define proc_ipc_callback_dointvec_minmax  NULL
 #define proc_ipcauto_dointvec_minmax NULL
 #endif
 
 static int zero;
 static int one = 1;
-#ifdef CONFIG_CHECKPOINT_RESTORE
 static int int_max = INT_MAX;
-#endif
 
 static struct ctl_table ipc_kern_table[] = {
 	{
@@ -198,21 +196,27 @@
 		.data		= &init_ipc_ns.msg_ctlmax,
 		.maxlen		= sizeof (init_ipc_ns.msg_ctlmax),
 		.mode		= 0644,
-		.proc_handler	= proc_ipc_dointvec,
+		.proc_handler	= proc_ipc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &int_max,
 	},
 	{
 		.procname	= "msgmni",
 		.data		= &init_ipc_ns.msg_ctlmni,
 		.maxlen		= sizeof (init_ipc_ns.msg_ctlmni),
 		.mode		= 0644,
-		.proc_handler	= proc_ipc_callback_dointvec,
+		.proc_handler	= proc_ipc_callback_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &int_max,
 	},
 	{
 		.procname	=  "msgmnb",
 		.data		= &init_ipc_ns.msg_ctlmnb,
 		.maxlen		= sizeof (init_ipc_ns.msg_ctlmnb),
 		.mode		= 0644,
-		.proc_handler	= proc_ipc_dointvec,
+		.proc_handler	= proc_ipc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &int_max,
 	},
 	{
 		.procname	= "sem",
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144..9c2ddfb 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@
 		goto out;
 
 	/*
-	 * Publish the known good head. Rely on the full barrier implied
-	 * by atomic_dec_and_test() order the rb->head read and this
-	 * write.
+	 * Since the mmap() consumer (userspace) can run on a different CPU:
+	 *
+	 *   kernel				user
+	 *
+	 *   READ ->data_tail			READ ->data_head
+	 *   smp_mb()	(A)			smp_rmb()	(C)
+	 *   WRITE $data			READ $data
+	 *   smp_wmb()	(B)			smp_mb()	(D)
+	 *   STORE ->data_head			WRITE ->data_tail
+	 *
+	 * Where A pairs with D, and B pairs with C.
+	 *
+	 * I don't think A needs to be a full barrier because we won't in fact
+	 * write data until we see the store from userspace. So we simply don't
+	 * issue the data WRITE until we observe it. Be conservative for now.
+	 *
+	 * OTOH, D needs to be a full barrier since it separates the data READ
+	 * from the tail WRITE.
+	 *
+	 * For B a WMB is sufficient since it separates two WRITEs, and for C
+	 * an RMB is sufficient since it separates two READs.
+	 *
+	 * See perf_output_begin().
 	 */
+	smp_wmb();
 	rb->user_page->data_head = head;
 
 	/*
@@ -154,9 +175,11 @@
 		 * Userspace could choose to issue a mb() before updating the
 		 * tail pointer. So that all reads will be completed before the
 		 * write is issued.
+		 *
+		 * See perf_output_put_handle().
 		 */
 		tail = ACCESS_ONCE(rb->user_page->data_tail);
-		smp_rmb();
+		smp_mb();
 		offset = head = local_read(&rb->head);
 		head += size;
 		if (unlikely(!perf_output_space(rb, tail, offset, head)))
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 06344d9..094f315 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -983,7 +983,7 @@
 
 config DEBUG_KOBJECT_RELEASE
 	bool "kobject release debugging"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_OBJECTS_TIMERS
 	help
 	  kobjects are reference counted objects.  This means that their
 	  last reference count put is not predictable, and the kobject can
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index a685c8a..d16fa29 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -577,7 +577,8 @@
 		miter->__offset += miter->consumed;
 		miter->__remaining -= miter->consumed;
 
-		if (miter->__flags & SG_MITER_TO_SG)
+		if ((miter->__flags & SG_MITER_TO_SG) &&
+		    !PageSlab(miter->page))
 			flush_kernel_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 610e3df..cca80d9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1278,64 +1278,90 @@
 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
 {
+	struct anon_vma *anon_vma = NULL;
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
+	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid;
-	int current_nid = -1;
-	bool migrated;
+	bool page_locked;
+	bool migrated = false;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
 		goto out_unlock;
 
 	page = pmd_page(pmd);
-	get_page(page);
-	current_nid = page_to_nid(page);
+	page_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (current_nid == numa_node_id())
+	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
+	/*
+	 * Acquire the page lock to serialise THP migrations but avoid dropping
+	 * page_table_lock if at all possible
+	 */
+	page_locked = trylock_page(page);
 	target_nid = mpol_misplaced(page, vma, haddr);
 	if (target_nid == -1) {
-		put_page(page);
-		goto clear_pmdnuma;
+		/* If the page was locked, there are no parallel migrations */
+		if (page_locked)
+			goto clear_pmdnuma;
+
+		/*
+		 * Otherwise wait for potential migrations and retry. We do
+		 * relock and check_same as the page may no longer be mapped.
+		 * As the fault is being retried, do not account for it.
+		 */
+		spin_unlock(&mm->page_table_lock);
+		wait_on_page_locked(page);
+		page_nid = -1;
+		goto out;
 	}
 
-	/* Acquire the page lock to serialise THP migrations */
+	/* Page is misplaced, serialise migrations and parallel THP splits */
+	get_page(page);
 	spin_unlock(&mm->page_table_lock);
-	lock_page(page);
+	if (!page_locked)
+		lock_page(page);
+	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PTE did not change while locked */
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp))) {
 		unlock_page(page);
 		put_page(page);
+		page_nid = -1;
 		goto out_unlock;
 	}
-	spin_unlock(&mm->page_table_lock);
 
-	/* Migrate the THP to the requested node */
+	/*
+	 * Migrate the THP to the requested node, returns with page unlocked
+	 * and pmd_numa cleared.
+	 */
+	spin_unlock(&mm->page_table_lock);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
 				pmdp, pmd, addr, page, target_nid);
-	if (!migrated)
-		goto check_same;
+	if (migrated)
+		page_nid = target_nid;
 
-	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
-	return 0;
-
-check_same:
-	spin_lock(&mm->page_table_lock);
-	if (unlikely(!pmd_same(pmd, *pmdp)))
-		goto out_unlock;
+	goto out;
 clear_pmdnuma:
+	BUG_ON(!PageLocked(page));
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
 	VM_BUG_ON(pmd_numa(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
+	unlock_page(page);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-	if (current_nid != -1)
-		task_numa_fault(current_nid, HPAGE_PMD_NR, false);
+
+out:
+	if (anon_vma)
+		page_unlock_anon_vma_read(anon_vma);
+
+	if (page_nid != -1)
+		task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
+
 	return 0;
 }
 
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7246791..72f9dec 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -81,8 +81,9 @@
 		 * decrement nr_to_walk first so that we don't livelock if we
 		 * get stuck on large numbers of LRU_RETRY items
 		 */
-		if (--(*nr_to_walk) == 0)
+		if (!*nr_to_walk)
 			break;
+		--*nr_to_walk;
 
 		ret = isolate(item, &nlru->lock, cb_arg);
 		switch (ret) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 34d3ca9..13b9d0f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -54,6 +54,7 @@
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
+#include <linux/lockdep.h>
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
@@ -2046,6 +2047,12 @@
 	return total;
 }
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map memcg_oom_lock_dep_map = {
+	.name = "memcg_oom_lock",
+};
+#endif
+
 static DEFINE_SPINLOCK(memcg_oom_lock);
 
 /*
@@ -2083,7 +2090,8 @@
 			}
 			iter->oom_lock = false;
 		}
-	}
+	} else
+		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
 
 	spin_unlock(&memcg_oom_lock);
 
@@ -2095,6 +2103,7 @@
 	struct mem_cgroup *iter;
 
 	spin_lock(&memcg_oom_lock);
+	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
 	for_each_mem_cgroup_tree(iter, memcg)
 		iter->oom_lock = false;
 	spin_unlock(&memcg_oom_lock);
@@ -2765,10 +2774,10 @@
 	*ptr = memcg;
 	return 0;
 nomem:
-	*ptr = NULL;
-	if (gfp_mask & __GFP_NOFAIL)
-		return 0;
-	return -ENOMEM;
+	if (!(gfp_mask & __GFP_NOFAIL)) {
+		*ptr = NULL;
+		return -ENOMEM;
+	}
 bypass:
 	*ptr = root_mem_cgroup;
 	return -EINTR;
@@ -3773,8 +3782,7 @@
 {
 	/* Update stat data for mem_cgroup */
 	preempt_disable();
-	WARN_ON_ONCE(from->stat->count[idx] < nr_pages);
-	__this_cpu_add(from->stat->count[idx], -nr_pages);
+	__this_cpu_sub(from->stat->count[idx], nr_pages);
 	__this_cpu_add(to->stat->count[idx], nr_pages);
 	preempt_enable();
 }
@@ -4950,31 +4958,18 @@
 	} while (usage > 0);
 }
 
-/*
- * This mainly exists for tests during the setting of set of use_hierarchy.
- * Since this is the very setting we are changing, the current hierarchy value
- * is meaningless
- */
-static inline bool __memcg_has_children(struct mem_cgroup *memcg)
-{
-	struct cgroup_subsys_state *pos;
-
-	/* bounce at first found */
-	css_for_each_child(pos, &memcg->css)
-		return true;
-	return false;
-}
-
-/*
- * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
- * to be already dead (as in mem_cgroup_force_empty, for instance).  This is
- * from mem_cgroup_count_children(), in the sense that we don't really care how
- * many children we have; we only need to know if we have any.  It also counts
- * any memcg without hierarchy as infertile.
- */
 static inline bool memcg_has_children(struct mem_cgroup *memcg)
 {
-	return memcg->use_hierarchy && __memcg_has_children(memcg);
+	lockdep_assert_held(&memcg_create_mutex);
+	/*
+	 * The lock does not prevent addition or deletion to the list
+	 * of children, but it prevents a new child from being
+	 * initialized based on this parent in css_online(), so it's
+	 * enough to decide whether hierarchically inherited
+	 * attributes can still be changed or not.
+	 */
+	return memcg->use_hierarchy &&
+		!list_empty(&memcg->css.cgroup->children);
 }
 
 /*
@@ -5054,7 +5049,7 @@
 	 */
 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
 				(val == 1 || val == 0)) {
-		if (!__memcg_has_children(memcg))
+		if (list_empty(&memcg->css.cgroup->children))
 			memcg->use_hierarchy = val;
 		else
 			retval = -EBUSY;
diff --git a/mm/memory.c b/mm/memory.c
index 1311f26..d176154 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3521,12 +3521,12 @@
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-				unsigned long addr, int current_nid)
+				unsigned long addr, int page_nid)
 {
 	get_page(page);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (current_nid == numa_node_id())
+	if (page_nid == numa_node_id())
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
 	return mpol_misplaced(page, vma, addr);
@@ -3537,7 +3537,7 @@
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	int current_nid = -1;
+	int page_nid = -1;
 	int target_nid;
 	bool migrated = false;
 
@@ -3567,15 +3567,10 @@
 		return 0;
 	}
 
-	current_nid = page_to_nid(page);
-	target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+	page_nid = page_to_nid(page);
+	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
 	if (target_nid == -1) {
-		/*
-		 * Account for the fault against the current node if it not
-		 * being replaced regardless of where the page is located.
-		 */
-		current_nid = numa_node_id();
 		put_page(page);
 		goto out;
 	}
@@ -3583,11 +3578,11 @@
 	/* Migrate to the requested node */
 	migrated = migrate_misplaced_page(page, target_nid);
 	if (migrated)
-		current_nid = target_nid;
+		page_nid = target_nid;
 
 out:
-	if (current_nid != -1)
-		task_numa_fault(current_nid, 1, migrated);
+	if (page_nid != -1)
+		task_numa_fault(page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3602,7 +3597,6 @@
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int local_nid = numa_node_id();
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3625,9 +3619,10 @@
 	for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
 		pte_t pteval = *pte;
 		struct page *page;
-		int curr_nid = local_nid;
+		int page_nid = -1;
 		int target_nid;
-		bool migrated;
+		bool migrated = false;
+
 		if (!pte_present(pteval))
 			continue;
 		if (!pte_numa(pteval))
@@ -3649,25 +3644,19 @@
 		if (unlikely(page_mapcount(page) != 1))
 			continue;
 
-		/*
-		 * Note that the NUMA fault is later accounted to either
-		 * the node that is currently running or where the page is
-		 * migrated to.
-		 */
-		curr_nid = local_nid;
-		target_nid = numa_migrate_prep(page, vma, addr,
-					       page_to_nid(page));
-		if (target_nid == -1) {
+		page_nid = page_to_nid(page);
+		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+		pte_unmap_unlock(pte, ptl);
+		if (target_nid != -1) {
+			migrated = migrate_misplaced_page(page, target_nid);
+			if (migrated)
+				page_nid = target_nid;
+		} else {
 			put_page(page);
-			continue;
 		}
 
-		/* Migrate to the requested node */
-		pte_unmap_unlock(pte, ptl);
-		migrated = migrate_misplaced_page(page, target_nid);
-		if (migrated)
-			curr_nid = target_nid;
-		task_numa_fault(curr_nid, 1, migrated);
+		if (page_nid != -1)
+			task_numa_fault(page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 7a7325e..c046927 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1715,12 +1715,12 @@
 		unlock_page(new_page);
 		put_page(new_page);		/* Free it */
 
-		unlock_page(page);
+		/* Retake the caller's reference and put the page back on the LRU */
+		get_page(page);
 		putback_lru_page(page);
-
-		count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-		isolated = 0;
-		goto out;
+		mod_zone_page_state(page_zone(page),
+			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
+		goto out_fail;
 	}
 
 	/*
@@ -1737,9 +1737,9 @@
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	entry = pmd_mkhuge(entry);
 
-	page_add_new_anon_rmap(new_page, vma, haddr);
-
+	pmdp_clear_flush(vma, haddr, pmd);
 	set_pmd_at(mm, haddr, pmd, entry);
+	page_add_new_anon_rmap(new_page, vma, haddr);
 	update_mmu_cache_pmd(vma, address, &entry);
 	page_remove_rmap(page);
 	/*
@@ -1758,7 +1758,6 @@
 	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
 	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
 
-out:
 	mod_zone_page_state(page_zone(page),
 			NR_ISOLATED_ANON + page_lru,
 			-HPAGE_PMD_NR);
@@ -1767,6 +1766,10 @@
 out_fail:
 	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
 out_dropref:
+	entry = pmd_mknonnuma(entry);
+	set_pmd_at(mm, haddr, pmd, entry);
+	update_mmu_cache_pmd(vma, address, &entry);
+
 	unlock_page(page);
 	put_page(page);
 	return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a3af058..412ba2b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -148,7 +148,7 @@
 				split_huge_page_pmd(vma, addr, pmd);
 			else if (change_huge_pmd(vma, pmd, addr, newprot,
 						 prot_numa)) {
-				pages += HPAGE_PMD_NR;
+				pages++;
 				continue;
 			}
 			/* fall through */
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 5da2cbc..2beeabf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -242,7 +242,7 @@
 		if (err)
 			break;
 		pgd++;
-	} while (addr = next, addr != end);
+	} while (addr = next, addr < end);
 
 	return err;
 }
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 487ac6f..9a11f9f 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -55,6 +55,7 @@
 static unsigned int table_size, table_cnt;
 static int all_symbols = 0;
 static char symbol_prefix_char = '\0';
+static unsigned long long kernel_start_addr = 0;
 
 int token_profit[0x10000];
 
@@ -65,7 +66,10 @@
 
 static void usage(void)
 {
-	fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
+	fprintf(stderr, "Usage: kallsyms [--all-symbols] "
+			"[--symbol-prefix=<prefix char>] "
+			"[--page-offset=<CONFIG_PAGE_OFFSET>] "
+			"< in.map > out.S\n");
 	exit(1);
 }
 
@@ -194,6 +198,9 @@
 	int i;
 	int offset = 1;
 
+	if (s->addr < kernel_start_addr)
+		return 0;
+
 	/* skip prefix char */
 	if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
 		offset++;
@@ -646,6 +653,9 @@
 				if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
 					p++;
 				symbol_prefix_char = *p;
+			} else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
+				const char *p = &argv[i][14];
+				kernel_start_addr = strtoull(p, NULL, 16);
 			} else
 				usage();
 		}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 0149949..32b10f5 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,6 +82,8 @@
 		kallsymopt="${kallsymopt} --all-symbols"
 	fi
 
+	kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+
 	local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
 		      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 17f45e8..e1e9e0c 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -49,6 +49,8 @@
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device == device)
 			return pcm;
 	}
@@ -60,6 +62,8 @@
 	struct snd_pcm *pcm;
 
 	list_for_each_entry(pcm, &snd_pcm_devices, list) {
+		if (pcm->internal)
+			continue;
 		if (pcm->card == card && pcm->device > device)
 			return pcm->device;
 		else if (pcm->card->number > card->number)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5b6c4e3..748c6a9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4864,8 +4864,8 @@
 	spin_unlock(&codec->power_lock);
 
 	state = hda_call_codec_suspend(codec, true);
-	codec->pm_down_notified = 0;
-	if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
+	if (!codec->pm_down_notified &&
+	    !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
 		codec->pm_down_notified = 1;
 		hda_call_pm_notify(bus, false);
 	}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 26ad4f0..b7c89df 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4475,9 +4475,11 @@
 					    true, &spec->vmaster_mute.sw_kctl);
 		if (err < 0)
 			return err;
-		if (spec->vmaster_mute.hook)
+		if (spec->vmaster_mute.hook) {
 			snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
 						 spec->vmaster_mute_enum);
+			snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+		}
 	}
 
 	free_kctls(spec); /* no longer needed */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 0cbdd87..2aa2f57 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -968,6 +968,15 @@
 	}
 }
 
+static void ad1884_fixup_thinkpad(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	struct ad198x_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE)
+		spec->gen.keep_eapd_on = 1;
+}
+
 /* set magic COEFs for dmic */
 static const struct hda_verb ad1884_dmic_init_verbs[] = {
 	{0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
@@ -979,6 +988,7 @@
 	AD1884_FIXUP_AMP_OVERRIDE,
 	AD1884_FIXUP_HP_EAPD,
 	AD1884_FIXUP_DMIC_COEF,
+	AD1884_FIXUP_THINKPAD,
 	AD1884_FIXUP_HP_TOUCHSMART,
 };
 
@@ -997,6 +1007,12 @@
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = ad1884_dmic_init_verbs,
 	},
+	[AD1884_FIXUP_THINKPAD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = ad1884_fixup_thinkpad,
+		.chained = true,
+		.chain_id = AD1884_FIXUP_DMIC_COEF,
+	},
 	[AD1884_FIXUP_HP_TOUCHSMART] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = ad1884_dmic_init_verbs,
@@ -1008,7 +1024,7 @@
 static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
 	SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
-	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF),
+	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_THINKPAD),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf313bea..8ad5543 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4623,6 +4623,7 @@
 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
+	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
 	SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 8b50e59..01daf65 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -530,6 +530,7 @@
 				hubs->hp_startup_mode);
 			break;
 		}
+		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
 		snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c17c14c..b2949ae 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1949,7 +1949,7 @@
 				w->active ? "active" : "inactive");
 
 	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->sink))
+		if (p->connected && !p->connected(w, p->source))
 			continue;
 
 		if (p->connect)
@@ -3495,6 +3495,7 @@
 		if (!w) {
 			dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
 				dai->driver->playback.stream_name);
+			return -ENOMEM;
 		}
 
 		w->priv = dai;
@@ -3513,6 +3514,7 @@
 		if (!w) {
 			dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
 				dai->driver->capture.stream_name);
+			return -ENOMEM;
 		}
 
 		w->priv = dai;
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e297b74..ca0d3d9 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -90,8 +90,20 @@
 	Number of mmap data pages. Must be a power of two.
 
 -g::
+	Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-	Do call-graph (stack chain/backtrace) recording.
+	Set up and enable call-graph (stack chain/backtrace) recording;
+	implies -g.
+
+	Allows specifying "fp" (frame pointer) or "dwarf"
+	(DWARF's CFI - Call Frame Information) as the method to collect
+	the information used to show the call graphs.
+
+	On some systems, where binaries are built with gcc
+	-fomit-frame-pointer, using the "fp" method will produce bogus
+	call graphs. In that case "dwarf", if available (perf tools linked
+	to the libunwind library), should be used instead.
 
 -q::
 --quiet::
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 58d6598..6a118e7 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -140,20 +140,12 @@
 --asm-raw::
 	Show raw instruction encoding of assembly instructions.
 
--G [type,min,order]::
+-G::
+	Enables call-graph (stack chain/backtrace) recording.
+
 --call-graph::
-        Display call chains using type, min percent threshold and order.
-	type can be either:
-	- flat: single column, linear exposure of call chains.
-	- graph: use a graph tree, displaying absolute overhead rates.
-	- fractal: like graph, but displays relative rates. Each branch of
-		 the tree is considered as a new profiled object.
-
-	order can be either:
-	- callee: callee based call graph.
-	- caller: inverted caller based call graph.
-
-	Default: fractal,0.5,callee.
+	Set up and enable call-graph (stack chain/backtrace) recording;
+	implies -G.
 
 --ignore-callees=<regex>::
         Ignore callees of the function(s) matching the given regex.
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 935d522..fbc2888 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -888,11 +888,18 @@
 	while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
 		err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
 		if (err) {
+			perf_evlist__mmap_consume(kvm->evlist, idx);
 			pr_err("Failed to parse sample\n");
 			return -1;
 		}
 
 		err = perf_session_queue_event(kvm->session, event, &sample, 0);
+		/*
+		 * FIXME: Here we can't consume the event, as perf_session_queue_event will
+		 *        point to it, and it'll possibly get overwritten by the kernel.
+		 */
+		perf_evlist__mmap_consume(kvm->evlist, idx);
+
 		if (err) {
 			pr_err("Failed to enqueue sample: %d\n", err);
 			return -1;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a41ac415..d046514 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -712,21 +712,12 @@
 }
 #endif /* LIBUNWIND_SUPPORT */
 
-int record_parse_callchain_opt(const struct option *opt,
-			       const char *arg, int unset)
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
 {
-	struct perf_record_opts *opts = opt->value;
 	char *tok, *name, *saveptr = NULL;
 	char *buf;
 	int ret = -1;
 
-	/* --no-call-graph */
-	if (unset)
-		return 0;
-
-	/* We specified default option if none is provided. */
-	BUG_ON(!arg);
-
 	/* We need buffer that we know we can write to. */
 	buf = malloc(strlen(arg) + 1);
 	if (!buf)
@@ -764,13 +755,9 @@
 				ret = get_stack_size(tok, &size);
 				opts->stack_dump_size = size;
 			}
-
-			if (!ret)
-				pr_debug("callchain: stack dump size %d\n",
-					 opts->stack_dump_size);
 #endif /* LIBUNWIND_SUPPORT */
 		} else {
-			pr_err("callchain: Unknown -g option "
+			pr_err("callchain: Unknown --call-graph option "
 			       "value: %s\n", arg);
 			break;
 		}
@@ -778,13 +765,52 @@
 	} while (0);
 
 	free(buf);
+	return ret;
+}
 
+static void callchain_debug(struct perf_record_opts *opts)
+{
+	pr_debug("callchain: type %d\n", opts->call_graph);
+
+	if (opts->call_graph == CALLCHAIN_DWARF)
+		pr_debug("callchain: stack dump size %d\n",
+			 opts->stack_dump_size);
+}
+
+int record_parse_callchain_opt(const struct option *opt,
+			       const char *arg,
+			       int unset)
+{
+	struct perf_record_opts *opts = opt->value;
+	int ret;
+
+	/* --no-call-graph */
+	if (unset) {
+		opts->call_graph = CALLCHAIN_NONE;
+		pr_debug("callchain: disabled\n");
+		return 0;
+	}
+
+	ret = record_parse_callchain(arg, opts);
 	if (!ret)
-		pr_debug("callchain: type %d\n", opts->call_graph);
+		callchain_debug(opts);
 
 	return ret;
 }
 
+int record_callchain_opt(const struct option *opt,
+			 const char *arg __maybe_unused,
+			 int unset __maybe_unused)
+{
+	struct perf_record_opts *opts = opt->value;
+
+	if (opts->call_graph == CALLCHAIN_NONE)
+		opts->call_graph = CALLCHAIN_FP;
+
+	callchain_debug(opts);
+	return 0;
+}
+
 static const char * const record_usage[] = {
 	"perf record [<options>] [<command>]",
 	"perf record [<options>] -- <command> [<options>]",
@@ -813,12 +839,12 @@
 	},
 };
 
-#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
+#define CALLCHAIN_HELP "setup and enable call-graph (stack chain/backtrace) recording: "
 
 #ifdef LIBUNWIND_SUPPORT
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
 #else
-const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
+const char record_callchain_help[] = CALLCHAIN_HELP "fp";
 #endif
 
 /*
@@ -858,9 +884,12 @@
 		     "number of mmap data pages"),
 	OPT_BOOLEAN(0, "group", &record.opts.group,
 		    "put the counters into a counter group"),
-	OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
-			     "mode[,dump_size]", record_callchain_help,
-			     &record_parse_callchain_opt, "fp"),
+	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
+			   NULL, "enables call-graph recording",
+			   &record_callchain_opt),
+	OPT_CALLBACK(0, "call-graph", &record.opts,
+		     "mode[,dump_size]", record_callchain_help,
+		     &record_parse_callchain_opt),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
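
As a rough, self-contained sketch of the "mode[,dump_size]" syntax that
record_parse_callchain() accepts above; the enum names only mirror
CALLCHAIN_FP/CALLCHAIN_DWARF, the default dump size is illustrative, and
this is not perf's actual code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum callchain_mode { MODE_NONE, MODE_FP, MODE_DWARF };

/* Parse "fp" or "dwarf[,dump_size]" into a mode and an optional stack dump size. */
static int parse_callchain_arg(const char *arg, enum callchain_mode *mode,
			       unsigned long *dump_size)
{
	char *buf = strdup(arg);		/* writable copy of the option value */
	char *saveptr = NULL;
	char *tok;
	int ret = -1;

	if (!buf)
		return -1;

	tok = strtok_r(buf, ",", &saveptr);

	if (tok && !strcmp(tok, "fp")) {
		*mode = MODE_FP;		/* frame-pointer based unwinding */
		ret = 0;
	} else if (tok && !strcmp(tok, "dwarf")) {
		*mode = MODE_DWARF;		/* DWARF CFI based unwinding */
		tok = strtok_r(NULL, ",", &saveptr);
		*dump_size = tok ? strtoul(tok, NULL, 0) : 8192; /* illustrative default */
		ret = 0;
	} else {
		fprintf(stderr, "callchain: Unknown --call-graph option value: %s\n", arg);
	}

	free(buf);
	return ret;
}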
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2122141..5a11f13 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -810,7 +810,7 @@
 		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
 		if (ret) {
 			pr_err("Can't parse sample, err = %d\n", ret);
-			continue;
+			goto next_event;
 		}
 
 		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
@@ -825,13 +825,13 @@
 		case PERF_RECORD_MISC_USER:
 			++top->us_samples;
 			if (top->hide_user_symbols)
-				continue;
+				goto next_event;
 			machine = &session->machines.host;
 			break;
 		case PERF_RECORD_MISC_KERNEL:
 			++top->kernel_samples;
 			if (top->hide_kernel_symbols)
-				continue;
+				goto next_event;
 			machine = &session->machines.host;
 			break;
 		case PERF_RECORD_MISC_GUEST_KERNEL:
@@ -847,7 +847,7 @@
 			 */
 			/* Fall thru */
 		default:
-			continue;
+			goto next_event;
 		}
 
 
@@ -859,6 +859,8 @@
 			machine__process_event(machine, event);
 		} else
 			++session->stats.nr_unknown_events;
+next_event:
+		perf_evlist__mmap_consume(top->evlist, idx);
 	}
 }
 
@@ -1016,16 +1018,16 @@
 }
 
 static int
+callchain_opt(const struct option *opt, const char *arg, int unset)
+{
+	symbol_conf.use_callchain = true;
+	return record_callchain_opt(opt, arg, unset);
+}
+
+static int
 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-	/*
-	 * --no-call-graph
-	 */
-	if (unset)
-		return 0;
-
 	symbol_conf.use_callchain = true;
-
 	return record_parse_callchain_opt(opt, arg, unset);
 }
 
@@ -1106,9 +1108,12 @@
 		   "sort by key(s): pid, comm, dso, symbol, parent, weight, local_weight"),
 	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
 		    "Show a column with the number of samples"),
-	OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
-			     "mode[,dump_size]", record_callchain_help,
-			     &parse_callchain_opt, "fp"),
+	OPT_CALLBACK_NOOPT('G', NULL, &top.record_opts,
+			   NULL, "enables call-graph recording",
+			   &callchain_opt),
+	OPT_CALLBACK(0, "call-graph", &top.record_opts,
+		     "mode[,dump_size]", record_callchain_help,
+		     &parse_callchain_opt),
 	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
 		   "ignore callees of these functions in call graphs",
 		   report_parse_ignore_callees_opt),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 71aa3e3..99c8d9a 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -987,7 +987,7 @@
 			err = perf_evlist__parse_sample(evlist, event, &sample);
 			if (err) {
 				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
-				continue;
+				goto next_event;
 			}
 
 			if (trace->base_time == 0)
@@ -1001,18 +1001,20 @@
 			evsel = perf_evlist__id2evsel(evlist, sample.id);
 			if (evsel == NULL) {
 				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
-				continue;
+				goto next_event;
 			}
 
 			if (sample.raw_data == NULL) {
 				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
 				       perf_evsel__name(evsel), sample.tid,
 				       sample.cpu, sample.raw_size);
-				continue;
+				goto next_event;
 			}
 
 			handler = evsel->handler.func;
 			handler(trace, evsel, &sample);
+next_event:
+			perf_evlist__mmap_consume(evlist, i);
 
 			if (done)
 				goto out_unmap_evlist;
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 6fb781d..e3fedfa 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -290,6 +290,7 @@
 	for (i = 0; i < evlist->nr_mmaps; i++) {
 		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 			ret = process_event(machine, evlist, event, state);
+			perf_evlist__mmap_consume(evlist, i);
 			if (ret < 0)
 				return ret;
 		}
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index d444ea2..376c356 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -36,6 +36,7 @@
 			    (pid_t)event->comm.tid == getpid() &&
 			    strcmp(event->comm.comm, comm) == 0)
 				found += 1;
+			perf_evlist__mmap_consume(evlist, i);
 		}
 	}
 	return found;
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index c4185b9..a7232c2 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -122,6 +122,7 @@
 			goto out_munmap;
 		}
 		nr_events[evsel->idx]++;
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	err = 0;
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
index fc5b9fc..524b221 100644
--- a/tools/perf/tests/open-syscall-tp-fields.c
+++ b/tools/perf/tests/open-syscall-tp-fields.c
@@ -77,8 +77,10 @@
 
 				++nr_events;
 
-				if (type != PERF_RECORD_SAMPLE)
+				if (type != PERF_RECORD_SAMPLE) {
+					perf_evlist__mmap_consume(evlist, i);
 					continue;
+				}
 
 				err = perf_evsel__parse_sample(evsel, event, &sample);
 				if (err) {
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index b8a7056..7923b06 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -263,6 +263,8 @@
 						 type);
 					++errs;
 				}
+
+				perf_evlist__mmap_consume(evlist, i);
 			}
 		}
 
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 0ab61b1..4ca1b93 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -122,7 +122,7 @@
 			if (event->header.type != PERF_RECORD_COMM ||
 			    (pid_t)event->comm.pid != getpid() ||
 			    (pid_t)event->comm.tid != getpid())
-				continue;
+				goto next_event;
 
 			if (strcmp(event->comm.comm, comm1) == 0) {
 				CHECK__(perf_evsel__parse_sample(evsel, event,
@@ -134,6 +134,8 @@
 								 &sample));
 				comm2_time = sample.time;
 			}
+next_event:
+			perf_evlist__mmap_consume(evlist, i);
 		}
 	}
 
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 2e41e2d..6e2b44e 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@
 		struct perf_sample sample;
 
 		if (event->header.type != PERF_RECORD_SAMPLE)
-			continue;
+			goto next_event;
 
 		err = perf_evlist__parse_sample(evlist, event, &sample);
 		if (err < 0) {
@@ -88,6 +88,8 @@
 
 		total_periods += sample.period;
 		nr_samples++;
+next_event:
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	if ((u64) nr_samples == total_periods) {
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 28fe589..a3e6487 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -96,10 +96,10 @@
 
 retry:
 	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
-		if (event->header.type != PERF_RECORD_EXIT)
-			continue;
+		if (event->header.type == PERF_RECORD_EXIT)
+			nr_exit++;
 
-		nr_exit++;
+		perf_evlist__mmap_consume(evlist, 0);
 	}
 
 	if (!exited || !nr_exit) {
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 194e2f4..6c15268 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -315,8 +315,7 @@
 }
 
 static int hist_entry__period_snprintf(struct perf_hpp *hpp,
-				       struct hist_entry *he,
-				       bool color)
+				       struct hist_entry *he)
 {
 	const char *sep = symbol_conf.field_sep;
 	struct perf_hpp_fmt *fmt;
@@ -338,7 +337,7 @@
 		} else
 			first = false;
 
-		if (color && fmt->color)
+		if (perf_hpp__use_color() && fmt->color)
 			ret = fmt->color(fmt, hpp, he);
 		else
 			ret = fmt->entry(fmt, hpp, he);
@@ -358,12 +357,11 @@
 		.buf		= bf,
 		.size		= size,
 	};
-	bool color = !symbol_conf.field_sep;
 
 	if (size == 0 || size > bfsz)
 		size = hpp.size = bfsz;
 
-	ret = hist_entry__period_snprintf(&hpp, he, color);
+	ret = hist_entry__period_snprintf(&hpp, he);
 	hist_entry__sort_snprintf(he, bf + ret, size - ret, hists);
 
 	ret = fprintf(fp, "%s\n", bf);
@@ -482,6 +480,7 @@
 
 print_entries:
 	linesz = hists__sort_list_width(hists) + 3 + 1;
+	linesz += perf_hpp__color_overhead();
 	line = malloc(linesz);
 	if (line == NULL) {
 		ret = -1;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 2b585bc..9e99060 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -147,6 +147,9 @@
 
 struct option;
 
+int record_parse_callchain(const char *arg, struct perf_record_opts *opts);
 int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+int record_callchain_opt(const struct option *opt, const char *arg, int unset);
+
 extern const char record_callchain_help[];
 #endif	/* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 63df031..49096ea 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -213,7 +213,7 @@
 		       &event->mmap.pgoff,
 		       execname);
 
-		if (n != 8)
+		if (n != 5)
 			continue;
 
 		if (prot[2] != 'x')
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f9f77be..e584cd3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -545,12 +545,19 @@
 
 	md->prev = old;
 
-	if (!evlist->overwrite)
-		perf_mmap__write_tail(md, old);
-
 	return event;
 }
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	if (!evlist->overwrite) {
+		struct perf_mmap *md = &evlist->mmap[idx];
+		unsigned int old = md->prev;
+
+		perf_mmap__write_tail(md, old);
+	}
+}
+
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
 	if (evlist->mmap[idx].base != NULL) {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 880d713..206d093 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -89,6 +89,8 @@
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
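
For reference, the read/consume pattern that the callers in this series are
converted to; a minimal sketch assuming perf's internal util/evlist.h, with a
made-up helper name:

#include "util/evlist.h"

/* Drain one mmap ring: handle each event, then let the kernel reuse its slot. */
static void drain_mmap(struct perf_evlist *evlist, int idx)
{
	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		/* ... parse and process the event while it is still valid ... */
		perf_evlist__mmap_consume(evlist, idx);	/* advances the tail unless in overwrite mode */
	}
}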
 
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 1329b6b..ce8dc61 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -5,6 +5,7 @@
 #include <pthread.h>
 #include "callchain.h"
 #include "header.h"
+#include "color.h"
 
 extern struct callchain_param callchain_param;
 
@@ -175,6 +176,18 @@
 void perf_hpp__column_register(struct perf_hpp_fmt *format);
 void perf_hpp__column_enable(unsigned col);
 
+static inline size_t perf_hpp__use_color(void)
+{
+	return !symbol_conf.field_sep;
+}
+
+static inline size_t perf_hpp__color_overhead(void)
+{
+	return perf_hpp__use_color() ?
+	       (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
+	       : 0;
+}
+
 struct perf_evlist;
 
 struct hist_browser_timer {
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 71b5412..2ac4bc9 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -822,6 +822,8 @@
 		PyObject *pyevent = pyrf_event__new(event);
 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
 
+		perf_evlist__mmap_consume(evlist, cpu);
+
 		if (pyevent == NULL)
 			return PyErr_NoMemory();
 
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index cc75a3c..95d91a0 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -56,6 +56,17 @@
 	Py_FatalError("problem in Python trace event handler");
 }
 
+/*
+ * Insert val into the dictionary and decrement the reference counter.
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
+ * steal a reference, as opposed to PyTuple_SetItem().
+ */
+static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
+{
+	PyDict_SetItemString(dict, key, val);
+	Py_DECREF(val);
+}
+
 static void define_value(enum print_arg_type field_type,
 			 const char *ev_name,
 			 const char *field_name,
@@ -279,11 +290,11 @@
 		PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
 		PyTuple_SetItem(t, n++, PyString_FromString(comm));
 	} else {
-		PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
-		PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
-		PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
-		PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
-		PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+		pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
+		pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
+		pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
+		pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
+		pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
 	}
 	for (field = event->format.fields; field; field = field->next) {
 		if (field->flags & FIELD_IS_STRING) {
@@ -313,7 +324,7 @@
 		if (handler)
 			PyTuple_SetItem(t, n++, obj);
 		else
-			PyDict_SetItemString(dict, field->name, obj);
+			pydict_set_item_string_decref(dict, field->name, obj);
 
 	}
 	if (!handler)
@@ -370,21 +381,21 @@
 	if (!handler || !PyCallable_Check(handler))
 		goto exit;
 
-	PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
-	PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
+	pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
 			(const char *)&evsel->attr, sizeof(evsel->attr)));
-	PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
 			(const char *)sample, sizeof(*sample)));
-	PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize(
+	pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
 			(const char *)sample->raw_data, sample->raw_size));
-	PyDict_SetItemString(dict, "comm",
+	pydict_set_item_string_decref(dict, "comm",
 			PyString_FromString(thread->comm));
 	if (al->map) {
-		PyDict_SetItemString(dict, "dso",
+		pydict_set_item_string_decref(dict, "dso",
 			PyString_FromString(al->map->dso->name));
 	}
 	if (al->sym) {
-		PyDict_SetItemString(dict, "symbol",
+		pydict_set_item_string_decref(dict, "symbol",
 			PyString_FromString(al->sym->name));
 	}
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a9dd682..1cf9ccb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3091,7 +3091,7 @@
 
 static int kvm_init_debug(void)
 {
-	int r = -EFAULT;
+	int r = -EEXIST;
 	struct kvm_stats_debugfs_item *p;
 
 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);